From ec9884833be8c707c7ebff1c1d4db8c0f2cf09b8 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Wed, 2 Apr 2014 17:45:34 -0700 Subject: [PATCH 01/64] first cut at reflections --- interface/src/Application.cpp | 10 ++ interface/src/Application.h | 2 + interface/src/AudioReflector.cpp | 274 +++++++++++++++++++++++++++++++ interface/src/AudioReflector.h | 48 ++++++ interface/src/avatar/Head.h | 7 + 5 files changed, 341 insertions(+) create mode 100644 interface/src/AudioReflector.cpp create mode 100644 interface/src/AudioReflector.h diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index bdbe0194e6..fd566e345e 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -1597,6 +1597,13 @@ void Application::init() { connect(_rearMirrorTools, SIGNAL(restoreView()), SLOT(restoreMirrorView())); connect(_rearMirrorTools, SIGNAL(shrinkView()), SLOT(shrinkMirrorView())); connect(_rearMirrorTools, SIGNAL(resetView()), SLOT(resetSensors())); + + + // set up our audio reflector + _audioReflector.setMyAvatar(getAvatar()); + _audioReflector.setVoxels(_voxels.getTree()); + _audioReflector.setAudio(getAudio()); + connect(getAudio(), &Audio::audioBufferWrittenToDevice, &_audioReflector, &AudioReflector::addSamples); } void Application::closeMirrorView() { @@ -2378,6 +2385,9 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) { // disable specular lighting for ground and voxels glMaterialfv(GL_FRONT, GL_SPECULAR, NO_SPECULAR_COLOR); + + // draw the audio reflector overlay + _audioReflector.render(); // Draw voxels if (Menu::getInstance()->isOptionChecked(MenuOption::Voxels)) { diff --git a/interface/src/Application.h b/interface/src/Application.h index e12d0b307f..50275d96d2 100644 --- a/interface/src/Application.h +++ b/interface/src/Application.h @@ -33,6 +33,7 @@ #include #include "Audio.h" +#include "AudioReflector.h" #include "BuckyBalls.h" #include "Camera.h" #include "DatagramProcessor.h" @@ -497,6 +498,7 @@ private: TouchEvent _lastTouchEvent; Overlays _overlays; + AudioReflector _audioReflector; }; #endif /* defined(__interface__Application__) */ diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp new file mode 100644 index 0000000000..f6d186d5c9 --- /dev/null +++ b/interface/src/AudioReflector.cpp @@ -0,0 +1,274 @@ +// +// AudioReflector.cpp +// interface +// +// Created by Brad Hefta-Gaub on 4/2/2014 +// Copyright (c) 2014 High Fidelity, Inc. All rights reserved. 
+// + +#include "AudioReflector.h" + +void AudioReflector::render() { + if (!_myAvatar) { + return; // exit early if not set up correctly + } + + + /* + glm::vec3 position = _myAvatar->getHead()->getPosition(); + const float radius = 0.25f; + glPushMatrix(); + glTranslatef(position.x, position.y, position.z); + glutWireSphere(radius, 15, 15); + glPopMatrix(); + */ + + drawRays(); +} + + +// delay = 1ms per foot +// = 3ms per meter +// attenuation = +// BOUNCE_ATTENUATION_FACTOR [0.5] * (1/(1+distance)) + +int getDelayFromDistance(float distance) { + const int DELAY_PER_METER = 3; + return DELAY_PER_METER * distance; +} + +const float BOUNCE_ATTENUATION_FACTOR = 0.5f; + +float getDistanceAttenuationCoefficient(float distance) { + const float DISTANCE_SCALE = 2.5f; + const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f; + const float DISTANCE_LOG_BASE = 2.5f; + const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE); + + float distanceSquareToSource = distance * distance; + + // calculate the distance coefficient using the distance to this node + float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR, + DISTANCE_SCALE_LOG + + (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1); + distanceCoefficient = std::min(1.0f, distanceCoefficient); + + return distanceCoefficient; +} + +glm::vec3 getFaceNormal(BoxFace face) { + if (face == MIN_X_FACE) { + return glm::vec3(-1, 0, 0); + } else if (face == MAX_X_FACE) { + return glm::vec3(1, 0, 0); + } else if (face == MIN_Y_FACE) { + return glm::vec3(0, -1, 0); + } else if (face == MAX_Y_FACE) { + return glm::vec3(0, 1, 0); + } else if (face == MIN_Z_FACE) { + return glm::vec3(0, 0, -1); + } else if (face == MAX_Z_FACE) { + return glm::vec3(0, 0, 1); + } + return glm::vec3(0, 0, 0); //error case +} + + +void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& originalDirection, + int bounces, const glm::vec3& originalColor) { + + glm::vec3 start = origin; + glm::vec3 direction = originalDirection; + glm::vec3 color = originalColor; + OctreeElement* elementHit; + float distance; + BoxFace face; + const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point + const float COLOR_ADJUST_PER_BOUNCE = 0.75f; + + for (int i = 0; i < bounces; i++) { + if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { + glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); + drawVector(start, end, color); + + glm::vec3 faceNormal = getFaceNormal(face); + direction = glm::normalize(glm::reflect(direction,faceNormal)); + start = end; + color = color * COLOR_ADJUST_PER_BOUNCE; + } + } +} + +void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, + int bounces, const AudioRingBuffer& samplesRingBuffer) { + + int samplesTouched = 0; + + glm::vec3 rightEarPosition = _myAvatar->getHead()->getRightEarPosition(); + glm::vec3 leftEarPosition = _myAvatar->getHead()->getLeftEarPosition(); + glm::vec3 start = origin; + glm::vec3 direction = originalDirection; + OctreeElement* elementHit; + float distance; + BoxFace face; + const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point + + // set up our buffers for our attenuated and delayed samples + AudioRingBuffer attenuatedLeftSamples(samplesRingBuffer.getSampleCapacity()); + AudioRingBuffer attenuatedRightSamples(samplesRingBuffer.getSampleCapacity()); + + const int NUMBER_OF_CHANNELS 
= 2; + int totalNumberOfSamples = samplesByteArray.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS); + + for (int bounceNumber = 1; bounceNumber <= bounces; bounceNumber++) { + if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { + glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); + + glm::vec3 faceNormal = getFaceNormal(face); + direction = glm::normalize(glm::reflect(direction,faceNormal)); + start = end; + + // calculate the distance to the ears + float rightEarDistance = glm::distance(end, rightEarPosition); + float leftEarDistance = glm::distance(end, leftEarPosition); + int rightEarDelay = getDelayFromDistance(rightEarDistance); + int leftEarDelay = getDelayFromDistance(leftEarDistance); + float rightEarAttenuation = getDistanceAttenuationCoefficient(rightEarDistance) * + (bounceNumber * BOUNCE_ATTENUATION_FACTOR); + float leftEarAttenuation = getDistanceAttenuationCoefficient(leftEarDistance) * + (bounceNumber * BOUNCE_ATTENUATION_FACTOR); + + // run through the samples, and attenuate them + for (int sample = 0; sample < totalNumberOfSamples; sample++) { + int16_t leftSample = samplesRingBuffer[sample * NUMBER_OF_CHANNELS]; + int16_t rightSample = samplesRingBuffer[(sample * NUMBER_OF_CHANNELS) + 1]; + + attenuatedLeftSamples[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation; + attenuatedLeftSamples[sample * NUMBER_OF_CHANNELS + 1] = 0; + + attenuatedRightSamples[sample * NUMBER_OF_CHANNELS] = 0; + attenuatedRightSamples[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation; + + samplesTouched++; + } + + // now inject the attenuated array with the appropriate delay + _audio->addDelayedAudio(attenuatedLeftSamples, leftEarDelay); + _audio->addDelayedAudio(attenuatedRightSamples, rightEarDelay); + } + } +} + +void AudioReflector::addSamples(AudioRingBuffer samples) { + quint64 start = usecTimestampNow(); + + glm::vec3 origin = _myAvatar->getHead()->getPosition(); + + glm::quat orientation = _myAvatar->getOrientation(); // _myAvatar->getHead()->getOrientation(); + glm::vec3 right = glm::normalize(orientation * IDENTITY_RIGHT); + glm::vec3 up = glm::normalize(orientation * IDENTITY_UP); + glm::vec3 front = glm::normalize(orientation * IDENTITY_FRONT); + glm::vec3 left = -right; + glm::vec3 down = -up; + glm::vec3 back = -front; + glm::vec3 frontRightUp = glm::normalize(front + right + up); + glm::vec3 frontLeftUp = glm::normalize(front + left + up); + glm::vec3 backRightUp = glm::normalize(back + right + up); + glm::vec3 backLeftUp = glm::normalize(back + left + up); + glm::vec3 frontRightDown = glm::normalize(front + right + down); + glm::vec3 frontLeftDown = glm::normalize(front + left + down); + glm::vec3 backRightDown = glm::normalize(back + right + down); + glm::vec3 backLeftDown = glm::normalize(back + left + down); + + const int BOUNCE_COUNT = 5; + + calculateReflections(origin, frontRightUp, BOUNCE_COUNT, samples); + calculateReflections(origin, frontLeftUp, BOUNCE_COUNT, samples); + calculateReflections(origin, backRightUp, BOUNCE_COUNT, samples); + calculateReflections(origin, backLeftUp, BOUNCE_COUNT, samples); + calculateReflections(origin, frontRightDown, BOUNCE_COUNT, samples); + calculateReflections(origin, frontLeftDown, BOUNCE_COUNT, samples); + calculateReflections(origin, backRightDown, BOUNCE_COUNT, samples); + calculateReflections(origin, backLeftDown, BOUNCE_COUNT, samples); + + calculateReflections(origin, front, BOUNCE_COUNT, samples); + calculateReflections(origin, back, BOUNCE_COUNT, 
samples); + calculateReflections(origin, left, BOUNCE_COUNT, samples); + calculateReflections(origin, right, BOUNCE_COUNT, samples); + calculateReflections(origin, up, BOUNCE_COUNT, samples); + calculateReflections(origin, down, BOUNCE_COUNT, samples); + quint64 end = usecTimestampNow(); + + qDebug() << "AudioReflector::addSamples()... samples.size()=" << samples.size() << " elapsed=" << (end - start); + +} + +void AudioReflector::drawRays() { + glm::vec3 origin = _myAvatar->getHead()->getPosition(); + //glm::vec3 origin = _myAvatar->getHead()->getRightEarPosition(); + + glm::quat orientation = _myAvatar->getOrientation(); // _myAvatar->getHead()->getOrientation(); + glm::vec3 right = glm::normalize(orientation * IDENTITY_RIGHT); + glm::vec3 up = glm::normalize(orientation * IDENTITY_UP); + glm::vec3 front = glm::normalize(orientation * IDENTITY_FRONT); + glm::vec3 left = -right; + glm::vec3 down = -up; + glm::vec3 back = -front; + glm::vec3 frontRightUp = glm::normalize(front + right + up); + glm::vec3 frontLeftUp = glm::normalize(front + left + up); + glm::vec3 backRightUp = glm::normalize(back + right + up); + glm::vec3 backLeftUp = glm::normalize(back + left + up); + glm::vec3 frontRightDown = glm::normalize(front + right + down); + glm::vec3 frontLeftDown = glm::normalize(front + left + down); + glm::vec3 backRightDown = glm::normalize(back + right + down); + glm::vec3 backLeftDown = glm::normalize(back + left + down); + + + const glm::vec3 RED(1,0,0); + const glm::vec3 GREEN(0,1,0); + const glm::vec3 BLUE(0,0,1); + const glm::vec3 PURPLE(1,0,1); + const glm::vec3 YELLOW(1,1,0); + const glm::vec3 CYAN(0,1,1); + const glm::vec3 DARK_RED(0.8f,0.2f,0.2f); + const glm::vec3 DARK_GREEN(0.2f,0.8f,0.2f); + const glm::vec3 DARK_BLUE(0.2f,0.2f,0.8f); + const glm::vec3 DARK_PURPLE(0.8f,0.2f,0.8f); + const glm::vec3 DARK_YELLOW(0.8f,0.8f,0.2f); + const glm::vec3 DARK_CYAN(0.2f,0.8f,0.8f); + + const glm::vec3 WHITE(1,1,1); + const glm::vec3 GRAY(0.5f,0.5f,0.5f); + + const int BOUNCE_COUNT = 5; + + drawReflections(origin, frontRightUp, BOUNCE_COUNT, RED); + drawReflections(origin, frontLeftUp, BOUNCE_COUNT, GREEN); + drawReflections(origin, backRightUp, BOUNCE_COUNT, BLUE); + drawReflections(origin, backLeftUp, BOUNCE_COUNT, CYAN); + drawReflections(origin, frontRightDown, BOUNCE_COUNT, PURPLE); + drawReflections(origin, frontLeftDown, BOUNCE_COUNT, YELLOW); + drawReflections(origin, backRightDown, BOUNCE_COUNT, WHITE); + drawReflections(origin, backLeftDown, BOUNCE_COUNT, DARK_RED); + + drawReflections(origin, front, BOUNCE_COUNT, DARK_GREEN); + drawReflections(origin, back, BOUNCE_COUNT, DARK_BLUE); + drawReflections(origin, left, BOUNCE_COUNT, DARK_CYAN); + drawReflections(origin, right, BOUNCE_COUNT, DARK_PURPLE); + drawReflections(origin, up, BOUNCE_COUNT, DARK_YELLOW); + drawReflections(origin, down, BOUNCE_COUNT, GRAY); +} + +void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) { + glDisable(GL_LIGHTING); // ?? + glLineWidth(2.0); + + // Draw the vector itself + glBegin(GL_LINES); + glColor3f(color.x,color.y,color.z); + glVertex3f(start.x, start.y, start.z); + glVertex3f(end.x, end.y, end.z); + glEnd(); + + glEnable(GL_LIGHTING); // ?? 
+} diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h new file mode 100644 index 0000000000..a348fc67ea --- /dev/null +++ b/interface/src/AudioReflector.h @@ -0,0 +1,48 @@ +// +// AudioReflector.h +// interface +// +// Created by Brad Hefta-Gaub on 4/2/2014 +// Copyright (c) 2014 High Fidelity, Inc. All rights reserved. +// + +#ifndef __interface__AudioReflector__ +#define __interface__AudioReflector__ + +#include + +#include "Audio.h" +#include "avatar/MyAvatar.h" + +class AudioReflector : public QObject { + Q_OBJECT +public: + AudioReflector(QObject* parent = 0) : QObject(parent) { }; + + void setVoxels(VoxelTree* voxels) { _voxels = voxels; } + void setMyAvatar(MyAvatar* myAvatar) { _myAvatar = myAvatar; } + void setAudio(Audio* audio) { _audio = audio; } + + void render(); + +public slots: + void addSamples(AudioRingBuffer samples); + +signals: + +private: + VoxelTree* _voxels; // used to access voxel scene + MyAvatar* _myAvatar; // access to listener + Audio* _audio; // access to audio API + + void drawRays(); + void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color); + void drawReflections(const glm::vec3& origin, const glm::vec3& direction, int bounces, const glm::vec3& color); + + void calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, + int bounces, const AudioRingBuffer& samplesRingBuffer); + +}; + + +#endif /* defined(__interface__AudioReflector__) */ diff --git a/interface/src/avatar/Head.h b/interface/src/avatar/Head.h index 8a03cfc7ad..0ec0b17a8c 100644 --- a/interface/src/avatar/Head.h +++ b/interface/src/avatar/Head.h @@ -27,6 +27,8 @@ enum eyeContactTargets { MOUTH }; +const float EYE_EAR_GAP = 0.08f; + class Avatar; class ProgramObject; @@ -70,6 +72,11 @@ public: glm::quat getEyeRotation(const glm::vec3& eyePosition) const; + const glm::vec3& getRightEyePosition() const { return _rightEyePosition; } + const glm::vec3& getLeftEyePosition() const { return _leftEyePosition; } + glm::vec3 getRightEarPosition() const { return _rightEyePosition + (getRightDirection() * EYE_EAR_GAP) + (getFrontDirection() * -EYE_EAR_GAP); } + glm::vec3 getLeftEarPosition() const { return _leftEyePosition + (getRightDirection() * -EYE_EAR_GAP) + (getFrontDirection() * -EYE_EAR_GAP); } + FaceModel& getFaceModel() { return _faceModel; } const FaceModel& getFaceModel() const { return _faceModel; } From f7d926931423d450fdb2beb0c18755e1cc47bbc2 Mon Sep 17 00:00:00 2001 From: matsukaze Date: Thu, 3 Apr 2014 17:36:03 -0700 Subject: [PATCH 02/64] Modifications to support spatial audio processing. 
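A note on the attenuation model this series builds on (introduced in the patch above): getDistanceAttenuationCoefficient() looks opaque, but since DISTANCE_SCALE_LOG = logf(2.5)/logf(2.5) = 1, the exponent collapses to log2.5(distance), and the whole expression is the power law distance^(log2.5(0.3)), roughly distance^-1.31, clamped to 1.0 for distances under a meter. A self-contained check of that equivalence (illustrative code, not part of the patches):

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>

// The patch's coefficient 0.3^(1 + 0.5*log2.5(d^2) - 1) is the power law
// d^(log2.5(0.3)) ~= d^-1.31, clamped to 1.0 below one meter.
int main() {
    const float g = 0.3f; // GEOMETRIC_AMPLITUDE_SCALAR
    const float b = 2.5f; // DISTANCE_LOG_BASE (and DISTANCE_SCALE)
    for (float d = 0.5f; d <= 64.0f; d *= 2.0f) {
        float patchForm = powf(g, logf(b) / logf(b) + 0.5f * logf(d * d) / logf(b) - 1.0f);
        float powerLaw = powf(d, logf(g) / logf(b));
        assert(fabsf(std::min(1.0f, patchForm) - std::min(1.0f, powerLaw)) < 1e-4f);
        printf("distance %6.2f m -> attenuation %.4f\n", d, std::min(1.0f, powerLaw));
    }
    return 0;
}

Each bounce is additionally scaled by bounceNumber * BOUNCE_ATTENUATION_FACTOR; note that this grows with the bounce index, so later bounces are scaled up rather than down (patch 06 below reduces the factor itself from 0.5 to 0.125).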
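The heart of this patch is Audio::addSpatialAudioToBuffer(), which mixes spatialized audio into an accumulation ring buffer indexed by sample time: _spatialAudioStart and _spatialAudioFinish bound the interval currently buffered, and an incoming block either appends directly, appends after silent padding, or sums into the overlapping region. A minimal mono sketch of that interval logic (the diff below works on interleaved stereo and clips against the ring buffer's free capacity; the names here are illustrative, not from the patch):

#include <algorithm>
#include <cstdint>
#include <vector>

struct SpatialAccumulator {
    std::vector<int16_t> mixed;   // stands in for _spatialAudioRingBuffer
    unsigned int start = 0;       // sample time of mixed[0]
    unsigned int finish = 0;      // one past the last buffered sample time

    void add(unsigned int sampleTime, const std::vector<int16_t>& samples) {
        if (sampleTime >= finish) {
            if (start == finish) {
                // case 1: nothing buffered yet - straight copy
                start = sampleTime;
            } else {
                // case 2: gap after the buffered audio - pad with silence
                mixed.insert(mixed.end(), sampleTime - finish, 0);
            }
            mixed.insert(mixed.end(), samples.begin(), samples.end());
            finish = sampleTime + (unsigned int)samples.size();
        } else {
            // case 3: overlap - sum into the tail, then append the rest
            // (assumes sampleTime >= start, as the patch does)
            unsigned int offset = sampleTime - start;
            size_t overlap = std::min<size_t>(finish - sampleTime, samples.size());
            for (size_t i = 0; i < overlap; i++) {
                mixed[offset + i] += samples[i];
            }
            mixed.insert(mixed.end(), samples.begin() + overlap, samples.end());
            finish = std::max(finish, sampleTime + (unsigned int)samples.size());
        }
    }
};

This bookkeeping is also why every interval update in the diff divides a sample count by _desiredOutputFormat.channelCount(): start and finish advance in per-channel sample time while the buffers hold interleaved samples.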
--- interface/src/Audio.cpp | 122 +++++++++++++++++++++++++++++++++++----- interface/src/Audio.h | 13 ++++- interface/src/Menu.cpp | 5 ++ interface/src/Menu.h | 1 + 4 files changed, 125 insertions(+), 16 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 63c683dbb0..503ec5155b 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -76,7 +76,11 @@ Audio::Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples, QObject* p _collisionSoundDuration(0.0f), _proceduralEffectSample(0), _numFramesDisplayStarve(0), - _muted(false) + _muted(false), + _processSpatialAudio(false), + _spatialAudioStart(0), + _spatialAudioFinish(0), + _spatialAudioRingBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL) { // clear the array of locally injected samples memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL); @@ -584,7 +588,7 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) { _totalPacketsReceived++; double timeDiff = diffclock(&_lastReceiveTime, ¤tReceiveTime); - + // Discard first few received packets for computing jitter (often they pile up on start) if (_totalPacketsReceived > NUM_INITIAL_PACKETS_DISCARD) { _stdev.addValue(timeDiff); @@ -604,7 +608,8 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) { if (_audioOutput) { // Audio output must exist and be correctly set up if we're going to process received audio - processReceivedAudio(audioByteArray); + _ringBuffer.parseData(audioByteArray); + processReceivedAudio(_spatialAudioStart, _ringBuffer); } Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size()); @@ -612,6 +617,68 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) { _lastReceiveTime = currentReceiveTime; } +unsigned int Audio::timeValToSampleTick(const quint64 time, int sampleRate) { + unsigned int sample = (unsigned int)(time / 1000000 * sampleRate); + return sample; +} + +void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& spatialAudio) { + + // Calculate the number of remaining samples available + unsigned int remaining = _spatialAudioRingBuffer.getSampleCapacity() - _spatialAudioRingBuffer.samplesAvailable(); + if (sampleTime >= _spatialAudioFinish) { + if (_spatialAudioStart == _spatialAudioFinish) { + + // Nothing in the spatial audio ring buffer yet + // Just do a straight copy, clipping if necessary + unsigned int sampleCt = (remaining < spatialAudio.samplesAvailable()) ? remaining : spatialAudio.samplesAvailable(); + if (sampleCt) { + _spatialAudioRingBuffer.writeSamples(spatialAudio.getBuffer(), sampleCt); + } + _spatialAudioFinish = _spatialAudioStart + spatialAudio.samplesAvailable() / _desiredOutputFormat.channelCount(); + + } else { + + // Spatial audio ring buffer already has data, but there is no overlap with the new sample + // compute the appropriate time delay and pad with silence until the new start time + unsigned int delay = sampleTime - _spatialAudioFinish; + unsigned int ct = delay * _desiredOutputFormat.channelCount(); + unsigned int silentCt = (remaining < ct) ? remaining : ct; + if (silentCt) { + _spatialAudioRingBuffer.addSilentFrame(silentCt); + } + + // Recalculate the number of remaining samples + remaining -= silentCt; + unsigned int sampleCt = (remaining < spatialAudio.samplesAvailable()) ? 
remaining : spatialAudio.samplesAvailable(); + + // Copy the new spatial audio to the accumulation ring buffer + if (sampleCt) { + _spatialAudioRingBuffer.writeSamples(spatialAudio.getBuffer(), sampleCt); + } + _spatialAudioFinish += (sampleCt + silentCt) / _desiredOutputFormat.channelCount(); + } + } else { + + // There is overlap between the spatial audio buffer and the new sample, + // acumulate the overlap + unsigned int offset = (sampleTime - _spatialAudioStart) * _desiredOutputFormat.channelCount(); + unsigned int accumulationCt = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount(); + int j = 0; + for (int i = accumulationCt; --i >= 0; j++) { + _spatialAudioRingBuffer[j + offset] += spatialAudio[j]; + } + + // Copy the remaining unoverlapped spatial audio to the accumulation buffer + unsigned int sampleCt = (remaining < spatialAudio.samplesAvailable()) ? remaining : spatialAudio.samplesAvailable(); + if (sampleCt) { + _spatialAudioRingBuffer.writeSamples(spatialAudio.getBuffer() + accumulationCt, sampleCt); + } + _spatialAudioFinish += sampleCt / _desiredOutputFormat.channelCount(); + } + spatialAudio.reset(); +} + bool Audio::mousePressEvent(int x, int y) { if (_iconBounds.contains(x, y)) { toggleMute(); @@ -629,24 +696,23 @@ void Audio::toggleAudioNoiseReduction() { _noiseGateEnabled = !_noiseGateEnabled; } -void Audio::processReceivedAudio(const QByteArray& audioByteArray) { - _ringBuffer.parseData(audioByteArray); +void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringBuffer) { float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate()) * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount()); - if (!_ringBuffer.isStarved() && _audioOutput && _audioOutput->bytesFree() == _audioOutput->bufferSize()) { + if (!ringBuffer.isStarved() && _audioOutput && _audioOutput->bytesFree() == _audioOutput->bufferSize()) { // we don't have any audio data left in the output buffer // we just starved //qDebug() << "Audio output just starved."; - _ringBuffer.setIsStarved(true); + ringBuffer.setIsStarved(true); _numFramesDisplayStarve = 10; } // if there is anything in the ring buffer, decide what to do - if (_ringBuffer.samplesAvailable() > 0) { + if (ringBuffer.samplesAvailable() > 0) { - int numNetworkOutputSamples = _ringBuffer.samplesAvailable(); + int numNetworkOutputSamples = ringBuffer.samplesAvailable(); int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio; QByteArray outputBuffer; @@ -654,19 +720,35 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { int numSamplesNeededToStartPlayback = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2); - if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) { + if (!ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) { // We are still waiting for enough samples to begin playback // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback; } else { // We are either already playing back, or we have enough audio to start playing back. 
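In the spatial branch added just below, one network frame takes three steps: the direct signal is accumulated into the spatial buffer at the current sample time, the processSpatialAudio signal hands the same frame to the AudioReflector (wired up in patch 03) so it can add delayed, attenuated echoes further ahead in sample time, and the mixed result is read back out for the device. Echo delays come from extra path length under the 3 ms-per-meter model above, converted to sample ticks; a self-contained sketch of that conversion, assuming a hypothetical 48 kHz output rate (with integer math the multiply has to come first - the divide-first form in patch 03 truncates any sub-second delay to zero, which patch 06 reorders):

#include <cassert>

// Convert an echo's extra path length into a sample-tick delay.
// 3 ms per meter approximates the speed of sound (~343 m/s, ~2.9 ms/m).
int delayTicksForDistance(float meters, int sampleRate) {
    const int MSECS_PER_METER = 3;
    const int MSECS_PER_SECOND = 1000;
    int delayMsecs = (int)(MSECS_PER_METER * meters);
    // multiply before dividing: delayMsecs / MSECS_PER_SECOND would
    // truncate to 0 for any delay shorter than one second
    return delayMsecs * sampleRate / MSECS_PER_SECOND;
}

int main() {
    // a reflection path 10 m longer than the direct path, 48 kHz output
    assert(delayTicksForDistance(10.0f, 48000) == 1440);
    return 0;
}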
//qDebug() << "pushing " << numNetworkOutputSamples; - _ringBuffer.setIsStarved(false); + ringBuffer.setIsStarved(false); - // copy the samples we'll resample from the ring buffer - this also - // pushes the read pointer of the ring buffer forwards int16_t* ringBufferSamples= new int16_t[numNetworkOutputSamples]; - _ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); - + if (_processSpatialAudio) { + unsigned int sampleTime = _spatialAudioFinish; + // Accumulate direct transmission of audio from sender to receiver + addSpatialAudioToBuffer(sampleTime, ringBuffer); + + // Send audio off for spatial processing + emit processSpatialAudio(sampleTime, ringBuffer, _desiredOutputFormat); + + // copy the samples we'll resample from the spatial audio ring buffer - this also + // pushes the read pointer of the spatial audio ring buffer forwards + _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); + _spatialAudioStart += ringBuffer.samplesAvailable() / _desiredOutputFormat.channelCount(); + + } else { + + // copy the samples we'll resample from the ring buffer - this also + // pushes the read pointer of the ring buffer forwards + ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); + } + // add the next numNetworkOutputSamples from each QByteArray // in our _localInjectionByteArrays QVector to the localInjectedSamples @@ -723,6 +805,10 @@ void Audio::toggleToneInjection() { _toneInjectionEnabled = !_toneInjectionEnabled; } +void Audio::toggleAudioSpatialProcessing() { + _processSpatialAudio = !_processSpatialAudio; +} + // Take a pointer to the acquired microphone input samples and add procedural sounds void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) { float sample; @@ -943,6 +1029,12 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo) _proceduralAudioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this); gettimeofday(&_lastReceiveTime, NULL); + + // setup spatial audio ringbuffer + int numFrameSamples = _outputFormat.sampleRate() * _desiredOutputFormat.channelCount(); + _spatialAudioRingBuffer.resizeForFrameSize(numFrameSamples); + _spatialAudioStart = _spatialAudioFinish = 0; + supportedFormat = true; } } diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 88488922f3..879eb27d42 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -78,11 +78,13 @@ public slots: void start(); void stop(); void addReceivedAudioToBuffer(const QByteArray& audioByteArray); + void addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& spatialAudio); void handleAudioInput(); void reset(); void toggleMute(); void toggleAudioNoiseReduction(); void toggleToneInjection(); + void toggleAudioSpatialProcessing(); virtual void handleAudioByteArray(const QByteArray& audioByteArray); @@ -98,6 +100,7 @@ public slots: signals: bool muteToggled(); + void processSpatialAudio(unsigned int sampleTime, const AudioRingBuffer& ringBuffer, const QAudioFormat& format); private: @@ -166,6 +169,14 @@ private: // Audio callback in class context. 
inline void performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight); + // Process received audio by spatial attenuation geometric response + bool _processSpatialAudio; + unsigned int _spatialAudioStart; ///< Start of spatial audio interval (in sample rate time base) + unsigned int _spatialAudioFinish; ///< End of spatial audio interval (in sample rate time base) + AudioRingBuffer _spatialAudioRingBuffer; ///< Spatially processed audio + + unsigned int timeValToSampleTick(const quint64 time, int sampleRate); + // Process procedural audio by // 1. Echo to the local procedural output device // 2. Mix with the audio input @@ -175,7 +186,7 @@ private: void addProceduralSounds(int16_t* monoInput, int numSamples); // Process received audio - void processReceivedAudio(const QByteArray& audioByteArray); + void processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringBuffer); bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo); bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo); diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index 89de7a2d03..3dd4733c64 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -366,6 +366,11 @@ Menu::Menu() : false, appInstance->getAudio(), SLOT(toggleToneInjection())); + addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessing, + Qt::CTRL | Qt::SHIFT | Qt::Key_M, + false, + appInstance->getAudio(), + SLOT(toggleAudioSpatialProcessing())); addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel, diff --git a/interface/src/Menu.h b/interface/src/Menu.h index 5aa0a13c9c..b6feb2e2f5 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -243,6 +243,7 @@ namespace MenuOption { const QString Enable3DTVMode = "Enable 3DTV Mode"; const QString AudioNoiseReduction = "Audio Noise Reduction"; const QString AudioToneInjection = "Inject Test Tone"; + const QString AudioSpatialProcessing = "Audio Spatial Processing"; const QString EchoServerAudio = "Echo Server Audio"; const QString EchoLocalAudio = "Echo Local Audio"; const QString MuteAudio = "Mute Microphone"; From 42efb0db6552581936ebd316b224e1c4bab957d1 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Wed, 2 Apr 2014 18:18:17 -0700 Subject: [PATCH 03/64] glue in processSpatialAudio() --- interface/src/Application.cpp | 2 +- interface/src/Audio.h | 2 +- interface/src/AudioReflector.cpp | 63 ++++++++++++++----------- interface/src/AudioReflector.h | 5 +- libraries/audio/src/AudioRingBuffer.cpp | 4 ++ libraries/audio/src/AudioRingBuffer.h | 1 + 6 files changed, 46 insertions(+), 31 deletions(-) diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index fd566e345e..bf3166a928 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -1603,7 +1603,7 @@ void Application::init() { _audioReflector.setMyAvatar(getAvatar()); _audioReflector.setVoxels(_voxels.getTree()); _audioReflector.setAudio(getAudio()); - connect(getAudio(), &Audio::audioBufferWrittenToDevice, &_audioReflector, &AudioReflector::addSamples); + connect(getAudio(), &Audio::processSpatialAudio, &_audioReflector, &AudioReflector::processSpatialAudio); } void Application::closeMirrorView() { diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 879eb27d42..2a5119a0f9 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -100,7 +100,7 @@ public slots: signals: bool muteToggled(); - void processSpatialAudio(unsigned int sampleTime, const AudioRingBuffer& 
ringBuffer, const QAudioFormat& format); + void processSpatialAudio(unsigned int sampleTime, AudioRingBuffer& ringBuffer, const QAudioFormat& format); private: diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index f6d186d5c9..077c370c63 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -33,8 +33,8 @@ void AudioReflector::render() { // BOUNCE_ATTENUATION_FACTOR [0.5] * (1/(1+distance)) int getDelayFromDistance(float distance) { - const int DELAY_PER_METER = 3; - return DELAY_PER_METER * distance; + const int MS_DELAY_PER_METER = 3; + return MS_DELAY_PER_METER * distance; } const float BOUNCE_ATTENUATION_FACTOR = 0.5f; @@ -99,8 +99,10 @@ void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& o } } + void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, - int bounces, const AudioRingBuffer& samplesRingBuffer) { + int bounces, const AudioRingBuffer& samplesRingBuffer, + unsigned int sampleTime, int sampleRate) { int samplesTouched = 0; @@ -114,11 +116,11 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point // set up our buffers for our attenuated and delayed samples - AudioRingBuffer attenuatedLeftSamples(samplesRingBuffer.getSampleCapacity()); - AudioRingBuffer attenuatedRightSamples(samplesRingBuffer.getSampleCapacity()); - const int NUMBER_OF_CHANNELS = 2; - int totalNumberOfSamples = samplesByteArray.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS); + AudioRingBuffer attenuatedLeftSamples(samplesRingBuffer.samplesAvailable()); + AudioRingBuffer attenuatedRightSamples(samplesRingBuffer.samplesAvailable()); + + int totalNumberOfSamples = samplesRingBuffer.samplesAvailable(); for (int bounceNumber = 1; bounceNumber <= bounces; bounceNumber++) { if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { @@ -131,8 +133,11 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve // calculate the distance to the ears float rightEarDistance = glm::distance(end, rightEarPosition); float leftEarDistance = glm::distance(end, leftEarPosition); - int rightEarDelay = getDelayFromDistance(rightEarDistance); - int leftEarDelay = getDelayFromDistance(leftEarDistance); + int rightEarDelayMsecs = getDelayFromDistance(rightEarDistance); + int leftEarDelayMsecs = getDelayFromDistance(leftEarDistance); + int rightEarDelay = rightEarDelayMsecs / MSECS_PER_SECOND * sampleRate; + int leftEarDelay = leftEarDelayMsecs / MSECS_PER_SECOND * sampleRate; + float rightEarAttenuation = getDistanceAttenuationCoefficient(rightEarDistance) * (bounceNumber * BOUNCE_ATTENUATION_FACTOR); float leftEarAttenuation = getDistanceAttenuationCoefficient(leftEarDistance) * @@ -153,13 +158,17 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve } // now inject the attenuated array with the appropriate delay - _audio->addDelayedAudio(attenuatedLeftSamples, leftEarDelay); - _audio->addDelayedAudio(attenuatedRightSamples, rightEarDelay); + + unsigned int sampleTimeLeft = sampleTime + leftEarDelay; + unsigned int sampleTimeRight = sampleTime + rightEarDelay; + + _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples); + _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples); } } } -void AudioReflector::addSamples(AudioRingBuffer samples) { 
+void AudioReflector::processSpatialAudio(unsigned int sampleTime, AudioRingBuffer& ringBuffer, const QAudioFormat& format) { quint64 start = usecTimestampNow(); glm::vec3 origin = _myAvatar->getHead()->getPosition(); @@ -182,24 +191,24 @@ void AudioReflector::addSamples(AudioRingBuffer samples) { const int BOUNCE_COUNT = 5; - calculateReflections(origin, frontRightUp, BOUNCE_COUNT, samples); - calculateReflections(origin, frontLeftUp, BOUNCE_COUNT, samples); - calculateReflections(origin, backRightUp, BOUNCE_COUNT, samples); - calculateReflections(origin, backLeftUp, BOUNCE_COUNT, samples); - calculateReflections(origin, frontRightDown, BOUNCE_COUNT, samples); - calculateReflections(origin, frontLeftDown, BOUNCE_COUNT, samples); - calculateReflections(origin, backRightDown, BOUNCE_COUNT, samples); - calculateReflections(origin, backLeftDown, BOUNCE_COUNT, samples); + calculateReflections(origin, frontRightUp, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, frontLeftUp, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, backRightUp, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, backLeftUp, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, frontRightDown, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, frontLeftDown, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, backRightDown, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, backLeftDown, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, front, BOUNCE_COUNT, samples); - calculateReflections(origin, back, BOUNCE_COUNT, samples); - calculateReflections(origin, left, BOUNCE_COUNT, samples); - calculateReflections(origin, right, BOUNCE_COUNT, samples); - calculateReflections(origin, up, BOUNCE_COUNT, samples); - calculateReflections(origin, down, BOUNCE_COUNT, samples); + calculateReflections(origin, front, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, back, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, left, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, right, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, up, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, down, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); quint64 end = usecTimestampNow(); - qDebug() << "AudioReflector::addSamples()... samples.size()=" << samples.size() << " elapsed=" << (end - start); + //qDebug() << "AudioReflector::addSamples()... 
samples.size()=" << samples.size() << " elapsed=" << (end - start); } diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index a348fc67ea..7934b30ad2 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -26,7 +26,7 @@ public: void render(); public slots: - void addSamples(AudioRingBuffer samples); + void processSpatialAudio(unsigned int sampleTime, AudioRingBuffer& ringBuffer, const QAudioFormat& format); signals: @@ -40,7 +40,8 @@ private: void drawReflections(const glm::vec3& origin, const glm::vec3& direction, int bounces, const glm::vec3& color); void calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, - int bounces, const AudioRingBuffer& samplesRingBuffer); + int bounces, const AudioRingBuffer& samplesRingBuffer, + unsigned int sampleTime, int sampleRate); }; diff --git a/libraries/audio/src/AudioRingBuffer.cpp b/libraries/audio/src/AudioRingBuffer.cpp index 376b60ffa1..5dac3e0456 100644 --- a/libraries/audio/src/AudioRingBuffer.cpp +++ b/libraries/audio/src/AudioRingBuffer.cpp @@ -125,6 +125,10 @@ int16_t& AudioRingBuffer::operator[](const int index) { return *shiftedPositionAccomodatingWrap(_nextOutput, index); } +const int16_t& AudioRingBuffer::operator[] (const int index) const { + return *shiftedPositionAccomodatingWrap(_nextOutput, index); +} + void AudioRingBuffer::shiftReadPosition(unsigned int numSamples) { _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples); } diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index e55eeda40e..e3c82a1509 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -57,6 +57,7 @@ public: qint64 writeData(const char* data, qint64 maxSize); int16_t& operator[](const int index); + const int16_t& operator[] (const int index) const; void shiftReadPosition(unsigned int numSamples); From 7717a09f916c3c2efd4dd9268ead85da6dfe4abd Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Wed, 2 Apr 2014 18:48:28 -0700 Subject: [PATCH 04/64] use QByteArray for processSpatialAudio() signal --- interface/src/Audio.cpp | 7 +++-- interface/src/Audio.h | 2 +- interface/src/AudioReflector.cpp | 51 +++++++++++++++++++------------- interface/src/AudioReflector.h | 4 +-- 4 files changed, 37 insertions(+), 27 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 503ec5155b..668c8f353b 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -734,14 +734,15 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB // Accumulate direct transmission of audio from sender to receiver addSpatialAudioToBuffer(sampleTime, ringBuffer); - // Send audio off for spatial processing - emit processSpatialAudio(sampleTime, ringBuffer, _desiredOutputFormat); - // copy the samples we'll resample from the spatial audio ring buffer - this also // pushes the read pointer of the spatial audio ring buffer forwards _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); _spatialAudioStart += ringBuffer.samplesAvailable() / _desiredOutputFormat.channelCount(); + // Send audio off for spatial processing + emit processSpatialAudio(sampleTime, QByteArray((char*)ringBufferSamples, numNetworkOutputSamples), _desiredOutputFormat); + + } else { // copy the samples we'll resample from the ring buffer - this also diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 2a5119a0f9..51a3aab8ae 100644 --- a/interface/src/Audio.h 
+++ b/interface/src/Audio.h @@ -100,7 +100,7 @@ public slots: signals: bool muteToggled(); - void processSpatialAudio(unsigned int sampleTime, AudioRingBuffer& ringBuffer, const QAudioFormat& format); + void processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); private: diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 077c370c63..aea1583c68 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -101,7 +101,7 @@ void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& o void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, - int bounces, const AudioRingBuffer& samplesRingBuffer, + int bounces, const QByteArray& originalSamples, unsigned int sampleTime, int sampleRate) { int samplesTouched = 0; @@ -117,10 +117,13 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve // set up our buffers for our attenuated and delayed samples const int NUMBER_OF_CHANNELS = 2; - AudioRingBuffer attenuatedLeftSamples(samplesRingBuffer.samplesAvailable()); - AudioRingBuffer attenuatedRightSamples(samplesRingBuffer.samplesAvailable()); + + int totalNumberOfSamples = originalSamples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS); + + const int16_t* originalSamplesData = (const int16_t*)originalSamples.constData(); + AudioRingBuffer attenuatedLeftSamples(totalNumberOfSamples); + AudioRingBuffer attenuatedRightSamples(totalNumberOfSamples); - int totalNumberOfSamples = samplesRingBuffer.samplesAvailable(); for (int bounceNumber = 1; bounceNumber <= bounces; bounceNumber++) { if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { @@ -145,14 +148,18 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve // run through the samples, and attenuate them for (int sample = 0; sample < totalNumberOfSamples; sample++) { - int16_t leftSample = samplesRingBuffer[sample * NUMBER_OF_CHANNELS]; - int16_t rightSample = samplesRingBuffer[(sample * NUMBER_OF_CHANNELS) + 1]; + int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS]; + int16_t rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1]; + + //qDebug() << "leftSample=" << leftSample << "rightSample=" << rightSample; attenuatedLeftSamples[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation; attenuatedLeftSamples[sample * NUMBER_OF_CHANNELS + 1] = 0; attenuatedRightSamples[sample * NUMBER_OF_CHANNELS] = 0; attenuatedRightSamples[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation; + + //qDebug() << "attenuated... 
leftSample=" << (leftSample * leftEarAttenuation) << "rightSample=" << (rightSample * rightEarAttenuation); samplesTouched++; } @@ -162,13 +169,15 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve unsigned int sampleTimeLeft = sampleTime + leftEarDelay; unsigned int sampleTimeRight = sampleTime + rightEarDelay; + qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight; + _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples); _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples); } } } -void AudioReflector::processSpatialAudio(unsigned int sampleTime, AudioRingBuffer& ringBuffer, const QAudioFormat& format) { +void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { quint64 start = usecTimestampNow(); glm::vec3 origin = _myAvatar->getHead()->getPosition(); @@ -191,21 +200,21 @@ void AudioReflector::processSpatialAudio(unsigned int sampleTime, AudioRingBuffe const int BOUNCE_COUNT = 5; - calculateReflections(origin, frontRightUp, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, frontLeftUp, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, backRightUp, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, backLeftUp, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, frontRightDown, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, frontLeftDown, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, backRightDown, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, backLeftDown, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, frontRightUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, frontLeftUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, backRightUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, backLeftUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, frontRightDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, frontLeftDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, backRightDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, backLeftDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - calculateReflections(origin, front, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, back, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, left, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, right, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, up, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); - calculateReflections(origin, down, BOUNCE_COUNT, ringBuffer, sampleTime, format.sampleRate()); + calculateReflections(origin, front, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, back, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, left, BOUNCE_COUNT, samples, 
sampleTime, format.sampleRate()); + calculateReflections(origin, right, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, up, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + calculateReflections(origin, down, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); quint64 end = usecTimestampNow(); //qDebug() << "AudioReflector::addSamples()... samples.size()=" << samples.size() << " elapsed=" << (end - start); diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 7934b30ad2..4767361c29 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -26,7 +26,7 @@ public: void render(); public slots: - void processSpatialAudio(unsigned int sampleTime, AudioRingBuffer& ringBuffer, const QAudioFormat& format); + void processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); signals: @@ -40,7 +40,7 @@ private: void drawReflections(const glm::vec3& origin, const glm::vec3& direction, int bounces, const glm::vec3& color); void calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, - int bounces, const AudioRingBuffer& samplesRingBuffer, + int bounces, const QByteArray& samples, unsigned int sampleTime, int sampleRate); }; From 09bcada2635a4b9d5a975ce25a21fd87dafb31b4 Mon Sep 17 00:00:00 2001 From: matsukaze Date: Thu, 3 Apr 2014 19:41:59 -0700 Subject: [PATCH 05/64] Fixes to audio spatial processing. --- interface/src/Audio.cpp | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 668c8f353b..074610e269 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -633,7 +633,7 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& sp // Just do a straight copy, clipping if necessary unsigned int sampleCt = (remaining < spatialAudio.samplesAvailable()) ? remaining : spatialAudio.samplesAvailable(); if (sampleCt) { - _spatialAudioRingBuffer.writeSamples(spatialAudio.getBuffer(), sampleCt); + _spatialAudioRingBuffer.writeSamples(spatialAudio.getNextOutput(), sampleCt); } _spatialAudioFinish = _spatialAudioStart + spatialAudio.samplesAvailable() / _desiredOutputFormat.channelCount(); @@ -654,7 +654,7 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& sp // Copy the new spatial audio to the accumulation ring buffer if (sampleCt) { - _spatialAudioRingBuffer.writeSamples(spatialAudio.getBuffer(), sampleCt); + _spatialAudioRingBuffer.writeSamples(spatialAudio.getNextOutput(), sampleCt); } _spatialAudioFinish += (sampleCt + silentCt) / _desiredOutputFormat.channelCount(); } @@ -664,19 +664,20 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& sp // acumulate the overlap unsigned int offset = (sampleTime - _spatialAudioStart) * _desiredOutputFormat.channelCount(); unsigned int accumulationCt = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount(); + accumulationCt = (accumulationCt < spatialAudio.samplesAvailable()) ? accumulationCt : spatialAudio.samplesAvailable(); int j = 0; for (int i = accumulationCt; --i >= 0; j++) { _spatialAudioRingBuffer[j + offset] += spatialAudio[j]; } // Copy the remaining unoverlapped spatial audio to the accumulation buffer - unsigned int sampleCt = (remaining < spatialAudio.samplesAvailable()) ? 
remaining : spatialAudio.samplesAvailable();
+        unsigned int sampleCt = spatialAudio.samplesAvailable() - accumulationCt;
+        sampleCt = (remaining < sampleCt) ? remaining : sampleCt;
         if (sampleCt) {
-            _spatialAudioRingBuffer.writeSamples(spatialAudio.getBuffer() + accumulationCt, sampleCt);
+            _spatialAudioRingBuffer.writeSamples(spatialAudio.getNextOutput() + accumulationCt, sampleCt);
         }
         _spatialAudioFinish += sampleCt / _desiredOutputFormat.channelCount();
     }
-    spatialAudio.reset();
 }
 
 bool Audio::mousePressEvent(int x, int y) {
@@ -730,18 +731,20 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB
 
             int16_t* ringBufferSamples= new int16_t[numNetworkOutputSamples];
             if (_processSpatialAudio) {
-                unsigned int sampleTime = _spatialAudioFinish;
+                unsigned int sampleTime = _spatialAudioStart;
                 // Accumulate direct transmission of audio from sender to receiver
                 addSpatialAudioToBuffer(sampleTime, ringBuffer);
+                addSpatialAudioToBuffer(sampleTime + 48000, ringBuffer);
+
+                // Send audio off for spatial processing
+                emit processSpatialAudio(sampleTime, QByteArray((char*)ringBuffer.getBuffer(), numNetworkOutputSamples), _desiredOutputFormat);
 
                 // copy the samples we'll resample from the spatial audio ring buffer - this also
                 // pushes the read pointer of the spatial audio ring buffer forwards
                 _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
-                _spatialAudioStart += ringBuffer.samplesAvailable() / _desiredOutputFormat.channelCount();
-
-                // Send audio off for spatial processing
-                emit processSpatialAudio(sampleTime, QByteArray((char*)ringBufferSamples, numNetworkOutputSamples), _desiredOutputFormat);
-
+                int samples = ringBuffer.samplesAvailable();
+                _spatialAudioStart += samples / _desiredOutputFormat.channelCount();
+                ringBuffer.reset();
 
             } else {

From 3808cfa83ddcaf84608e343e9254c702b9bfb836 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Thu, 3 Apr 2014 08:55:48 -0700
Subject: [PATCH 06/64] latest work

---
 interface/interface_en.ts             | 12 ++--
 interface/src/Application.cpp         |  2 +-
 interface/src/Audio.cpp               | 10 +++-
 interface/src/Audio.h                 |  2 +-
 interface/src/AudioReflector.cpp      | 82 ++++++++++++++++++++-------
 libraries/audio/src/AudioRingBuffer.h |  4 +-
 6 files changed, 78 insertions(+), 34 deletions(-)

diff --git a/interface/interface_en.ts b/interface/interface_en.ts
index 3f859c2cd1..43ec129c99 100644
--- a/interface/interface_en.ts
+++ b/interface/interface_en.ts
[hunks @@ -14,12 +14,12 @@ and @@ -113,18 +113,18 @@ were garbled in extraction:
the XML markup of this Qt translation file was stripped, leaving only the message
strings "Open Script", "JavaScript Files (*.js)", "Open .ini config file",
"Text files (*.ini)", and "Save .ini config file"; per the diffstat (6+/6-) the
change only updates <location> line numbers for those entries]
diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp
index bf3166a928..863df28319 100644
--- a/interface/src/Application.cpp
+++ b/interface/src/Application.cpp
@@ -1603,7 +1603,7 @@ void Application::init() {
     _audioReflector.setMyAvatar(getAvatar());
     _audioReflector.setVoxels(_voxels.getTree());
     _audioReflector.setAudio(getAudio());
-    connect(getAudio(), &Audio::processSpatialAudio, &_audioReflector, &AudioReflector::processSpatialAudio);
+    connect(getAudio(), &Audio::processSpatialAudio, &_audioReflector, &AudioReflector::processSpatialAudio,Qt::DirectConnection);
 }
 
 void Application::closeMirrorView() {
diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index 074610e269..930a88e222 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -622,10 +622,11 @@ unsigned int Audio::timeValToSampleTick(const quint64 time, int sampleRate) {
     return sample;
 }
 
-void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& spatialAudio) {
+void 
Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& spatialAudio) {
+void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const AudioRingBuffer& spatialAudio) {
 
     // Calculate the number of remaining samples available
     unsigned int remaining = _spatialAudioRingBuffer.getSampleCapacity() - _spatialAudioRingBuffer.samplesAvailable();
+
     if (sampleTime >= _spatialAudioFinish) {
         if (_spatialAudioStart == _spatialAudioFinish) {
@@ -645,7 +646,7 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& sp
             unsigned int ct = delay * _desiredOutputFormat.channelCount();
             unsigned int silentCt = (remaining < ct) ? remaining : ct;
             if (silentCt) {
-            _spatialAudioRingBuffer.addSilentFrame(silentCt);
+                _spatialAudioRingBuffer.addSilentFrame(silentCt);
             }
 
             // Recalculate the number of remaining samples
@@ -660,11 +661,13 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& sp
         }
     } else {
+
         // There is overlap between the spatial audio buffer and the new sample,
         // accumulate the overlap
         unsigned int offset = (sampleTime - _spatialAudioStart) * _desiredOutputFormat.channelCount();
         unsigned int accumulationCt = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount();
         accumulationCt = (accumulationCt < spatialAudio.samplesAvailable()) ? accumulationCt : spatialAudio.samplesAvailable();
+
         int j = 0;
         for (int i = accumulationCt; --i >= 0; j++) {
             _spatialAudioRingBuffer[j + offset] += spatialAudio[j];
@@ -734,7 +737,7 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB
         unsigned int sampleTime = _spatialAudioStart;
         // Accumulate direct transmission of audio from sender to receiver
         addSpatialAudioToBuffer(sampleTime, ringBuffer);
-        addSpatialAudioToBuffer(sampleTime + 48000, ringBuffer);
+        //addSpatialAudioToBuffer(sampleTime + 48000, ringBuffer);
 
         // Send audio off for spatial processing
         emit processSpatialAudio(sampleTime, QByteArray((char*)ringBuffer.getBuffer(), numNetworkOutputSamples), _desiredOutputFormat);
@@ -744,6 +747,7 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB
         _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
         int samples = ringBuffer.samplesAvailable();
         _spatialAudioStart += samples / _desiredOutputFormat.channelCount();
+        ringBuffer.reset();
 
     } else {
diff --git a/interface/src/Audio.h b/interface/src/Audio.h
index 51a3aab8ae..052eb06bdd 100644
--- a/interface/src/Audio.h
+++ b/interface/src/Audio.h
@@ -78,7 +78,7 @@ public slots:
     void start();
     void stop();
     void addReceivedAudioToBuffer(const QByteArray& audioByteArray);
-    void addSpatialAudioToBuffer(unsigned int sampleTime, AudioRingBuffer& spatialAudio);
+    void addSpatialAudioToBuffer(unsigned int sampleTime, const AudioRingBuffer& spatialAudio);
     void handleAudioInput();
     void reset();
     void toggleMute();
diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index aea1583c68..e7d5c29e25 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -37,7 +37,7 @@ int getDelayFromDistance(float distance) {
     return MS_DELAY_PER_METER * distance;
 }
 
-const float BOUNCE_ATTENUATION_FACTOR = 0.5f;
+const float BOUNCE_ATTENUATION_FACTOR = 0.125f;
 
 float getDistanceAttenuationCoefficient(float distance) {
     const float DISTANCE_SCALE = 2.5f;
@@ -99,6 +99,9 @@ void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& o
     }
 }
 
+// set up our buffers for our attenuated and delayed samples
+const int NUMBER_OF_CHANNELS = 2;
+
 void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
                                           int bounces, const QByteArray& originalSamples,
@@ -115,14 +118,20 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve
     BoxFace face;
     const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
 
-    // set up our buffers for our attenuated and delayed samples
-    const int NUMBER_OF_CHANNELS = 2;
-
-    int totalNumberOfSamples = originalSamples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS);
+    int totalNumberOfSamples = originalSamples.size() / sizeof(int16_t);
+    int totalNumberOfStereoSamples = originalSamples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS);
 
     const int16_t* originalSamplesData = (const int16_t*)originalSamples.constData();
-    AudioRingBuffer attenuatedLeftSamples(totalNumberOfSamples);
-    AudioRingBuffer attenuatedRightSamples(totalNumberOfSamples);
+    QByteArray attenuatedLeftSamples;
+    QByteArray attenuatedRightSamples;
+    attenuatedLeftSamples.resize(originalSamples.size());
+    attenuatedRightSamples.resize(originalSamples.size());
+
+    int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data();
+    int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data();
+
+    AudioRingBuffer attenuatedLeftBuffer(totalNumberOfSamples);
+    AudioRingBuffer attenuatedRightBuffer(totalNumberOfSamples);
 
     for (int bounceNumber = 1; bounceNumber <= bounces; bounceNumber++) {
@@ -138,26 +147,25 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve
             float leftEarDistance = glm::distance(end, leftEarPosition);
             int rightEarDelayMsecs = getDelayFromDistance(rightEarDistance);
             int leftEarDelayMsecs = getDelayFromDistance(leftEarDistance);
-            int rightEarDelay = rightEarDelayMsecs / MSECS_PER_SECOND * sampleRate;
-            int leftEarDelay = leftEarDelayMsecs / MSECS_PER_SECOND * sampleRate;
+            int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
+            int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
 
-            float rightEarAttenuation = getDistanceAttenuationCoefficient(rightEarDistance) *
-                                        (bounceNumber * BOUNCE_ATTENUATION_FACTOR);
-            float leftEarAttenuation = getDistanceAttenuationCoefficient(leftEarDistance) *
-                                       (bounceNumber * BOUNCE_ATTENUATION_FACTOR);
+            float rightEarAttenuation = getDistanceAttenuationCoefficient(rightEarDistance) * (bounceNumber * BOUNCE_ATTENUATION_FACTOR);
+            float leftEarAttenuation = getDistanceAttenuationCoefficient(leftEarDistance) * (bounceNumber * BOUNCE_ATTENUATION_FACTOR);
+            //qDebug() << "leftEarAttenuation=" << leftEarAttenuation << "rightEarAttenuation=" << rightEarAttenuation;
 
             // run through the samples, and attenuate them
-            for (int sample = 0; sample < totalNumberOfSamples; sample++) {
+            for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
                 int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
                 int16_t rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
 
                 //qDebug() << "leftSample=" << leftSample << "rightSample=" << rightSample;
 
-                attenuatedLeftSamples[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
-                attenuatedLeftSamples[sample * NUMBER_OF_CHANNELS + 1] = 0;
+                attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
+                attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
 
-                attenuatedRightSamples[sample * NUMBER_OF_CHANNELS] = 0;
-                attenuatedRightSamples[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
+                attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
+                attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
 
                 //qDebug() << "attenuated... leftSample=" << (leftSample * leftEarAttenuation) << "rightSample=" << (rightSample * rightEarAttenuation);
 
@@ -169,15 +177,47 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve
             unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
             unsigned int sampleTimeRight = sampleTime + rightEarDelay;
 
-            qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight;
+            //qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight;
+
+            attenuatedLeftBuffer.writeSamples(attenuatedLeftSamplesData, totalNumberOfSamples);
+            attenuatedRightBuffer.writeSamples(attenuatedRightSamplesData, totalNumberOfSamples);
 
-            _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples);
-            _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples);
+            _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftBuffer);
+            _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightBuffer);
+            attenuatedLeftBuffer.reset();
+            attenuatedRightBuffer.reset();
         }
     }
 }
 
 void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
+
+
+    //qDebug() << "AudioReflector::processSpatialAudio()...sampleTime=" << sampleTime << " threadID=" << QThread::currentThreadId();
+
+    /*
+    int totalNumberOfSamples = samples.size() / (sizeof(int16_t));
+    int numFrameSamples = format.sampleRate() * format.channelCount();
+
+    qDebug() << "    totalNumberOfSamples=" << totalNumberOfSamples;
+    qDebug() << "    numFrameSamples=" << numFrameSamples;
+    qDebug() << "    samples.size()=" << samples.size();
+    qDebug() << "    sizeof(int16_t)=" << sizeof(int16_t);
+
+    AudioRingBuffer samplesRingBuffer(totalNumberOfSamples);
+    qint64 bytesCopied = samplesRingBuffer.writeData(samples.constData(),samples.size());
+    for(int i = 0; i < totalNumberOfSamples; i++) {
+        samplesRingBuffer[i] = samplesRingBuffer[i] * 0.25f;
+    }
+
+    qDebug() << "    bytesCopied=" << bytesCopied;
+
+    _audio->addSpatialAudioToBuffer(sampleTime + 12000, samplesRingBuffer);
+
+    return;
+    */
+
     quint64 start = usecTimestampNow();
 
     glm::vec3 origin = _myAvatar->getHead()->getPosition();
diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h
index e3c82a1509..b0f6aab5ea 100644
--- a/libraries/audio/src/AudioRingBuffer.h
+++ b/libraries/audio/src/AudioRingBuffer.h
@@ -47,8 +47,8 @@ public:
     int parseData(const QByteArray& packet);
 
     // assume callers using this will never wrap around the end
-    const int16_t* getNextOutput() { return _nextOutput; }
-    const int16_t* getBuffer() { return _buffer; }
+    const int16_t* getNextOutput() const { return _nextOutput; }
+    const int16_t* getBuffer() const { return _buffer; }
 
     qint64 readSamples(int16_t* destination, qint64 maxSamples);
     qint64 writeSamples(const int16_t* source, qint64 maxSamples);
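One fix in this patch is easy to miss: the ear delays are now computed as delayMsecs * sampleRate / MSECS_PER_SECOND instead of delayMsecs / MSECS_PER_SECOND * sampleRate. With integer operands the old ordering truncates to zero for any delay under one second, silencing the delay entirely. A standalone sketch with assumed values (not code from the patch):

    #include <cstdio>

    int main() {
        int delayMsecs = 45;               // assumed: ~15m of path length at 3ms per meter
        int sampleRate = 48000;            // assumed network sample rate
        const int MSECS_PER_SECOND = 1000;

        int before = delayMsecs / MSECS_PER_SECOND * sampleRate; // 45/1000 truncates to 0, so 0 samples
        int after = delayMsecs * sampleRate / MSECS_PER_SECOND;  // 2160 samples of delay

        printf("before=%d after=%d\n", before, after); // prints: before=0 after=2160
        return 0;
    }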
From 1265e5b12d20f58b35cee087c1ba632e5a391c8c Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Thu, 3 Apr 2014 09:23:10 -0700
Subject: [PATCH 07/64] just echo

---
 interface/src/AudioReflector.cpp | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index e7d5c29e25..402b111c26 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -193,30 +193,29 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve
 
 void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
 
-    //qDebug() << "AudioReflector::processSpatialAudio()...sampleTime=" << sampleTime << " threadID=" << QThread::currentThreadId();
+    qDebug() << "AudioReflector::processSpatialAudio()...sampleTime=" << sampleTime << " threadID=" << QThread::currentThreadId();
 
-    /*
     int totalNumberOfSamples = samples.size() / (sizeof(int16_t));
-    int numFrameSamples = format.sampleRate() * format.channelCount();
 
     qDebug() << "    totalNumberOfSamples=" << totalNumberOfSamples;
-    qDebug() << "    numFrameSamples=" << numFrameSamples;
     qDebug() << "    samples.size()=" << samples.size();
     qDebug() << "    sizeof(int16_t)=" << sizeof(int16_t);
 
     AudioRingBuffer samplesRingBuffer(totalNumberOfSamples);
     qint64 bytesCopied = samplesRingBuffer.writeData(samples.constData(),samples.size());
+
+    /*
     for(int i = 0; i < totalNumberOfSamples; i++) {
         samplesRingBuffer[i] = samplesRingBuffer[i] * 0.25f;
     }
+    */
 
     qDebug() << "    bytesCopied=" << bytesCopied;
 
     _audio->addSpatialAudioToBuffer(sampleTime + 12000, samplesRingBuffer);
 
     return;
-    */
 
     quint64 start = usecTimestampNow();
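This checkpoint strips spatial processing down to a single delayed replay of the incoming frame, which isolates the addSpatialAudioToBuffer() accumulation path from the ray tracing. The same test as one hypothetical helper (the 12000-sample offset is 250ms at the 48kHz network rate; echoOnce is illustrative, not from the patch):

    // Hypothetical helper equivalent to the "just echo" path above: write the
    // received frame into a ring buffer and re-inject it 12000 samples later.
    void echoOnce(Audio* audio, unsigned int sampleTime, const QByteArray& samples) {
        int totalNumberOfSamples = samples.size() / sizeof(int16_t);
        AudioRingBuffer samplesRingBuffer(totalNumberOfSamples);
        samplesRingBuffer.writeData(samples.constData(), samples.size());
        audio->addSpatialAudioToBuffer(sampleTime + 12000, samplesRingBuffer); // one echo, 250ms late
    }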
From 06adaa009cc6c679a3df5d513a71f8b8b30c75f8 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Thu, 3 Apr 2014 12:40:35 -0700
Subject: [PATCH 08/64] add first cut at simple low pass filter

---
 interface/src/Audio.cpp | 43 ++++++++++++++++++++++++++++++++++++++++-
 interface/src/Audio.h   |  2 +-
 interface/src/Menu.cpp  |  3 +++
 interface/src/Menu.h    |  1 +
 4 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index 374976c691..62eaacc7c8 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -468,7 +468,8 @@ void Audio::handleAudioInput() {
     }
 
     // Add tone injection if enabled
-    const float TONE_FREQ = 220.f / SAMPLE_RATE * TWO_PI;
+    //const float TONE_FREQ = 220.f / SAMPLE_RATE * TWO_PI;
+    const float TONE_FREQ = 440.f / SAMPLE_RATE * TWO_PI;
     const float QUARTER_VOLUME = 8192.f;
     if (_toneInjectionEnabled) {
         loudness = 0.f;
@@ -760,11 +761,20 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB
         // copy the samples we'll resample from the ring buffer - this also
         // pushes the read pointer of the ring buffer forwards
         ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
+
+
     }
 
     // add the next numNetworkOutputSamples from each QByteArray
     // in our _localInjectionByteArrays QVector to the localInjectedSamples
+
+    if (Menu::getInstance()->isOptionChecked(MenuOption::LowPassFilter)) {
+        int channels = _desiredOutputFormat.channelCount();
+        int filterSamples = numNetworkOutputSamples / channels;
+        lowPassFilter(ringBufferSamples, filterSamples, channels);
+    }
+
     // copy the packet from the RB to the output
     linearResampling(ringBufferSamples,
                      (int16_t*) outputBuffer.data(),
@@ -894,6 +904,37 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
     }
 }
 
+
+// simple 3 pole low pass filter
+void Audio::lowPassFilter(int16_t* inputBuffer, int samples, int channels) {
+
+    //qDebug() << "lowPassFilter() samples=" << samples << " channels=" << channels;
+    //const int POLE_COUNT = 3;
+
+    for (int c = 0; c < channels; c++) {
+        const float C1 = 0.0f; // 0.25f;
+        const float C2 = 1.0f; // 0.5f;
+        const float C3 = 0.0f; // 0.25f;
+        int16_t S1,S2,S3;
+        S1 = inputBuffer[c]; // start with the Nth sample, based on the current channel, this is the first sample for the channel
+        for (int i = 0; i < samples; i++) {
+            int sampleAt = (i * channels) + c;
+            int nextSampleAt = sampleAt + channels;
+            S2 = inputBuffer[sampleAt];
+            if (i == samples - 1) {
+                S3 = inputBuffer[sampleAt];
+            } else {
+                S3 = inputBuffer[nextSampleAt];
+            }
+            // save our S1 for next time before we mod this
+            S1 = inputBuffer[sampleAt];
+            inputBuffer[sampleAt] = (C1 * S1) + (C2 * S2) + (C3 * S3);
+            //qDebug() << "channel=" << c << " sampleAt=" << sampleAt;
+        }
+    }
+}
+
+
 // Starts a collision sound. magnitude is 0-1, with 1 the loudest possible sound.
 void Audio::startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen) {
     _collisionSoundMagnitude = magnitude;
diff --git a/interface/src/Audio.h b/interface/src/Audio.h
index 052eb06bdd..fe3a6cbb7c 100644
--- a/interface/src/Audio.h
+++ b/interface/src/Audio.h
@@ -55,7 +55,7 @@ public:
     void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; }
     int getJitterBufferSamples() { return _jitterBufferSamples; }
 
-    void lowPassFilter(int16_t* inputBuffer);
+    void lowPassFilter(int16_t* inputBuffer, int samples, int channels);
 
     virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
     virtual void startDrumSound(float volume, float frequency, float duration, float decay);
diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp
index 3dd4733c64..9bb63b4a34 100644
--- a/interface/src/Menu.cpp
+++ b/interface/src/Menu.cpp
@@ -371,6 +371,9 @@ Menu::Menu() :
                                            false,
                                            appInstance->getAudio(),
                                            SLOT(toggleAudioSpatialProcessing()));
+    addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_F,
+                                           false);
 
     addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
diff --git a/interface/src/Menu.h b/interface/src/Menu.h
index b6feb2e2f5..99df84784a 100644
--- a/interface/src/Menu.h
+++ b/interface/src/Menu.h
@@ -270,6 +270,7 @@ namespace MenuOption {
     const QString Login = "Login";
     const QString Logout = "Logout";
     const QString LookAtVectors = "Look-at Vectors";
+    const QString LowPassFilter = "Low Pass Filter";
    const QString MetavoxelEditor = "Metavoxel Editor...";
    const QString Chat = "Chat...";
    const QString Metavoxels = "Metavoxels";
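Despite the name, what this filter applies (checked in here as the pass-through kernel [0, 1, 0]; the next patch enables [0.25, 0.5, 0.25]) is a 3-tap FIR smoother rather than a 3-pole IIR filter: the taps sum to 1, so DC and low frequencies pass at unity gain while sample-to-sample jitter is averaged away. A single-channel, out-of-place sketch of the same kernel (illustrative only, not part of the patch):

    #include <cstdint>
    #include <vector>

    // y[n] = 0.25*x[n-1] + 0.5*x[n] + 0.25*x[n+1], with the edges clamped,
    // always reading neighbours from the unfiltered input.
    std::vector<int16_t> firSmooth(const std::vector<int16_t>& x) {
        std::vector<int16_t> y(x.size());
        for (size_t n = 0; n < x.size(); n++) {
            int16_t prev = (n > 0) ? x[n - 1] : x[n];
            int16_t next = (n + 1 < x.size()) ? x[n + 1] : x[n];
            y[n] = (int16_t)(0.25f * prev + 0.5f * x[n] + 0.25f * next);
        }
        return y;
    }

The in-place version in the patch gets the same effect by saving S1 before overwriting the current sample, so each output is still computed from unfiltered neighbours.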
From 5872ec0569c19a6ce5f9d759588c3c25fb11f7e1 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Thu, 3 Apr 2014 16:19:33 -0700
Subject: [PATCH 09/64] checkpoint

---
 interface/src/Audio.cpp          |  10 +-
 interface/src/AudioReflector.cpp | 288 ++++++++++++++++++++++---------
 interface/src/AudioReflector.h   |   5 +-
 3 files changed, 216 insertions(+), 87 deletions(-)

diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index 62eaacc7c8..39f8a7f567 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -468,8 +468,8 @@ void Audio::handleAudioInput() {
     }
 
     // Add tone injection if enabled
-    //const float TONE_FREQ = 220.f / SAMPLE_RATE * TWO_PI;
-    const float TONE_FREQ = 440.f / SAMPLE_RATE * TWO_PI;
+    const float TONE_FREQ = 220.f / SAMPLE_RATE * TWO_PI;
+    //const float TONE_FREQ = 5000.f / SAMPLE_RATE * TWO_PI;
     const float QUARTER_VOLUME = 8192.f;
     if (_toneInjectionEnabled) {
         loudness = 0.f;
@@ -912,9 +912,9 @@ void Audio::lowPassFilter(int16_t* inputBuffer, int samples, int channels) {
     //const int POLE_COUNT = 3;
 
     for (int c = 0; c < channels; c++) {
-        const float C1 = 0.0f; // 0.25f;
-        const float C2 = 1.0f; // 0.5f;
-        const float C3 = 0.0f; // 0.25f;
+        const float C1 = 0.25f; // 0.0f; //
+        const float C2 = 0.5f; // 1.0f; //
+        const float C3 = 0.25f; // 0.0f; //
         int16_t S1,S2,S3;
         S1 = inputBuffer[c]; // start with the Nth sample, based on the current channel, this is the first sample for the channel
         for (int i = 0; i < samples; i++) {
diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index 402b111c26..1db234c1ee 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -73,6 +73,42 @@ glm::vec3 getFaceNormal(BoxFace face) {
     return glm::vec3(0, 0, 0); //error case
 }
 
+QVector<glm::vec3> AudioReflector::calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, int maxBounces) {
+    QVector<glm::vec3> reflectionPoints;
+    glm::vec3 start = origin;
+    glm::vec3 direction = originalDirection;
+    OctreeElement* elementHit;
+    float distance;
+    BoxFace face;
+    const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
+
+    for (int i = 0; i < maxBounces; i++) {
+        if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
+            glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));
+
+            reflectionPoints.push_back(end);
+
+            glm::vec3 faceNormal = getFaceNormal(face);
+            direction = glm::normalize(glm::reflect(direction,faceNormal));
+            start = end;
+        }
+    }
+    return reflectionPoints;
+}
+
+
+void AudioReflector::newDrawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections) {
+
+    glm::vec3 start = origin;
+    glm::vec3 color = originalColor;
+    const float COLOR_ADJUST_PER_BOUNCE = 0.75f;
+
+    foreach (glm::vec3 end, reflections) {
+        drawVector(start, end, color);
+        start = end;
+        color = color * COLOR_ADJUST_PER_BOUNCE;
+    }
+}
 
 void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
                                      int bounces, const glm::vec3& originalColor) {
@@ -98,12 +134,9 @@ void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& o
         }
     }
 }
-
 // set up our buffers for our attenuated and delayed samples
 const int NUMBER_OF_CHANNELS = 2;
-
-
-void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
+void AudioReflector::echoReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
                                      int bounces, const QByteArray& originalSamples,
                                      unsigned int sampleTime, int sampleRate) {
@@ -191,73 +224,59 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve
 }
 
 void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
+    bool doNothing = true;
+    bool doSimpleEcho = false;
 
-    qDebug() << "AudioReflector::processSpatialAudio()...sampleTime=" << sampleTime << " threadID=" << QThread::currentThreadId();
+    if (doNothing) {
+        return;
+    } else if (doSimpleEcho) {
+        int totalNumberOfSamples = samples.size() / (sizeof(int16_t));
+        AudioRingBuffer samplesRingBuffer(totalNumberOfSamples);
+        samplesRingBuffer.writeData(samples.constData(),samples.size());
+        _audio->addSpatialAudioToBuffer(sampleTime + 12000, samplesRingBuffer);
+        return;
+    } else {
+        quint64 start = usecTimestampNow();
 
-    int totalNumberOfSamples = samples.size() / (sizeof(int16_t));
+        glm::vec3 origin = _myAvatar->getHead()->getPosition();
 
-    qDebug() << "    totalNumberOfSamples=" << totalNumberOfSamples;
-    qDebug() << "    samples.size()=" << samples.size();
-    qDebug() << "    sizeof(int16_t)=" << sizeof(int16_t);
-
+        glm::quat orientation = _myAvatar->getOrientation(); // _myAvatar->getHead()->getOrientation();
+        glm::vec3 right = glm::normalize(orientation * IDENTITY_RIGHT);
+        glm::vec3 up = glm::normalize(orientation * IDENTITY_UP);
+        glm::vec3 front = glm::normalize(orientation * IDENTITY_FRONT);
+        glm::vec3 left = -right;
+        glm::vec3 down = -up;
+        glm::vec3 back = -front;
+        glm::vec3 frontRightUp = glm::normalize(front + right + up);
+        glm::vec3 frontLeftUp = glm::normalize(front + left + up);
+        glm::vec3 backRightUp = glm::normalize(back + right + up);
+        glm::vec3 backLeftUp = glm::normalize(back + left + up);
+        glm::vec3 frontRightDown = glm::normalize(front + right + down);
+        glm::vec3 frontLeftDown = glm::normalize(front + left + down);
+        glm::vec3 backRightDown = glm::normalize(back + right + down);
+        glm::vec3 backLeftDown = glm::normalize(back + left + down);
 
-    AudioRingBuffer samplesRingBuffer(totalNumberOfSamples);
-    qint64 bytesCopied = samplesRingBuffer.writeData(samples.constData(),samples.size());
-
-    /*
-    for(int i = 0; i < totalNumberOfSamples; i++) {
-        samplesRingBuffer[i] = samplesRingBuffer[i] * 0.25f;
+        const int BOUNCE_COUNT = 5;
+
+        echoReflections(origin, frontRightUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, frontLeftUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, backRightUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, backLeftUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, frontRightDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, frontLeftDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, backRightDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, backLeftDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+
+        echoReflections(origin, front, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, back, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, left, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, right, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, up, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        echoReflections(origin, down, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
+        quint64 end = usecTimestampNow();
+
+        qDebug() << "AudioReflector::addSamples()... elapsed=" << (end - start);
     }
-    */
-
-    qDebug() << "    bytesCopied=" << bytesCopied;
-
-    _audio->addSpatialAudioToBuffer(sampleTime + 12000, samplesRingBuffer);
-
-    return;
-
-    quint64 start = usecTimestampNow();
-
-    glm::vec3 origin = _myAvatar->getHead()->getPosition();
-
-    glm::quat orientation = _myAvatar->getOrientation(); // _myAvatar->getHead()->getOrientation();
-    glm::vec3 right = glm::normalize(orientation * IDENTITY_RIGHT);
-    glm::vec3 up = glm::normalize(orientation * IDENTITY_UP);
-    glm::vec3 front = glm::normalize(orientation * IDENTITY_FRONT);
-    glm::vec3 left = -right;
-    glm::vec3 down = -up;
-    glm::vec3 back = -front;
-    glm::vec3 frontRightUp = glm::normalize(front + right + up);
-    glm::vec3 frontLeftUp = glm::normalize(front + left + up);
-    glm::vec3 backRightUp = glm::normalize(back + right + up);
-    glm::vec3 backLeftUp = glm::normalize(back + left + up);
-    glm::vec3 frontRightDown = glm::normalize(front + right + down);
-    glm::vec3 frontLeftDown = glm::normalize(front + left + down);
-    glm::vec3 backRightDown = glm::normalize(back + right + down);
-    glm::vec3 backLeftDown = glm::normalize(back + left + down);
-
-    const int BOUNCE_COUNT = 5;
-
-    calculateReflections(origin, frontRightUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, frontLeftUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, backRightUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, backLeftUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, frontRightDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, frontLeftDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, backRightDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, backLeftDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-
-    calculateReflections(origin, front, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, back, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, left, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, right, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, up, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    calculateReflections(origin, down, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
-    quint64 end = usecTimestampNow();
-
-    //qDebug() << "AudioReflector::addSamples()... samples.size()=" << samples.size() << " elapsed=" << (end - start);
-
 }
 
 void AudioReflector::drawRays() {
@@ -299,21 +318,130 @@ void AudioReflector::drawRays() {
 
     const int BOUNCE_COUNT = 5;
 
-    drawReflections(origin, frontRightUp, BOUNCE_COUNT, RED);
-    drawReflections(origin, frontLeftUp, BOUNCE_COUNT, GREEN);
-    drawReflections(origin, backRightUp, BOUNCE_COUNT, BLUE);
-    drawReflections(origin, backLeftUp, BOUNCE_COUNT, CYAN);
-    drawReflections(origin, frontRightDown, BOUNCE_COUNT, PURPLE);
-    drawReflections(origin, frontLeftDown, BOUNCE_COUNT, YELLOW);
-    drawReflections(origin, backRightDown, BOUNCE_COUNT, WHITE);
-    drawReflections(origin, backLeftDown, BOUNCE_COUNT, DARK_RED);
+    bool oldWay = false;
+
+    if (oldWay) {
+        drawReflections(origin, frontRightUp, BOUNCE_COUNT, RED);
+        drawReflections(origin, frontLeftUp, BOUNCE_COUNT, GREEN);
+        drawReflections(origin, backRightUp, BOUNCE_COUNT, BLUE);
+        drawReflections(origin, backLeftUp, BOUNCE_COUNT, CYAN);
+        drawReflections(origin, frontRightDown, BOUNCE_COUNT, PURPLE);
+        drawReflections(origin, frontLeftDown, BOUNCE_COUNT, YELLOW);
+        drawReflections(origin, backRightDown, BOUNCE_COUNT, WHITE);
+        drawReflections(origin, backLeftDown, BOUNCE_COUNT, DARK_RED);
 
-    drawReflections(origin, front, BOUNCE_COUNT, DARK_GREEN);
-    drawReflections(origin, back, BOUNCE_COUNT, DARK_BLUE);
-    drawReflections(origin, left, BOUNCE_COUNT, DARK_CYAN);
-    drawReflections(origin, right, BOUNCE_COUNT, DARK_PURPLE);
-    drawReflections(origin, up, BOUNCE_COUNT, DARK_YELLOW);
-    drawReflections(origin, down, BOUNCE_COUNT, GRAY);
+        drawReflections(origin, front, BOUNCE_COUNT, DARK_GREEN);
+        drawReflections(origin, back, BOUNCE_COUNT, DARK_BLUE);
+        drawReflections(origin, left, BOUNCE_COUNT, DARK_CYAN);
+        drawReflections(origin, right, BOUNCE_COUNT, DARK_PURPLE);
+        drawReflections(origin, up, BOUNCE_COUNT, DARK_YELLOW);
+        drawReflections(origin, down, BOUNCE_COUNT, GRAY);
+    } else {
+        QVector<glm::vec3> frontRightUpReflections = calculateReflections(origin, frontRightUp, BOUNCE_COUNT);
+        QVector<glm::vec3> frontLeftUpReflections = calculateReflections(origin, frontLeftUp, BOUNCE_COUNT);
+        QVector<glm::vec3> backRightUpReflections = calculateReflections(origin, backRightUp, BOUNCE_COUNT);
+        QVector<glm::vec3> backLeftUpReflections = calculateReflections(origin, backLeftUp, BOUNCE_COUNT);
+        QVector<glm::vec3> frontRightDownReflections = calculateReflections(origin, frontRightDown, BOUNCE_COUNT);
+        QVector<glm::vec3> frontLeftDownReflections = calculateReflections(origin, frontLeftDown, BOUNCE_COUNT);
+        QVector<glm::vec3> backRightDownReflections = calculateReflections(origin, backRightDown, BOUNCE_COUNT);
+        QVector<glm::vec3> backLeftDownReflections = calculateReflections(origin, backLeftDown, BOUNCE_COUNT);
+        QVector<glm::vec3> frontReflections = calculateReflections(origin, front, BOUNCE_COUNT);
+        QVector<glm::vec3> backReflections = calculateReflections(origin, back, BOUNCE_COUNT);
+        QVector<glm::vec3> leftReflections = calculateReflections(origin, left, BOUNCE_COUNT);
+        QVector<glm::vec3> rightReflections = calculateReflections(origin, right, BOUNCE_COUNT);
+        QVector<glm::vec3> upReflections = calculateReflections(origin, up, BOUNCE_COUNT);
+        QVector<glm::vec3> downReflections = calculateReflections(origin, down, BOUNCE_COUNT);
+
+        glm::vec3 frontRightUpColor = RED;
+        glm::vec3 frontLeftUpColor = GREEN;
+        glm::vec3 backRightUpColor = BLUE;
+        glm::vec3 backLeftUpColor = CYAN;
+        glm::vec3 frontRightDownColor = PURPLE;
+        glm::vec3 frontLeftDownColor = YELLOW;
+        glm::vec3 backRightDownColor = WHITE;
+        glm::vec3 backLeftDownColor = DARK_RED;
+        glm::vec3 frontColor = GRAY;
+        glm::vec3 backColor = DARK_GREEN;
+        glm::vec3 leftColor = DARK_BLUE;
+        glm::vec3 rightColor = DARK_CYAN;
+        glm::vec3 upColor = DARK_PURPLE;
+        glm::vec3 downColor = DARK_YELLOW;
+
+        // attempt to determine insideness/outsideness based on number of directional rays that reflect
+        bool inside = false;
+
+        bool blockedUp = (frontRightUpReflections.size() > 0) &&
+                         (frontLeftUpReflections.size() > 0) &&
+                         (backRightUpReflections.size() > 0) &&
+                         (backLeftUpReflections.size() > 0) &&
+                         (upReflections.size() > 0);
+
+        bool blockedDown = (frontRightDownReflections.size() > 0) &&
+                           (frontLeftDownReflections.size() > 0) &&
+                           (backRightDownReflections.size() > 0) &&
+                           (backLeftDownReflections.size() > 0) &&
+                           (downReflections.size() > 0);
+
+        bool blockedFront = (frontRightUpReflections.size() > 0) &&
+                            (frontLeftUpReflections.size() > 0) &&
+                            (frontRightDownReflections.size() > 0) &&
+                            (frontLeftDownReflections.size() > 0) &&
+                            (frontReflections.size() > 0);
+
+        bool blockedBack = (backRightUpReflections.size() > 0) &&
+                           (backLeftUpReflections.size() > 0) &&
+                           (backRightDownReflections.size() > 0) &&
+                           (backLeftDownReflections.size() > 0) &&
+                           (backReflections.size() > 0);
+
+        bool blockedLeft = (frontLeftUpReflections.size() > 0) &&
+                           (backLeftUpReflections.size() > 0) &&
+                           (frontLeftDownReflections.size() > 0) &&
+                           (backLeftDownReflections.size() > 0) &&
+                           (leftReflections.size() > 0);
+
+        bool blockedRight = (frontRightUpReflections.size() > 0) &&
+                            (backRightUpReflections.size() > 0) &&
+                            (frontRightDownReflections.size() > 0) &&
+                            (backRightDownReflections.size() > 0) &&
+                            (rightReflections.size() > 0);
+
+        inside = blockedUp && blockedDown && blockedFront && blockedBack && blockedLeft && blockedRight;
+
+        if (inside) {
+            frontRightUpColor = RED;
+            frontLeftUpColor = RED;
+            backRightUpColor = RED;
+            backLeftUpColor = RED;
+            frontRightDownColor = RED;
+            frontLeftDownColor = RED;
+            backRightDownColor = RED;
+            backLeftDownColor = RED;
+            frontColor = RED;
+            backColor = RED;
+            leftColor = RED;
+            rightColor = RED;
+            upColor = RED;
+            downColor = RED;
+        }
+
+        newDrawReflections(origin, frontRightUpColor, frontRightUpReflections);
+        newDrawReflections(origin, frontLeftUpColor, frontLeftUpReflections);
+        newDrawReflections(origin, backRightUpColor, backRightUpReflections);
+        newDrawReflections(origin, backLeftUpColor, backLeftUpReflections);
+        newDrawReflections(origin, frontRightDownColor, frontRightDownReflections);
+        newDrawReflections(origin, frontLeftDownColor, frontLeftDownReflections);
+        newDrawReflections(origin, backRightDownColor, backRightDownReflections);
+        newDrawReflections(origin, backLeftDownColor, backLeftDownReflections);
+
+        newDrawReflections(origin, frontColor, frontReflections);
+        newDrawReflections(origin, backColor, backReflections);
+        newDrawReflections(origin, leftColor, leftReflections);
+        newDrawReflections(origin, rightColor, rightReflections);
+        newDrawReflections(origin, upColor, upReflections);
+        newDrawReflections(origin, downColor, downReflections);
+
+    }
 }
 
 void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) {
diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h
index 4767361c29..ea379f2790 100644
--- a/interface/src/AudioReflector.h
+++ b/interface/src/AudioReflector.h
@@ -38,11 +38,12 @@ private:
     void drawRays();
     void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);
     void drawReflections(const glm::vec3& origin, const glm::vec3& direction, int bounces, const glm::vec3& color);
-
-    void calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
+    void echoReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
                     int bounces, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
 
+    QVector<glm::vec3> calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, int maxBounces);
+    void newDrawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections);
 };
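The geometric core of this checkpoint is calculateReflections(): intersect a ray with the voxel tree, step to just short of the hit point, then mirror the direction about the face normal and repeat. Condensed to a free function for reference (a sketch: tracePath and the VoxelTree* parameter type are assumptions, while the intersection call and face normals are the patch's own):

    // One mirror bounce per iteration: the hit point becomes the next origin and
    // the direction reflects about the face normal (angle in == angle out).
    QVector<glm::vec3> tracePath(VoxelTree* voxels, glm::vec3 start, glm::vec3 direction, int maxBounces) {
        QVector<glm::vec3> points;
        OctreeElement* elementHit;
        float distance;
        BoxFace face;
        const float SLIGHTLY_SHORT = 0.999f; // stop just shy of the surface so the next ray starts outside it
        for (int i = 0; i < maxBounces; i++) {
            if (!voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
                break; // the patch keeps looping on a miss; bailing out early is equivalent
            }
            glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));
            points.push_back(end);
            direction = glm::normalize(glm::reflect(direction, getFaceNormal(face)));
            start = end;
        }
        return points;
    }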
From 836fda2c051d866b0d5cde905689875ff2d15b5a Mon Sep 17 00:00:00 2001
From: matsukaze
Date: Fri, 4 Apr 2014 09:07:27 -0700
Subject: [PATCH 10/64] Clamp the audio summation.

---
 interface/src/Audio.cpp | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index 930a88e222..374976c691 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 
 #include "Application.h"
 #include "Audio.h"
@@ -624,9 +625,11 @@ unsigned int Audio::timeValToSampleTick(const quint64 time, int sampleRate) {
 
 void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const AudioRingBuffer& spatialAudio) {
 
-    // Calculate the number of remaining samples available
+    // Calculate the number of remaining samples available, the source spatial audio buffer will get
+    // clipped if there are insufficient samples available in the accumulation buffer.
     unsigned int remaining = _spatialAudioRingBuffer.getSampleCapacity() - _spatialAudioRingBuffer.samplesAvailable();
 
+    // Locate where in the accumulation buffer the new samples need to go
     if (sampleTime >= _spatialAudioFinish) {
         if (_spatialAudioStart == _spatialAudioFinish) {
@@ -636,12 +639,12 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const AudioRingBuff
             if (sampleCt) {
                 _spatialAudioRingBuffer.writeSamples(spatialAudio.getNextOutput(), sampleCt);
             }
-            _spatialAudioFinish = _spatialAudioStart + spatialAudio.samplesAvailable() / _desiredOutputFormat.channelCount();
+            _spatialAudioFinish = _spatialAudioStart + sampleCt / _desiredOutputFormat.channelCount();
 
         } else {
 
-            // Spatial audio ring buffer already has data, but there is no overlap with the new sample
-            // compute the appropriate time delay and pad with silence until the new start time
+            // Spatial audio ring buffer already has data, but there is no overlap with the new sample.
+            // Compute the appropriate time delay and pad with silence until the new start time.
             unsigned int delay = sampleTime - _spatialAudioFinish;
             unsigned int ct = delay * _desiredOutputFormat.channelCount();
             unsigned int silentCt = (remaining < ct) ? remaining : ct;
@@ -670,16 +673,18 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const AudioRingBuff
 
         int j = 0;
         for (int i = accumulationCt; --i >= 0; j++) {
-            _spatialAudioRingBuffer[j + offset] += spatialAudio[j];
+            int tmp = _spatialAudioRingBuffer[j + offset] + spatialAudio[j];
+            _spatialAudioRingBuffer[j + offset] =
+                static_cast<int16_t>(glm::clamp(tmp, std::numeric_limits<int16_t>::min(), std::numeric_limits<int16_t>::max()));
         }
 
-        // Copy the remaining unoverlapped spatial audio to the accumulation buffer
+        // Copy the remaining unoverlapped spatial audio to the accumulation buffer, if any
         unsigned int sampleCt = spatialAudio.samplesAvailable() - accumulationCt;
         sampleCt = (remaining < sampleCt) ? remaining : sampleCt;
         if (sampleCt) {
             _spatialAudioRingBuffer.writeSamples(spatialAudio.getNextOutput() + accumulationCt, sampleCt);
+            _spatialAudioFinish += sampleCt / _desiredOutputFormat.channelCount();
         }
-        _spatialAudioFinish += sampleCt / _desiredOutputFormat.channelCount();
     }
 }
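The motivation for the clamp: with many echo paths being summed into one int16 accumulation buffer, a plain += can overflow and wrap from loud positive to loud negative, which sounds far worse than hard clipping. The fix in isolation (a sketch of what the loop above inlines):

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Saturating 16-bit mix: sum in a wider type, then clamp into the int16 range.
    int16_t mixSaturating(int16_t a, int16_t b) {
        int sum = (int)a + (int)b; // two int16 values can never overflow an int
        sum = std::min(sum, (int)std::numeric_limits<int16_t>::max());
        sum = std::max(sum, (int)std::numeric_limits<int16_t>::min());
        return (int16_t)sum; // e.g. 20000 + 20000 -> 32767 instead of wrapping to -25536
    }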
From 33718684e6166f6a582351f1347b265ff3c728b0 Mon Sep 17 00:00:00 2001
From: matsukaze
Date: Fri, 4 Apr 2014 16:09:25 -0700
Subject: [PATCH 11/64] More audio fixes.

---
 interface/src/Audio.cpp          | 42 +++++++++++++++++++------------
 interface/src/Audio.h            |  2 +-
 interface/src/AudioReflector.cpp | 18 +++----------
 3 files changed, 30 insertions(+), 32 deletions(-)

diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index 374976c691..4a501f74b7 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -623,9 +623,9 @@ unsigned int Audio::timeValToSampleTick(const quint64 time, int sampleRate) {
     return sample;
 }
 
-void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const AudioRingBuffer& spatialAudio) {
+void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, QByteArray& spatialAudio, unsigned int numSamples) {
 
-    // Calculate the number of remaining samples available, the source spatial audio buffer will get
+    // Calculate the number of remaining samples available. The source spatial audio buffer will get
     // clipped if there are insufficient samples available in the accumulation buffer.
     unsigned int remaining = _spatialAudioRingBuffer.getSampleCapacity() - _spatialAudioRingBuffer.samplesAvailable();
 
@@ -635,9 +635,9 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, QByteArray& spatial
 
             // Nothing in the spatial audio ring buffer yet
             // Just do a straight copy, clipping if necessary
-            unsigned int sampleCt = (remaining < spatialAudio.samplesAvailable()) ? remaining : spatialAudio.samplesAvailable();
+            unsigned int sampleCt = (remaining < numSamples) ? remaining : numSamples;
             if (sampleCt) {
-                _spatialAudioRingBuffer.writeSamples(spatialAudio.getNextOutput(), sampleCt);
+                _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCt);
             }
             _spatialAudioFinish = _spatialAudioStart + sampleCt / _desiredOutputFormat.channelCount();
 
@@ -654,35 +654,37 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, QByteArray& spatial
             // Recalculate the number of remaining samples
             remaining -= silentCt;
-            unsigned int sampleCt = (remaining < spatialAudio.samplesAvailable()) ? remaining : spatialAudio.samplesAvailable();
+            unsigned int sampleCt = (remaining < numSamples) ? remaining : numSamples;
 
             // Copy the new spatial audio to the accumulation ring buffer
             if (sampleCt) {
-                _spatialAudioRingBuffer.writeSamples(spatialAudio.getNextOutput(), sampleCt);
+                _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCt);
             }
             _spatialAudioFinish += (sampleCt + silentCt) / _desiredOutputFormat.channelCount();
         }
     } else {
 
         // There is overlap between the spatial audio buffer and the new sample,
         // accumulate the overlap
+
+        // Calculate the offset from the buffer's current read position, which should be located at _spatialAudioStart
         unsigned int offset = (sampleTime - _spatialAudioStart) * _desiredOutputFormat.channelCount();
         unsigned int accumulationCt = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount();
-        accumulationCt = (accumulationCt < spatialAudio.samplesAvailable()) ? accumulationCt : spatialAudio.samplesAvailable();
+        accumulationCt = (accumulationCt < numSamples) ? accumulationCt : numSamples;
 
+        int16_t* spatial = reinterpret_cast<int16_t*>(spatialAudio.data());
         int j = 0;
         for (int i = accumulationCt; --i >= 0; j++) {
-            int tmp = _spatialAudioRingBuffer[j + offset] + spatialAudio[j];
+            int tmp = _spatialAudioRingBuffer[j + offset] + spatial[j];
             _spatialAudioRingBuffer[j + offset] =
                 static_cast<int16_t>(glm::clamp(tmp, std::numeric_limits<int16_t>::min(), std::numeric_limits<int16_t>::max()));
         }
 
         // Copy the remaining unoverlapped spatial audio to the accumulation buffer, if any
-        unsigned int sampleCt = spatialAudio.samplesAvailable() - accumulationCt;
+        unsigned int sampleCt = numSamples - accumulationCt;
         sampleCt = (remaining < sampleCt) ? remaining : sampleCt;
         if (sampleCt) {
-            _spatialAudioRingBuffer.writeSamples(spatialAudio.getNextOutput() + accumulationCt, sampleCt);
+            _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data() + accumulationCt, sampleCt);
             _spatialAudioFinish += sampleCt / _desiredOutputFormat.channelCount();
         }
     }
@@ -737,23 +739,29 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB
     //qDebug() << "pushing " << numNetworkOutputSamples;
     ringBuffer.setIsStarved(false);
 
-    int16_t* ringBufferSamples= new int16_t[numNetworkOutputSamples];
+    int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
     if (_processSpatialAudio) {
         unsigned int sampleTime = _spatialAudioStart;
+        QByteArray buffer;
+        buffer.resize(numDeviceOutputSamples * sizeof(int16_t));
+
+        ringBuffer.readSamples((int16_t*)buffer.data(), numDeviceOutputSamples);
         // Accumulate direct transmission of audio from sender to receiver
-        addSpatialAudioToBuffer(sampleTime, ringBuffer);
+        addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
        //addSpatialAudioToBuffer(sampleTime + 48000, ringBuffer);
 
         // Send audio off for spatial processing
-        emit processSpatialAudio(sampleTime, QByteArray((char*)ringBuffer.getBuffer(), numNetworkOutputSamples), _desiredOutputFormat);
+        emit processSpatialAudio(sampleTime, buffer, _desiredOutputFormat);
 
         // copy the samples we'll resample from the spatial audio ring buffer - this also
         // pushes the read pointer of the spatial audio ring buffer forwards
         _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
-        int samples = ringBuffer.samplesAvailable();
-        _spatialAudioStart += samples / _desiredOutputFormat.channelCount();
 
-        ringBuffer.reset();
+        // Advance the start point for the next packet of audio to arrive
+        _spatialAudioStart += numNetworkOutputSamples;
+
+        // Advance the read position by the same amount
+        //ringBuffer.shiftReadPosition(numNetworkOutputSamples);
 
     } else {
diff --git a/interface/src/Audio.h b/interface/src/Audio.h
index 052eb06bdd..80f2e93688 100644
--- a/interface/src/Audio.h
+++ b/interface/src/Audio.h
@@ -78,7 +78,7 @@ public slots:
     void start();
     void stop();
     void addReceivedAudioToBuffer(const QByteArray& audioByteArray);
-    void addSpatialAudioToBuffer(unsigned int sampleTime, const AudioRingBuffer& spatialAudio);
+    void addSpatialAudioToBuffer(unsigned int sampleTime, QByteArray& spatialAudio, unsigned int numSamples);
     void handleAudioInput();
     void reset();
     void toggleMute();
diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index e7d5c29e25..d9471ab796 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -130,10 +130,6 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve
     int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data();
     int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data();
 
-    AudioRingBuffer attenuatedLeftBuffer(totalNumberOfSamples);
-    AudioRingBuffer attenuatedRightBuffer(totalNumberOfSamples);
-
-
     for (int bounceNumber = 1; bounceNumber <= bounces; bounceNumber++) {
@@ -179,13 +175,8 @@ void AudioReflector::calculateReflections(const glm::vec3& origin, const glm::ve
             //qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight;
 
-            attenuatedLeftBuffer.writeSamples(attenuatedLeftSamplesData, totalNumberOfSamples);
-            attenuatedRightBuffer.writeSamples(attenuatedRightSamplesData, totalNumberOfSamples);
-
-            _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftBuffer);
-            _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightBuffer);
-            attenuatedLeftBuffer.reset();
-            attenuatedRightBuffer.reset();
+            _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
+            _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
         }
     }
 }
@@ -195,7 +186,6 @@ void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArr
 
     //qDebug() << "AudioReflector::processSpatialAudio()...sampleTime=" << sampleTime << " threadID=" << QThread::currentThreadId();
 
-    /*
     int totalNumberOfSamples = samples.size() / (sizeof(int16_t));
     int numFrameSamples = format.sampleRate() * format.channelCount();
 
@@ -205,6 +195,7 @@ void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArr
     qDebug() << "    sizeof(int16_t)=" << sizeof(int16_t);
 
+    /*
     AudioRingBuffer samplesRingBuffer(totalNumberOfSamples);
     qint64 bytesCopied = samplesRingBuffer.writeData(samples.constData(),samples.size());
     for(int i = 0; i < totalNumberOfSamples; i++) {
@@ -212,8 +203,7 @@ void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArr
     }
 
     qDebug() << "    bytesCopied=" << bytesCopied;
-
-    _audio->addSpatialAudioToBuffer(sampleTime + 12000, samplesRingBuffer);
+    _audio->addSpatialAudioToBuffer(sampleTime + 12000, samples, totalNumberOfSamples);
 
     return;
     */
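With AudioRingBuffer replaced by QByteArray in this interface, the byte-versus-sample bookkeeping moves to the callers. The conversions the new code leans on, gathered in one place (a sketch with an illustrative size; Qt types assumed):

    // QByteArray sizes are in bytes; the audio pipeline counts int16_t samples.
    int numSamples = 960; // illustrative: one stereo network frame
    QByteArray buffer;
    buffer.resize(numSamples * sizeof(int16_t)); // resize() takes bytes, not samples
    int16_t* writable = reinterpret_cast<int16_t*>(buffer.data());
    const int16_t* readable = reinterpret_cast<const int16_t*>(buffer.constData());
    int samplesInBuffer = buffer.size() / sizeof(int16_t); // back to a sample count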
From 9e157ff1c5bdd48013ae6e5f8c808b875b0f3019 Mon Sep 17 00:00:00 2001
From: matsukaze
Date: Fri, 4 Apr 2014 16:17:49 -0700
Subject: [PATCH 12/64] More audio fixes.

---
 interface/src/Audio.cpp | 22 +++++++++++++++-------
 interface/src/Audio.h   |  2 +-
 2 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index 33fc6ec4d2..5efb120793 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -624,7 +624,7 @@ unsigned int Audio::timeValToSampleTick(const quint64 time, int sampleRate) {
     return sample;
 }
 
-void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, QByteArray& spatialAudio, unsigned int numSamples) {
+void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) {
 
     // Calculate the number of remaining samples available. The source spatial audio buffer will get
     // clipped if there are insufficient samples available in the accumulation buffer.
@@ -673,10 +673,12 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& s
         unsigned int accumulationCt = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount();
         accumulationCt = (accumulationCt < numSamples) ? accumulationCt : numSamples;
 
-        int16_t* spatial = reinterpret_cast<int16_t*>(spatialAudio.data());
+        const int16_t* spatial = reinterpret_cast<const int16_t*>(spatialAudio.data());
         int j = 0;
         for (int i = accumulationCt; --i >= 0; j++) {
-            int tmp = _spatialAudioRingBuffer[j + offset] + spatial[j];
+            int t1 = _spatialAudioRingBuffer[j + offset];
+            int t2 = spatial[j];
+            int tmp = t1 + t2;
             _spatialAudioRingBuffer[j + offset] =
                 static_cast<int16_t>(glm::clamp(tmp, std::numeric_limits<int16_t>::min(), std::numeric_limits<int16_t>::max()));
         }
@@ -686,6 +688,7 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& s
         sampleCt = (remaining < sampleCt) ? remaining : sampleCt;
         if (sampleCt) {
             _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data() + accumulationCt, sampleCt);
+            // Extend the finish time by the amount of unoverlapped samples
             _spatialAudioFinish += sampleCt / _desiredOutputFormat.channelCount();
         }
     }
@@ -744,12 +747,12 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB
     if (_processSpatialAudio) {
         unsigned int sampleTime = _spatialAudioStart;
         QByteArray buffer;
-        buffer.resize(numDeviceOutputSamples * sizeof(int16_t));
+        buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
 
-        ringBuffer.readSamples((int16_t*)buffer.data(), numDeviceOutputSamples);
+        ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
         // Accumulate direct transmission of audio from sender to receiver
         addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
-        //addSpatialAudioToBuffer(sampleTime + 48000, ringBuffer);
+        //addSpatialAudioToBuffer(sampleTime + 48000, buffer, numNetworkOutputSamples);
 
         // Send audio off for spatial processing
         emit processSpatialAudio(sampleTime, buffer, _desiredOutputFormat);
@@ -759,7 +762,7 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB
         _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
 
         // Advance the start point for the next packet of audio to arrive
-        _spatialAudioStart += numNetworkOutputSamples;
+        _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
 
         // Advance the read position by the same amount
         //ringBuffer.shiftReadPosition(numNetworkOutputSamples);
@@ -838,6 +841,11 @@ void Audio::toggleToneInjection() {
 
 void Audio::toggleAudioSpatialProcessing() {
     _processSpatialAudio = !_processSpatialAudio;
+    if (_processSpatialAudio) {
+        _spatialAudioStart = 0;
+        _spatialAudioFinish = 0;
+        _spatialAudioRingBuffer.reset();
+    }
 }
 
 // Take a pointer to the acquired microphone input samples and add procedural sounds
diff --git a/interface/src/Audio.h b/interface/src/Audio.h
index 7520d97c0b..11ad235289 100644
--- a/interface/src/Audio.h
+++ b/interface/src/Audio.h
@@ -78,7 +78,7 @@ public slots:
     void start();
     void stop();
     void addReceivedAudioToBuffer(const QByteArray& audioByteArray);
-    void addSpatialAudioToBuffer(unsigned int sampleTime, QByteArray& spatialAudio, unsigned int numSamples);
+    void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
     void handleAudioInput();
     void reset();
     void toggleMute();
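Two of the fixes above are the same unit bug in different places: the buffer was sized with numDeviceOutputSamples while numNetworkOutputSamples were read into it, and _spatialAudioStart was advanced by an interleaved sample count even though it is a per-channel sample clock. The distinction, spelled out with assumed values (a sketch, not code from the patch):

    // Sample *time* advances one tick per frame (one sample per channel);
    // interleaved buffers hold channelCount samples per frame.
    int channelCount = 2;              // assumed stereo network format
    int numNetworkOutputSamples = 960; // interleaved samples in one packet
    int frames = numNetworkOutputSamples / channelCount; // 480 frames

    unsigned int spatialAudioStart = 0;
    // spatialAudioStart += numNetworkOutputSamples; // wrong: the clock runs 2x too fast
    spatialAudioStart += frames;                     // right: matches the ear-delay timestamps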
From 23e5452a8984ad1c734ddfdabf38a3df6310d418 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Thu, 3 Apr 2014 16:31:28 -0700
Subject: [PATCH 13/64] next cut

---
 interface/src/AudioReflector.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index 8587aa1c90..bb07a586ed 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -215,7 +215,7 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const glm::vec3& o
 }
 
 void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
-    bool doNothing = true;
+    bool doNothing = false;
     bool doSimpleEcho = false;
 
     if (doNothing) {
@@ -264,7 +264,7 @@ void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArr
         echoReflections(origin, down, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
         quint64 end = usecTimestampNow();
 
-        qDebug() << "AudioReflector::addSamples()... elapsed=" << (end - start);
+        //qDebug() << "AudioReflector::addSamples()... elapsed=" << (end - start);
     }
 }
From 98f0fe8619d3ba8f3e37dfbaedbc15b97bc4f0d8 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Thu, 3 Apr 2014 18:57:08 -0700
Subject: [PATCH 14/64] latest pass, improved distance calculations, added menu item to remove original source

---
 interface/src/Audio.cpp          |   9 +-
 interface/src/Audio.h            |   4 +-
 interface/src/AudioReflector.cpp | 394 ++++++++++++++++++------------
 interface/src/AudioReflector.h   |   8 +-
 interface/src/Menu.cpp           |   5 +
 interface/src/Menu.h             |   1 +
 6 files changed, 243 insertions(+), 178 deletions(-)

diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index e76adc4178..708bb72cc7 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -611,7 +611,7 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
     if (_audioOutput) {
         // Audio output must exist and be correctly set up if we're going to process received audio
         _ringBuffer.parseData(audioByteArray);
-        processReceivedAudio(_spatialAudioStart, _ringBuffer);
+        processReceivedAudio(_ringBuffer);
     }
 
     Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());
@@ -711,7 +711,7 @@ void Audio::toggleAudioNoiseReduction() {
     _noiseGateEnabled = !_noiseGateEnabled;
 }
 
-void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringBuffer) {
+void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) {
 
     float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate()) *
         (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
@@ -751,8 +751,9 @@ void Audio::processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringB
         ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
         // Accumulate direct transmission of audio from sender to receiver
-        addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
-        //addSpatialAudioToBuffer(sampleTime + 48000, buffer, numNetworkOutputSamples);
+        if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal)) {
+            addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
+        }
 
         // Send audio off for spatial processing
         emit processSpatialAudio(sampleTime, buffer, _desiredOutputFormat);
diff --git a/interface/src/Audio.h b/interface/src/Audio.h
index 11ad235289..c3417ae891 100644
--- a/interface/src/Audio.h
+++ b/interface/src/Audio.h
@@ -74,6 +74,8 @@ public:
     int getNetworkSampleRate() { return SAMPLE_RATE; }
     int getNetworkBufferLengthSamplesPerChannel() { return NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }
 
+    bool getProcessSpatialAudio() const { return _processSpatialAudio; }
+
 public slots:
     void start();
     void stop();
@@ -186,7 +188,7 @@ private:
     void addProceduralSounds(int16_t* monoInput, int numSamples);
 
     // Process received audio
-    void processReceivedAudio(unsigned int sampleTime, AudioRingBuffer& ringBuffer);
+    void processReceivedAudio(AudioRingBuffer& ringBuffer);
 
     bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo);
     bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo);
diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index bb07a586ed..177d0ecb93 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -13,17 +13,9 @@ void AudioReflector::render() {
         return; // exit early if not set up correctly
     }
 
-
-    /*
-    glm::vec3 position = _myAvatar->getHead()->getPosition();
-    const float radius = 0.25f;
-    glPushMatrix();
-    glTranslatef(position.x, position.y, position.z);
-    glutWireSphere(radius, 15, 15);
-    glPopMatrix();
-    */
-
-    drawRays();
+    if (_audio->getProcessSpatialAudio()) {
+        drawRays();
+    }
 }
 
@@ -39,6 +31,13 @@ int getDelayFromDistance(float distance) {
 
 const float BOUNCE_ATTENUATION_FACTOR = 0.125f;
 
+// each bounce we scale the attenuation coefficient up by this factor, so successive bounces
+// are attenuated less and less aggressively: 0.125, 0.25, 0.5, ...
+const float PER_BOUNCE_ATTENUATION_ADJUSTMENT = 2.0f;
+
+// the coefficient never grows past this cap, so from about the 4th bounce on the echoes stop getting appreciably quieter
+const float MAX_BOUNCE_ATTENUATION = 0.9f;
+
 float getDistanceAttenuationCoefficient(float distance) {
     const float DISTANCE_SCALE = 2.5f;
     const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
@@ -97,7 +96,7 @@ QVector<glm::vec3> AudioReflector::calculateReflections(const glm::vec3& origin,
 }
 
-void AudioReflector::newDrawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections) {
+void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections) {
 
     glm::vec3 start = origin;
     glm::vec3 color = originalColor;
@@ -110,33 +109,94 @@ void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& o
     }
 }
 
-void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
-                                     int bounces, const glm::vec3& originalColor) {
-
-    glm::vec3 start = origin;
-    glm::vec3 direction = originalDirection;
-    glm::vec3 color = originalColor;
-    OctreeElement* elementHit;
-    float distance;
-    BoxFace face;
-    const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
-    const float COLOR_ADJUST_PER_BOUNCE = 0.75f;
-
-    for (int i = 0; i < bounces; i++) {
-        if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
-            glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));
-            drawVector(start, end, color);
-
-            glm::vec3 faceNormal = getFaceNormal(face);
-            direction = glm::normalize(glm::reflect(direction,faceNormal));
-            start = end;
-            color = color * COLOR_ADJUST_PER_BOUNCE;
-        }
-    }
-}
 // set up our buffers for our attenuated and delayed samples
 const int NUMBER_OF_CHANNELS = 2;
 
+
+void AudioReflector::echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples,
+                                     unsigned int sampleTime, int sampleRate) {
+
+    glm::vec3 rightEarPosition = _myAvatar->getHead()->getRightEarPosition();
+    glm::vec3 leftEarPosition = _myAvatar->getHead()->getLeftEarPosition();
+    glm::vec3 start = origin;
+
+    int totalNumberOfSamples = samples.size() / sizeof(int16_t);
+    int totalNumberOfStereoSamples = samples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS);
+
+    const int16_t* originalSamplesData = (const int16_t*)samples.constData();
+    QByteArray attenuatedLeftSamples;
+    QByteArray attenuatedRightSamples;
+    attenuatedLeftSamples.resize(samples.size());
+    attenuatedRightSamples.resize(samples.size());
+
+    int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data();
+    int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data();
+
+    float rightDistance = 0;
+    float leftDistance = 0;
+    float bounceAttenuation = BOUNCE_ATTENUATION_FACTOR;
+
+    foreach (glm::vec3 end, reflections) {
+
+        rightDistance += glm::distance(start, end);
+        leftDistance += glm::distance(start, end);
+
+        // calculate the distance to the ears
+        float rightEarDistance = glm::distance(end, rightEarPosition);
+        float leftEarDistance = glm::distance(end, leftEarPosition);
+
+        float rightTotalDistance = rightEarDistance + rightDistance;
+        float leftTotalDistance = leftEarDistance + leftDistance;
+
+        int rightEarDelayMsecs = getDelayFromDistance(rightTotalDistance);
+        int leftEarDelayMsecs = getDelayFromDistance(leftTotalDistance);
+        int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
+        int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
+
+        //qDebug() << "leftTotalDistance=" << leftTotalDistance << "rightTotalDistance=" << rightTotalDistance;
+        //qDebug() << "leftEarDelay=" << leftEarDelay << "rightEarDelay=" << rightEarDelay;
+
+        float rightEarAttenuation = getDistanceAttenuationCoefficient(rightTotalDistance) * bounceAttenuation;
+        float leftEarAttenuation = getDistanceAttenuationCoefficient(leftTotalDistance) * bounceAttenuation;
+
+        //qDebug() << "leftEarAttenuation=" << leftEarAttenuation << "rightEarAttenuation=" << rightEarAttenuation;
+
+        bounceAttenuation = std::min(MAX_BOUNCE_ATTENUATION, bounceAttenuation * PER_BOUNCE_ATTENUATION_ADJUSTMENT);
+
+        // run through the samples, and attenuate them
+        for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
+            int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
+            int16_t rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
+
+            //qDebug() << "leftSample=" << leftSample << "rightSample=" << rightSample;
+
+            attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
+            attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
+
+            attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
+            attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
+
+            //qDebug() << "attenuated... leftSample=" << (leftSample * leftEarAttenuation) << "rightSample=" << (rightSample * rightEarAttenuation);
+        }
+
+        // now inject the attenuated array with the appropriate delay
+        unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
+        unsigned int sampleTimeRight = sampleTime + rightEarDelay;
+
+        //qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight;
+
+        _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
+        _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
+
+        start = end;
+    }
+}
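For reference, the ramp those three constants produce in echoReflections() above: the coefficient starts at BOUNCE_ATTENUATION_FACTOR, doubles after each bounce, and is capped at MAX_BOUNCE_ATTENUATION, giving per-bounce factors of 0.125, 0.25, 0.5, 0.9, 0.9 before distance attenuation is applied on top. A sketch of just the ramp, with the constant values inlined:

    #include <algorithm>
    #include <cstdio>

    int main() {
        float bounceAttenuation = 0.125f; // BOUNCE_ATTENUATION_FACTOR
        for (int bounce = 1; bounce <= 5; bounce++) {
            printf("bounce %d: %.3f\n", bounce, bounceAttenuation); // 0.125, 0.25, 0.5, 0.9, 0.9
            // cap is MAX_BOUNCE_ATTENUATION, multiplier is PER_BOUNCE_ATTENUATION_ADJUSTMENT
            bounceAttenuation = std::min(0.9f, bounceAttenuation * 2.0f);
        }
        return 0;
    }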
leftSample=" << (leftSample * leftEarAttenuation) << "rightSample=" << (rightSample * rightEarAttenuation); + } + + // now inject the attenuated array with the appropriate delay + + unsigned int sampleTimeLeft = sampleTime + leftEarDelay; + unsigned int sampleTimeRight = sampleTime + rightEarDelay; + + //qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight; + + _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples); + _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples); + + + start = end; + } +} + + +void AudioReflector::oldEchoReflections(const glm::vec3& origin, const glm::vec3& originalDirection, int bounces, const QByteArray& originalSamples, unsigned int sampleTime, int sampleRate) { @@ -225,7 +285,7 @@ void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArr _audio->addSpatialAudioToBuffer(sampleTime + 12000, samples, totalNumberOfSamples); return; } else { - quint64 start = usecTimestampNow(); + //quint64 start = usecTimestampNow(); glm::vec3 origin = _myAvatar->getHead()->getPosition(); @@ -247,22 +307,37 @@ void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArr const int BOUNCE_COUNT = 5; - echoReflections(origin, frontRightUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - echoReflections(origin, frontLeftUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - echoReflections(origin, backRightUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - echoReflections(origin, backLeftUp, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - echoReflections(origin, frontRightDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - echoReflections(origin, frontLeftDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - echoReflections(origin, backRightDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - echoReflections(origin, backLeftDown, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); + QVector frontRightUpReflections = calculateReflections(origin, frontRightUp, BOUNCE_COUNT); + QVector frontLeftUpReflections = calculateReflections(origin, frontLeftUp, BOUNCE_COUNT); + QVector backRightUpReflections = calculateReflections(origin, backRightUp, BOUNCE_COUNT); + QVector backLeftUpReflections = calculateReflections(origin, backLeftUp, BOUNCE_COUNT); + QVector frontRightDownReflections = calculateReflections(origin, frontRightDown, BOUNCE_COUNT); + QVector frontLeftDownReflections = calculateReflections(origin, frontLeftDown, BOUNCE_COUNT); + QVector backRightDownReflections = calculateReflections(origin, backRightDown, BOUNCE_COUNT); + QVector backLeftDownReflections = calculateReflections(origin, backLeftDown, BOUNCE_COUNT); + QVector frontReflections = calculateReflections(origin, front, BOUNCE_COUNT); + QVector backReflections = calculateReflections(origin, back, BOUNCE_COUNT); + QVector leftReflections = calculateReflections(origin, left, BOUNCE_COUNT); + QVector rightReflections = calculateReflections(origin, right, BOUNCE_COUNT); + QVector upReflections = calculateReflections(origin, up, BOUNCE_COUNT); + QVector downReflections = calculateReflections(origin, down, BOUNCE_COUNT); - echoReflections(origin, front, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - echoReflections(origin, back, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - echoReflections(origin, left, BOUNCE_COUNT, samples, sampleTime, format.sampleRate()); - 
- echoReflections(origin, right, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
- echoReflections(origin, up, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
- echoReflections(origin, down, BOUNCE_COUNT, samples, sampleTime, format.sampleRate());
- quint64 end = usecTimestampNow();
+ echoReflections(origin, frontRightUpReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, frontLeftUpReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, backRightUpReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, backLeftUpReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, frontRightDownReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, frontLeftDownReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, backRightDownReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, backLeftDownReflections, samples, sampleTime, format.sampleRate());
+
+ echoReflections(origin, frontReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, backReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, leftReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, rightReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, upReflections, samples, sampleTime, format.sampleRate());
+ echoReflections(origin, downReflections, samples, sampleTime, format.sampleRate());
+ //quint64 end = usecTimestampNow();
 //qDebug() << "AudioReflector::addSamples()... elapsed=" << (end - start);
}
@@ -307,130 +382,109 @@ void AudioReflector::drawRays() {
 const int BOUNCE_COUNT = 5;
- bool oldWay = false;
+ QVector<glm::vec3> frontRightUpReflections = calculateReflections(origin, frontRightUp, BOUNCE_COUNT);
+ QVector<glm::vec3> frontLeftUpReflections = calculateReflections(origin, frontLeftUp, BOUNCE_COUNT);
+ QVector<glm::vec3> backRightUpReflections = calculateReflections(origin, backRightUp, BOUNCE_COUNT);
+ QVector<glm::vec3> backLeftUpReflections = calculateReflections(origin, backLeftUp, BOUNCE_COUNT);
+ QVector<glm::vec3> frontRightDownReflections = calculateReflections(origin, frontRightDown, BOUNCE_COUNT);
+ QVector<glm::vec3> frontLeftDownReflections = calculateReflections(origin, frontLeftDown, BOUNCE_COUNT);
+ QVector<glm::vec3> backRightDownReflections = calculateReflections(origin, backRightDown, BOUNCE_COUNT);
+ QVector<glm::vec3> backLeftDownReflections = calculateReflections(origin, backLeftDown, BOUNCE_COUNT);
+ QVector<glm::vec3> frontReflections = calculateReflections(origin, front, BOUNCE_COUNT);
+ QVector<glm::vec3> backReflections = calculateReflections(origin, back, BOUNCE_COUNT);
+ QVector<glm::vec3> leftReflections = calculateReflections(origin, left, BOUNCE_COUNT);
+ QVector<glm::vec3> rightReflections = calculateReflections(origin, right, BOUNCE_COUNT);
+ QVector<glm::vec3> upReflections = calculateReflections(origin, up, BOUNCE_COUNT);
+ QVector<glm::vec3> downReflections = calculateReflections(origin, down, BOUNCE_COUNT);
+
+ glm::vec3 frontRightUpColor = RED;
+ glm::vec3 frontLeftUpColor = GREEN;
+ glm::vec3 backRightUpColor = BLUE;
+ glm::vec3 backLeftUpColor = CYAN;
+ glm::vec3 frontRightDownColor = PURPLE;
+ glm::vec3 frontLeftDownColor = YELLOW;
+ glm::vec3 backRightDownColor = WHITE;
+ glm::vec3 backLeftDownColor = DARK_RED;
+ glm::vec3 frontColor = GRAY;
+ glm::vec3 backColor = DARK_GREEN;
+ glm::vec3 leftColor = DARK_BLUE;
+ glm::vec3 rightColor = DARK_CYAN;
+ glm::vec3 upColor = DARK_PURPLE;
+ glm::vec3 downColor = DARK_YELLOW;
- if (oldWay) {
- drawReflections(origin, frontRightUp, BOUNCE_COUNT, RED);
- drawReflections(origin, frontLeftUp, BOUNCE_COUNT, GREEN);
- drawReflections(origin, backRightUp, BOUNCE_COUNT, BLUE);
- drawReflections(origin, backLeftUp, BOUNCE_COUNT, CYAN);
- drawReflections(origin, frontRightDown, BOUNCE_COUNT, PURPLE);
- drawReflections(origin, frontLeftDown, BOUNCE_COUNT, YELLOW);
- drawReflections(origin, backRightDown, BOUNCE_COUNT, WHITE);
- drawReflections(origin, backLeftDown, BOUNCE_COUNT, DARK_RED);
+ // attempt to determine insideness/outsideness based on number of directional rays that reflect
+ bool inside = false;
+
+ bool blockedUp = (frontRightUpReflections.size() > 0) &&
+ (frontLeftUpReflections.size() > 0) &&
+ (backRightUpReflections.size() > 0) &&
+ (backLeftUpReflections.size() > 0) &&
+ (upReflections.size() > 0);
- drawReflections(origin, front, BOUNCE_COUNT, DARK_GREEN);
- drawReflections(origin, back, BOUNCE_COUNT, DARK_BLUE);
- drawReflections(origin, left, BOUNCE_COUNT, DARK_CYAN);
- drawReflections(origin, right, BOUNCE_COUNT, DARK_PURPLE);
- drawReflections(origin, up, BOUNCE_COUNT, DARK_YELLOW);
- drawReflections(origin, down, BOUNCE_COUNT, GRAY);
- } else {
- QVector<glm::vec3> frontRightUpReflections = calculateReflections(origin, frontRightUp, BOUNCE_COUNT);
- QVector<glm::vec3> frontLeftUpReflections = calculateReflections(origin, frontLeftUp, BOUNCE_COUNT);
- QVector<glm::vec3> backRightUpReflections = calculateReflections(origin, backRightUp, BOUNCE_COUNT);
- QVector<glm::vec3> backLeftUpReflections = calculateReflections(origin, backLeftUp, BOUNCE_COUNT);
- QVector<glm::vec3> frontRightDownReflections = calculateReflections(origin, frontRightDown, BOUNCE_COUNT);
- QVector<glm::vec3> frontLeftDownReflections = calculateReflections(origin, frontLeftDown, BOUNCE_COUNT);
- QVector<glm::vec3> backRightDownReflections = calculateReflections(origin, backRightDown, BOUNCE_COUNT);
- QVector<glm::vec3> backLeftDownReflections = calculateReflections(origin, backLeftDown, BOUNCE_COUNT);
- QVector<glm::vec3> frontReflections = calculateReflections(origin, front, BOUNCE_COUNT);
- QVector<glm::vec3> backReflections = calculateReflections(origin, back, BOUNCE_COUNT);
- QVector<glm::vec3> leftReflections = calculateReflections(origin, left, BOUNCE_COUNT);
- QVector<glm::vec3> rightReflections = calculateReflections(origin, right, BOUNCE_COUNT);
- QVector<glm::vec3> upReflections = calculateReflections(origin, up, BOUNCE_COUNT);
- QVector<glm::vec3> downReflections = calculateReflections(origin, down, BOUNCE_COUNT);
+ bool blockedDown = (frontRightDownReflections.size() > 0) &&
+ (frontLeftDownReflections.size() > 0) &&
+ (backRightDownReflections.size() > 0) &&
+ (backLeftDownReflections.size() > 0) &&
+ (downReflections.size() > 0);
- glm::vec3 frontRightUpColor = RED;
- glm::vec3 frontLeftUpColor = GREEN;
- glm::vec3 backRightUpColor = BLUE;
- glm::vec3 backLeftUpColor = CYAN;
- glm::vec3 frontRightDownColor = PURPLE;
- glm::vec3 frontLeftDownColor = YELLOW;
- glm::vec3 backRightDownColor = WHITE;
- glm::vec3 backLeftDownColor = DARK_RED;
- glm::vec3 frontColor = GRAY;
- glm::vec3 backColor = DARK_GREEN;
- glm::vec3 leftColor = DARK_BLUE;
- glm::vec3 rightColor = DARK_CYAN;
- glm::vec3 upColor = DARK_PURPLE;
- glm::vec3 downColor = DARK_YELLOW;
-
- // attempt to determine insideness/outsideness based on number of directional rays that reflect
- bool inside = false;
-
- bool blockedUp = (frontRightUpReflections.size() > 0) &&
- (frontLeftUpReflections.size() > 0) &&
- (backRightUpReflections.size() > 0) &&
- (backLeftUpReflections.size() > 0) &&
- (upReflections.size() > 0);
+ bool blockedFront = (frontRightUpReflections.size() > 0) &&
+
(frontLeftUpReflections.size() > 0) && + (frontRightDownReflections.size() > 0) && + (frontLeftDownReflections.size() > 0) && + (frontReflections.size() > 0); - bool blockedDown = (frontRightDownReflections.size() > 0) && - (frontLeftDownReflections.size() > 0) && - (backRightDownReflections.size() > 0) && - (backLeftDownReflections.size() > 0) && - (downReflections.size() > 0); + bool blockedBack = (backRightUpReflections.size() > 0) && + (backLeftUpReflections.size() > 0) && + (backRightDownReflections.size() > 0) && + (backLeftDownReflections.size() > 0) && + (backReflections.size() > 0); + + bool blockedLeft = (frontLeftUpReflections.size() > 0) && + (backLeftUpReflections.size() > 0) && + (frontLeftDownReflections.size() > 0) && + (backLeftDownReflections.size() > 0) && + (leftReflections.size() > 0); - bool blockedFront = (frontRightUpReflections.size() > 0) && - (frontLeftUpReflections.size() > 0) && - (frontRightDownReflections.size() > 0) && - (frontLeftDownReflections.size() > 0) && - (frontReflections.size() > 0); - - bool blockedBack = (backRightUpReflections.size() > 0) && - (backLeftUpReflections.size() > 0) && - (backRightDownReflections.size() > 0) && - (backLeftDownReflections.size() > 0) && - (backReflections.size() > 0); - - bool blockedLeft = (frontLeftUpReflections.size() > 0) && - (backLeftUpReflections.size() > 0) && - (frontLeftDownReflections.size() > 0) && - (backLeftDownReflections.size() > 0) && - (leftReflections.size() > 0); - - bool blockedRight = (frontRightUpReflections.size() > 0) && - (backRightUpReflections.size() > 0) && - (frontRightDownReflections.size() > 0) && - (backRightDownReflections.size() > 0) && - (rightReflections.size() > 0); - - inside = blockedUp && blockedDown && blockedFront && blockedBack && blockedLeft && blockedRight; - - if (inside) { - frontRightUpColor = RED; - frontLeftUpColor = RED; - backRightUpColor = RED; - backLeftUpColor = RED; - frontRightDownColor = RED; - frontLeftDownColor = RED; - backRightDownColor = RED; - backLeftDownColor = RED; - frontColor = RED; - backColor = RED; - leftColor = RED; - rightColor = RED; - upColor = RED; - downColor = RED; - } - - newDrawReflections(origin, frontRightUpColor, frontRightUpReflections); - newDrawReflections(origin, frontLeftUpColor, frontLeftUpReflections); - newDrawReflections(origin, backRightUpColor, backRightUpReflections); - newDrawReflections(origin, backLeftUpColor, backLeftUpReflections); - newDrawReflections(origin, frontRightDownColor, frontRightDownReflections); - newDrawReflections(origin, frontLeftDownColor, frontLeftDownReflections); - newDrawReflections(origin, backRightDownColor, backRightDownReflections); - newDrawReflections(origin, backLeftDownColor, backLeftDownReflections); - - newDrawReflections(origin, frontColor, frontReflections); - newDrawReflections(origin, backColor, backReflections); - newDrawReflections(origin, leftColor, leftReflections); - newDrawReflections(origin, rightColor, rightReflections); - newDrawReflections(origin, upColor, upReflections); - newDrawReflections(origin, downColor, downReflections); + bool blockedRight = (frontRightUpReflections.size() > 0) && + (backRightUpReflections.size() > 0) && + (frontRightDownReflections.size() > 0) && + (backRightDownReflections.size() > 0) && + (rightReflections.size() > 0); + inside = blockedUp && blockedDown && blockedFront && blockedBack && blockedLeft && blockedRight; + + if (inside) { + frontRightUpColor = RED; + frontLeftUpColor = RED; + backRightUpColor = RED; + backLeftUpColor = RED; + 
frontRightDownColor = RED;
+ frontLeftDownColor = RED;
+ backRightDownColor = RED;
+ backLeftDownColor = RED;
+ frontColor = RED;
+ backColor = RED;
+ leftColor = RED;
+ rightColor = RED;
+ upColor = RED;
+ downColor = RED;
 }
+
+ drawReflections(origin, frontRightUpColor, frontRightUpReflections);
+ drawReflections(origin, frontLeftUpColor, frontLeftUpReflections);
+ drawReflections(origin, backRightUpColor, backRightUpReflections);
+ drawReflections(origin, backLeftUpColor, backLeftUpReflections);
+ drawReflections(origin, frontRightDownColor, frontRightDownReflections);
+ drawReflections(origin, frontLeftDownColor, frontLeftDownReflections);
+ drawReflections(origin, backRightDownColor, backRightDownReflections);
+ drawReflections(origin, backLeftDownColor, backLeftDownReflections);
+
+ drawReflections(origin, frontColor, frontReflections);
+ drawReflections(origin, backColor, backReflections);
+ drawReflections(origin, leftColor, leftReflections);
+ drawReflections(origin, rightColor, rightReflections);
+ drawReflections(origin, upColor, upReflections);
+ drawReflections(origin, downColor, downReflections);
}

void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) {
diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h
index ea379f2790..d15927dddc 100644
--- a/interface/src/AudioReflector.h
+++ b/interface/src/AudioReflector.h
@@ -37,13 +37,15 @@ private:
 void drawRays();
 void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);
- void drawReflections(const glm::vec3& origin, const glm::vec3& direction, int bounces, const glm::vec3& color);
- void echoReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
+ void oldEchoReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
 int bounces, const QByteArray& samples,
 unsigned int sampleTime, int sampleRate);
+ void echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples,
+ unsigned int sampleTime, int sampleRate);
+ QVector<glm::vec3> calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, int maxBounces);
- void newDrawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections);
+ void drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections);
};
diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp
index 9bb63b4a34..d000b9cd6a 100644
--- a/interface/src/Menu.cpp
+++ b/interface/src/Menu.cpp
@@ -371,6 +371,11 @@ Menu::Menu() :
 false,
 appInstance->getAudio(),
 SLOT(toggleAudioSpatialProcessing()));
+
+ addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingIncudeOriginal,
+ Qt::CTRL | Qt::SHIFT | Qt::Key_O,
+ true);
+
 addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter,
 Qt::CTRL | Qt::SHIFT | Qt::Key_F,
 false);
diff --git a/interface/src/Menu.h b/interface/src/Menu.h
index 99df84784a..3d3961edaa 100644
--- a/interface/src/Menu.h
+++ b/interface/src/Menu.h
@@ -244,6 +244,7 @@ namespace MenuOption {
 const QString AudioNoiseReduction = "Audio Noise Reduction";
 const QString AudioToneInjection = "Inject Test Tone";
 const QString AudioSpatialProcessing = "Audio Spatial Processing";
+ const QString AudioSpatialProcessingIncudeOriginal = "Audio Spatial Processing includes Original";
 const QString EchoServerAudio = "Echo Server Audio";
 const QString EchoLocalAudio = "Echo Local Audio";
 const QString MuteAudio = "Mute
Microphone"; From f45f30234f1ab669055af35af458f3f29cd12bc1 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Thu, 3 Apr 2014 23:03:06 -0700 Subject: [PATCH 15/64] more tweaks to reflections --- interface/src/AudioReflector.cpp | 417 +++++++++++++++---------------- interface/src/AudioReflector.h | 51 +++- 2 files changed, 243 insertions(+), 225 deletions(-) diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 177d0ecb93..1562055be7 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -6,8 +6,17 @@ // Copyright (c) 2014 High Fidelity, Inc. All rights reserved. // +#include + #include "AudioReflector.h" +AudioReflector::AudioReflector(QObject* parent) : + QObject(parent) +{ + reset(); +} + + void AudioReflector::render() { if (!_myAvatar) { return; // exit early if not set up correctly @@ -36,7 +45,7 @@ const float BOUNCE_ATTENUATION_FACTOR = 0.125f; const float PER_BOUNCE_ATTENUATION_ADJUSTMENT = 2.0f; // we don't grow larger than this, which means by the 4th bounce we don't get that much less quiet -const float MAX_BOUNCE_ATTENUATION = 0.9f; +const float MAX_BOUNCE_ATTENUATION = 0.99f; float getDistanceAttenuationCoefficient(float distance) { const float DISTANCE_SCALE = 2.5f; @@ -72,6 +81,97 @@ glm::vec3 getFaceNormal(BoxFace face) { return glm::vec3(0, 0, 0); //error case } +void AudioReflector::reset() { + _reflections = 0; + _averageAttenuation = 0.0f; + _maxAttenuation = 0.0f; + _minAttenuation = 0.0f; + _averageDelay = 0; + _maxDelay = 0; + _minDelay = 0; + + _reflections = _frontRightUpReflections.size() + + _frontLeftUpReflections.size() + + _backRightUpReflections.size() + + _backLeftUpReflections.size() + + _frontRightDownReflections.size() + + _frontLeftDownReflections.size() + + _backRightDownReflections.size() + + _backLeftDownReflections.size() + + _frontReflections.size() + + _backReflections.size() + + _leftReflections.size() + + _rightReflections.size() + + _upReflections.size() + + _downReflections.size(); + +} + +void AudioReflector::calculateAllReflections() { + + // only recalculate when we've moved... + // TODO: what about case where new voxels are added in front of us??? 
+ if (_myAvatar->getHead()->getPosition() != _origin) {
+ QMutexLocker locker(&_mutex);
+
+ qDebug() << "origin has changed...";
+ qDebug(" _myAvatar->getHead()->getPosition()=%f,%f,%f",
+ _myAvatar->getHead()->getPosition().x,
+ _myAvatar->getHead()->getPosition().y,
+ _myAvatar->getHead()->getPosition().z);
+
+ qDebug(" _origin=%f,%f,%f",
+ _origin.x,
+ _origin.y,
+ _origin.z);
+
+
+ quint64 start = usecTimestampNow();
+
+ _origin = _myAvatar->getHead()->getPosition();
+
+ glm::quat orientation = _myAvatar->getOrientation(); // _myAvatar->getHead()->getOrientation();
+ glm::vec3 right = glm::normalize(orientation * IDENTITY_RIGHT);
+ glm::vec3 up = glm::normalize(orientation * IDENTITY_UP);
+ glm::vec3 front = glm::normalize(orientation * IDENTITY_FRONT);
+ glm::vec3 left = -right;
+ glm::vec3 down = -up;
+ glm::vec3 back = -front;
+ glm::vec3 frontRightUp = glm::normalize(front + right + up);
+ glm::vec3 frontLeftUp = glm::normalize(front + left + up);
+ glm::vec3 backRightUp = glm::normalize(back + right + up);
+ glm::vec3 backLeftUp = glm::normalize(back + left + up);
+ glm::vec3 frontRightDown = glm::normalize(front + right + down);
+ glm::vec3 frontLeftDown = glm::normalize(front + left + down);
+ glm::vec3 backRightDown = glm::normalize(back + right + down);
+ glm::vec3 backLeftDown = glm::normalize(back + left + down);
+
+ const int BOUNCE_COUNT = 5;
+
+ _frontRightUpReflections = calculateReflections(_origin, frontRightUp, BOUNCE_COUNT);
+ _frontLeftUpReflections = calculateReflections(_origin, frontLeftUp, BOUNCE_COUNT);
+ _backRightUpReflections = calculateReflections(_origin, backRightUp, BOUNCE_COUNT);
+ _backLeftUpReflections = calculateReflections(_origin, backLeftUp, BOUNCE_COUNT);
+ _frontRightDownReflections = calculateReflections(_origin, frontRightDown, BOUNCE_COUNT);
+ _frontLeftDownReflections = calculateReflections(_origin, frontLeftDown, BOUNCE_COUNT);
+ _backRightDownReflections = calculateReflections(_origin, backRightDown, BOUNCE_COUNT);
+ _backLeftDownReflections = calculateReflections(_origin, backLeftDown, BOUNCE_COUNT);
+ _frontReflections = calculateReflections(_origin, front, BOUNCE_COUNT);
+ _backReflections = calculateReflections(_origin, back, BOUNCE_COUNT);
+ _leftReflections = calculateReflections(_origin, left, BOUNCE_COUNT);
+ _rightReflections = calculateReflections(_origin, right, BOUNCE_COUNT);
+ _upReflections = calculateReflections(_origin, up, BOUNCE_COUNT);
+ _downReflections = calculateReflections(_origin, down, BOUNCE_COUNT);
+
+ quint64 end = usecTimestampNow();
+
+ reset();
+
+ qDebug() << "Reflections recalculated in " << (end - start) << "usecs";
+
+ }
+}
+
QVector<glm::vec3> AudioReflector::calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, int maxBounces) {
 QVector<glm::vec3> reflectionPoints;
 glm::vec3 start = origin;
@@ -150,6 +250,14 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector
+
+ #include
 
 #include "Audio.h"
@@ -17,7 +19,7 @@ class AudioReflector : public QObject {
 Q_OBJECT
public:
- AudioReflector(QObject* parent = 0) : QObject(parent) { };
+ AudioReflector(QObject* parent = NULL);
 void setVoxels(VoxelTree* voxels) { _voxels = voxels; }
 void setMyAvatar(MyAvatar* myAvatar) { _myAvatar = myAvatar; }
@@ -25,6 +27,14 @@ public:
 void render();
+ int getReflections() const { return _reflections; }
+ int getAverageDelayMsecs() const { return _averageDelay; }
+ float getAverageAttenuation() const { return _averageAttenuation; }
+ int getMaxDelayMsecs() const { return _maxDelay; }
+ float
getMaxAttenuation() const { return _maxAttenuation; }
+ int getMinDelayMsecs() const { return _minDelay; }
+ float getMinAttenuation() const { return _minAttenuation; }
+
public slots:
 void processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
@@ -37,15 +47,48 @@ private:
 void drawRays();
 void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);
- void oldEchoReflections(const glm::vec3& origin, const glm::vec3& originalDirection,
- int bounces, const QByteArray& samples,
- unsigned int sampleTime, int sampleRate);
 void echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples,
 unsigned int sampleTime, int sampleRate);
 QVector<glm::vec3> calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, int maxBounces);
 void drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections);
+
+ void calculateAllReflections();
+ void reset();
+
+ int _reflections;
+
+ int _delayCount;
+ int _totalDelay;
+ int _averageDelay;
+ int _maxDelay;
+ int _minDelay;
+
+ int _attenuationCount;
+ float _totalAttenuation;
+ float _averageAttenuation;
+ float _maxAttenuation;
+ float _minAttenuation;
+
+ glm::vec3 _origin;
+ QVector<glm::vec3> _frontRightUpReflections;
+ QVector<glm::vec3> _frontLeftUpReflections;
+ QVector<glm::vec3> _backRightUpReflections;
+ QVector<glm::vec3> _backLeftUpReflections;
+ QVector<glm::vec3> _frontRightDownReflections;
+ QVector<glm::vec3> _frontLeftDownReflections;
+ QVector<glm::vec3> _backRightDownReflections;
+ QVector<glm::vec3> _backLeftDownReflections;
+ QVector<glm::vec3> _frontReflections;
+ QVector<glm::vec3> _backReflections;
+ QVector<glm::vec3> _leftReflections;
+ QVector<glm::vec3> _rightReflections;
+ QVector<glm::vec3> _upReflections;
+ QVector<glm::vec3> _downReflections;
+
+ QMutex _mutex;
+
};

From 4c0f83913bc08c5c481d26b04906ae3a682837e3 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Fri, 4 Apr 2014 07:04:02 -0700
Subject: [PATCH 16/64] tweaks to bounce

---
 interface/src/AudioReflector.cpp | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index 1562055be7..f61f411abc 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -38,12 +38,14 @@ int getDelayFromDistance(float distance) {
 return MS_DELAY_PER_METER * distance;
}
-const float BOUNCE_ATTENUATION_FACTOR = 0.125f;
+// **option 1**: this is what we're using
+const float PER_BOUNCE_ATTENUATION_FACTOR = 0.5f;
+// **option 2**: we're not using these
+const float BOUNCE_ATTENUATION_FACTOR = 0.125f;
 // each bounce we adjust our attenuation by this factor, the result is an asymptotically decreasing attenuation...
 // 0.125, 0.25, 0.5, ...
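 // (worked example of that progression: 0.125 doubled each bounce gives 0.25, 0.5, then 1.0,
 // which the code clamps to MAX_BOUNCE_ATTENUATION, so the 4th bounce and beyond all use
 // roughly the same adjustment)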
const float PER_BOUNCE_ATTENUATION_ADJUSTMENT = 2.0f; - // we don't grow larger than this, which means by the 4th bounce we don't get that much less quiet const float MAX_BOUNCE_ATTENUATION = 0.99f; @@ -234,9 +236,12 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector Date: Fri, 4 Apr 2014 11:25:43 -0700 Subject: [PATCH 18/64] add reflection stats to stats display, add pre-delay, add toggle for separate ears, add surface randomness --- interface/src/Application.cpp | 30 ++++++++++++++++++++ interface/src/AudioReflector.cpp | 48 ++++++++++++++++++++++---------- interface/src/AudioReflector.h | 15 +++++----- interface/src/Menu.cpp | 8 ++++++ interface/src/Menu.h | 2 ++ 5 files changed, 81 insertions(+), 22 deletions(-) diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index 863df28319..89f6a8e223 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -2835,6 +2835,36 @@ void Application::displayStats() { verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, downloadStats.str().c_str(), WHITE_TEXT); + + if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) { + // add some reflection stats + char reflectionsStatus[128]; + + sprintf(reflectionsStatus, "Reflections: %d, Pre-Delay: %f, Separate Ears:%s", + _audioReflector.getReflections(), + _audioReflector.getDelayFromDistance(0.0f), + debug::valueOf(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars))); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT); + + sprintf(reflectionsStatus, "Delay: average %f, max %f, min %f", + _audioReflector.getAverageDelayMsecs(), + _audioReflector.getMaxDelayMsecs(), + _audioReflector.getMinDelayMsecs()); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT); + + sprintf(reflectionsStatus, "Attenuation: average %f, max %f, min %f", + _audioReflector.getAverageAttenuation(), + _audioReflector.getMaxAttenuation(), + _audioReflector.getMinAttenuation()); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT); + } + } verticalOffset = 0; diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index f61f411abc..b7ee81ff1c 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -9,6 +9,7 @@ #include #include "AudioReflector.h" +#include "Menu.h" AudioReflector::AudioReflector(QObject* parent) : QObject(parent) @@ -28,14 +29,23 @@ void AudioReflector::render() { } + // delay = 1ms per foot // = 3ms per meter // attenuation = // BOUNCE_ATTENUATION_FACTOR [0.5] * (1/(1+distance)) +const float PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections -int getDelayFromDistance(float distance) { +float AudioReflector::getDelayFromDistance(float distance) { const int MS_DELAY_PER_METER = 3; - return MS_DELAY_PER_METER * distance; + float delay = (MS_DELAY_PER_METER * distance); + + if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) { + delay += PRE_DELAY; + } + + + return delay; } // **option 1**: this is what we're using @@ -61,24 +71,29 @@ float getDistanceAttenuationCoefficient(float distance) { float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR, DISTANCE_SCALE_LOG + (0.5f * 
logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
- distanceCoefficient = std::min(1.0f, distanceCoefficient);
+
+ const float DISTANCE_SCALING_FACTOR = 2.0f;
+
+ distanceCoefficient = std::min(1.0f, distanceCoefficient * DISTANCE_SCALING_FACTOR);
 return distanceCoefficient;
}

glm::vec3 getFaceNormal(BoxFace face) {
+ float surfaceRandomness = randFloatInRange(0.99,1.0);
+ float surfaceRemainder = (1.0f - surfaceRandomness)/2.0f;
 if (face == MIN_X_FACE) {
- return glm::vec3(-1, 0, 0);
+ return glm::vec3(-surfaceRandomness, surfaceRemainder, surfaceRemainder);
 } else if (face == MAX_X_FACE) {
- return glm::vec3(1, 0, 0);
+ return glm::vec3(surfaceRandomness, surfaceRemainder, surfaceRemainder);
 } else if (face == MIN_Y_FACE) {
- return glm::vec3(0, -1, 0);
+ return glm::vec3(surfaceRemainder, -surfaceRandomness, surfaceRemainder);
 } else if (face == MAX_Y_FACE) {
- return glm::vec3(0, 1, 0);
+ return glm::vec3(surfaceRemainder, surfaceRandomness, surfaceRemainder);
 } else if (face == MIN_Z_FACE) {
- return glm::vec3(0, 0, -1);
+ return glm::vec3(surfaceRemainder, surfaceRemainder, -surfaceRandomness);
 } else if (face == MAX_Z_FACE) {
- return glm::vec3(0, 0, 1);
+ return glm::vec3(surfaceRemainder, surfaceRemainder, surfaceRandomness);
 }
 return glm::vec3(0, 0, 0); //error case
}
@@ -113,7 +128,7 @@ void AudioReflector::calculateAllReflections() {
 // only recalculate when we've moved...
 // TODO: what about case where new voxels are added in front of us???
- if (_myAvatar->getHead()->getPosition() != _origin) {
+ if (_reflections == 0 || _myAvatar->getHead()->getPosition() != _origin) {
 QMutexLocker locker(&_mutex);
 qDebug() << "origin has changed...";
@@ -218,8 +233,11 @@ const int NUMBER_OF_CHANNELS = 2;
 void AudioReflector::echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples,
 unsigned int sampleTime, int sampleRate) {
- glm::vec3 rightEarPosition = _myAvatar->getHead()->getRightEarPosition();
- glm::vec3 leftEarPosition = _myAvatar->getHead()->getLeftEarPosition();
+ bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
+ glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() :
+ _myAvatar->getHead()->getPosition();
+ glm::vec3 leftEarPosition = wantEarSeparation ?
_myAvatar->getHead()->getLeftEarPosition() :
+ _myAvatar->getHead()->getPosition();
 glm::vec3 start = origin;
 int totalNumberOfSamples = samples.size() / sizeof(int16_t);
@@ -253,8 +271,8 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector
 
 rightEarAttenuation = getDistanceAttenuationCoefficient(rightTotalDistance) * bounceAttenuation;
 leftEarAttenuation = getDistanceAttenuationCoefficient(leftTotalDistance) * bounceAttenuation;
@@ -302,13 +320,13 @@ void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArr

void AudioReflector::reset() {
@@ -318,9 +344,9 @@
 _averageAttenuation = 0.0f;
 _maxAttenuation = 0.0f;
- _minAttenuation = std::numeric_limits<float>::max();
 _minAttenuation = std::numeric_limits<float>::max();
- _totalDelay = 0;
+ _totalDelay = 0.0f;
 _delayCount = 0;
 _totalAttenuation = 0.0f;
 _attenuationCount = 0;
diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h
index f7e710d556..119a45f211 100644
--- a/interface/src/AudioReflector.h
+++ b/interface/src/AudioReflector.h
@@ -28,12 +28,13 @@ public:
 void render();
 int getReflections() const { return _reflections; }
- int getAverageDelayMsecs() const { return _averageDelay; }
+ float getAverageDelayMsecs() const { return _averageDelay; }
 float getAverageAttenuation() const { return _averageAttenuation; }
- int getMaxDelayMsecs() const { return _maxDelay; }
+ float getMaxDelayMsecs() const { return _maxDelay; }
 float getMaxAttenuation() const { return _maxAttenuation; }
- int getMinDelayMsecs() const { return _minDelay; }
+ float getMinDelayMsecs() const { return _minDelay; }
 float getMinAttenuation() const { return _minAttenuation; }
+ float getDelayFromDistance(float distance);

public slots:
 void processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
@@ -60,10 +61,10 @@ private:
 int _reflections;

 int _delayCount;
- int _totalDelay;
- int _averageDelay;
- int _maxDelay;
- int _minDelay;
+ float _totalDelay;
+ float _averageDelay;
+ float _maxDelay;
+ float _minDelay;

 int _attenuationCount;
 float _totalAttenuation;
diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp
index d000b9cd6a..f0c7b27780 100644
--- a/interface/src/Menu.cpp
+++ b/interface/src/Menu.cpp
@@ -375,6 +375,14 @@ Menu::Menu() :
 addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingIncudeOriginal,
 Qt::CTRL | Qt::SHIFT | Qt::Key_O,
 true);
+
+ addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingSeparateEars,
+ Qt::CTRL | Qt::SHIFT | Qt::Key_E,
+ true);
+
+ addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingPreDelay,
+ Qt::CTRL | Qt::SHIFT | Qt::Key_D,
+ true);
 addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter,
 Qt::CTRL | Qt::SHIFT | Qt::Key_F,
 false);
diff --git a/interface/src/Menu.h b/interface/src/Menu.h
index 3d3961edaa..fbaf8b57a7 100644
--- a/interface/src/Menu.h
+++ b/interface/src/Menu.h
@@ -245,6 +245,8 @@ namespace MenuOption {
 const QString AudioToneInjection = "Inject Test Tone";
 const QString AudioSpatialProcessing = "Audio Spatial Processing";
 const QString AudioSpatialProcessingIncudeOriginal = "Audio Spatial Processing includes Original";
+ const QString AudioSpatialProcessingSeparateEars = "Audio Spatial Processing separates ears";
+ const QString AudioSpatialProcessingPreDelay = "Audio Spatial Processing add Pre-Delay";
 const QString EchoServerAudio = "Echo Server Audio";
 const QString EchoLocalAudio = "Echo Local Audio";
 const QString MuteAudio = "Mute Microphone";

From 593fc6c96309acdc4534d841a89d862e1e0cd153 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Fri, 4 Apr 2014 17:29:39 -0700
Subject: [PATCH 19/64] lots of knobs and dials

---
 interface/src/Application.cpp | 25 +++--
 interface/src/AudioReflector.cpp | 169 +++++++++++++++++++------------
 interface/src/AudioReflector.h | 22 +++-
 interface/src/Menu.cpp | 14 ++-
 interface/src/Menu.h |
2 + 5 files changed, 154 insertions(+), 78 deletions(-) diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index 89f6a8e223..ac5a374a1f 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -2787,7 +2787,7 @@ void Application::displayStats() { glm::vec3 avatarPos = _myAvatar->getPosition(); - lines = _statsExpanded ? 5 : 3; + lines = _statsExpanded ? 8 : 3; displayStatsBackground(backgroundColor, horizontalOffset, 0, _glWidget->width() - (mirrorEnabled ? 301 : 411) - horizontalOffset, lines * STATS_PELS_PER_LINE + 10); horizontalOffset += 5; @@ -2840,26 +2840,34 @@ void Application::displayStats() { // add some reflection stats char reflectionsStatus[128]; - sprintf(reflectionsStatus, "Reflections: %d, Pre-Delay: %f, Separate Ears:%s", + sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s", _audioReflector.getReflections(), - _audioReflector.getDelayFromDistance(0.0f), - debug::valueOf(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars))); + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal) + ? "with" : "without"), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars) + ? "two" : "one"), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource) + ? "stereo" : "mono") + ); verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT); - sprintf(reflectionsStatus, "Delay: average %f, max %f, min %f", + sprintf(reflectionsStatus, "Delay: pre: %f, average %f, max %f, min %f, speed: %f", + _audioReflector.getDelayFromDistance(0.0f), _audioReflector.getAverageDelayMsecs(), _audioReflector.getMaxDelayMsecs(), - _audioReflector.getMinDelayMsecs()); + _audioReflector.getMinDelayMsecs(), + _audioReflector.getSoundMsPerMeter()); verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT); - sprintf(reflectionsStatus, "Attenuation: average %f, max %f, min %f", + sprintf(reflectionsStatus, "Attenuation: average %f, max %f, min %f, distance scale: %f", _audioReflector.getAverageAttenuation(), _audioReflector.getMaxAttenuation(), - _audioReflector.getMinAttenuation()); + _audioReflector.getMinAttenuation(), + _audioReflector.getDistanceAttenuationScalingFactor()); verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT); @@ -3725,6 +3733,7 @@ void Application::loadScript(const QString& scriptName) { scriptEngine->registerGlobalObject("Menu", MenuScriptingInterface::getInstance()); scriptEngine->registerGlobalObject("Settings", SettingsScriptingInterface::getInstance()); scriptEngine->registerGlobalObject("AudioDevice", AudioDeviceScriptingInterface::getInstance()); + scriptEngine->registerGlobalObject("AudioReflector", &_audioReflector); QThread* workerThread = new QThread(this); diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index b7ee81ff1c..9a4da87014 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -11,8 +11,18 @@ #include "AudioReflector.h" #include "Menu.h" + +const float DEFAULT_PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections +const float DEFAULT_MS_DELAY_PER_METER = 3.0f; +const float MINIMUM_ATTENUATION_TO_REFLECT = 1.0f / 256.0f; +const float 
DEFAULT_DISTANCE_SCALING_FACTOR = 2.0f; + + AudioReflector::AudioReflector(QObject* parent) : - QObject(parent) + QObject(parent), + _preDelay(DEFAULT_PRE_DELAY), + _soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER), + _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR) { reset(); } @@ -34,14 +44,12 @@ void AudioReflector::render() { // = 3ms per meter // attenuation = // BOUNCE_ATTENUATION_FACTOR [0.5] * (1/(1+distance)) -const float PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections float AudioReflector::getDelayFromDistance(float distance) { - const int MS_DELAY_PER_METER = 3; - float delay = (MS_DELAY_PER_METER * distance); + float delay = (_soundMsPerMeter * distance); if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) { - delay += PRE_DELAY; + delay += _preDelay; } @@ -52,14 +60,14 @@ float AudioReflector::getDelayFromDistance(float distance) { const float PER_BOUNCE_ATTENUATION_FACTOR = 0.5f; // **option 2**: we're not using these -const float BOUNCE_ATTENUATION_FACTOR = 0.125f; +//const float BOUNCE_ATTENUATION_FACTOR = 0.125f; // each bounce we adjust our attenuation by this factor, the result is an asymptotically decreasing attenuation... // 0.125, 0.25, 0.5, ... -const float PER_BOUNCE_ATTENUATION_ADJUSTMENT = 2.0f; +//const float PER_BOUNCE_ATTENUATION_ADJUSTMENT = 2.0f; // we don't grow larger than this, which means by the 4th bounce we don't get that much less quiet -const float MAX_BOUNCE_ATTENUATION = 0.99f; +//const float MAX_BOUNCE_ATTENUATION = 0.99f; -float getDistanceAttenuationCoefficient(float distance) { +float AudioReflector::getDistanceAttenuationCoefficient(float distance) { const float DISTANCE_SCALE = 2.5f; const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f; const float DISTANCE_LOG_BASE = 2.5f; @@ -72,13 +80,15 @@ float getDistanceAttenuationCoefficient(float distance) { DISTANCE_SCALE_LOG + (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1); - const float DISTANCE_SCALING_FACTOR = 2.0f; - - distanceCoefficient = std::min(1.0f, distanceCoefficient * DISTANCE_SCALING_FACTOR); + distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor()); return distanceCoefficient; } +float getBounceAttenuationCoefficient(int bounceCount) { + return PER_BOUNCE_ATTENUATION_FACTOR * bounceCount; +} + glm::vec3 getFaceNormal(BoxFace face) { float surfaceRandomness = randFloatInRange(0.99,1.0); float surfaceRemainder = (1.0f - surfaceRandomness)/2.0f; @@ -128,29 +138,52 @@ void AudioReflector::calculateAllReflections() { // only recalculate when we've moved... // TODO: what about case where new voxels are added in front of us??? - if (_reflections == 0 || _myAvatar->getHead()->getPosition() != _origin) { + bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented); + glm::quat orientation = wantHeadOrientation ? 
_myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation(); + + bool shouldRecalc = _reflections == 0 || _myAvatar->getHead()->getPosition() != _origin || (orientation != _orientation); + + /* + qDebug() << "wantHeadOrientation=" << wantHeadOrientation; + + qDebug(" _myAvatar->getHead()->getPosition()=%f,%f,%f", + _myAvatar->getHead()->getPosition().x, + _myAvatar->getHead()->getPosition().y, + _myAvatar->getHead()->getPosition().z); + + qDebug(" _origin=%f,%f,%f", + _origin.x, + _origin.y, + _origin.z); + + qDebug(" orientation=%f,%f,%f,%f", + orientation.x, + orientation.y, + orientation.z, + orientation.w); + + qDebug(" _orientation=%f,%f,%f,%f", + _orientation.x, + _orientation.y, + _orientation.z, + _orientation.w); + */ + if (shouldRecalc) { + //qDebug() << "origin or orientation has changed..."; + QMutexLocker locker(&_mutex); - qDebug() << "origin has changed..."; - qDebug(" _myAvatar->getHead()->getPosition()=%f,%f,%f", - _myAvatar->getHead()->getPosition().x, - _myAvatar->getHead()->getPosition().y, - _myAvatar->getHead()->getPosition().z); - - qDebug(" _origin=%f,%f,%f", - _origin.x, - _origin.y, - _origin.z); quint64 start = usecTimestampNow(); _origin = _myAvatar->getHead()->getPosition(); + glm::vec3 averageEarPosition = _myAvatar->getHead()->getPosition(); - glm::quat orientation = _myAvatar->getOrientation(); // _myAvatar->getHead()->getOrientation(); - glm::vec3 right = glm::normalize(orientation * IDENTITY_RIGHT); - glm::vec3 up = glm::normalize(orientation * IDENTITY_UP); - glm::vec3 front = glm::normalize(orientation * IDENTITY_FRONT); + _orientation = orientation; + glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT); + glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP); + glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT); glm::vec3 left = -right; glm::vec3 down = -up; glm::vec3 back = -front; @@ -163,33 +196,31 @@ void AudioReflector::calculateAllReflections() { glm::vec3 backRightDown = glm::normalize(back + right + down); glm::vec3 backLeftDown = glm::normalize(back + left + down); - const int BOUNCE_COUNT = 5; - - _frontRightUpReflections = calculateReflections(_origin, frontRightUp, BOUNCE_COUNT); - _frontLeftUpReflections = calculateReflections(_origin, frontLeftUp, BOUNCE_COUNT); - _backRightUpReflections = calculateReflections(_origin, backRightUp, BOUNCE_COUNT); - _backLeftUpReflections = calculateReflections(_origin, backLeftUp, BOUNCE_COUNT); - _frontRightDownReflections = calculateReflections(_origin, frontRightDown, BOUNCE_COUNT); - _frontLeftDownReflections = calculateReflections(_origin, frontLeftDown, BOUNCE_COUNT); - _backRightDownReflections = calculateReflections(_origin, backRightDown, BOUNCE_COUNT); - _backLeftDownReflections = calculateReflections(_origin, backLeftDown, BOUNCE_COUNT); - _frontReflections = calculateReflections(_origin, front, BOUNCE_COUNT); - _backReflections = calculateReflections(_origin, back, BOUNCE_COUNT); - _leftReflections = calculateReflections(_origin, left, BOUNCE_COUNT); - _rightReflections = calculateReflections(_origin, right, BOUNCE_COUNT); - _upReflections = calculateReflections(_origin, up, BOUNCE_COUNT); - _downReflections = calculateReflections(_origin, down, BOUNCE_COUNT); + _frontRightUpReflections = calculateReflections(averageEarPosition, _origin, frontRightUp); + _frontLeftUpReflections = calculateReflections(averageEarPosition, _origin, frontLeftUp); + _backRightUpReflections = calculateReflections(averageEarPosition, _origin, backRightUp); + 
_backLeftUpReflections = calculateReflections(averageEarPosition, _origin, backLeftUp);
+ _frontRightDownReflections = calculateReflections(averageEarPosition, _origin, frontRightDown);
+ _frontLeftDownReflections = calculateReflections(averageEarPosition, _origin, frontLeftDown);
+ _backRightDownReflections = calculateReflections(averageEarPosition, _origin, backRightDown);
+ _backLeftDownReflections = calculateReflections(averageEarPosition, _origin, backLeftDown);
+ _frontReflections = calculateReflections(averageEarPosition, _origin, front);
+ _backReflections = calculateReflections(averageEarPosition, _origin, back);
+ _leftReflections = calculateReflections(averageEarPosition, _origin, left);
+ _rightReflections = calculateReflections(averageEarPosition, _origin, right);
+ _upReflections = calculateReflections(averageEarPosition, _origin, up);
+ _downReflections = calculateReflections(averageEarPosition, _origin, down);

 quint64 end = usecTimestampNow();

 reset();
-
+ //qDebug() << "Reflections recalculated in " << (end - start) << "usecs";
 }
}

-QVector<glm::vec3> AudioReflector::calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, int maxBounces) {
+QVector<glm::vec3> AudioReflector::calculateReflections(const glm::vec3& earPosition, const glm::vec3& origin, const glm::vec3& originalDirection) {
 QVector<glm::vec3> reflectionPoints;
 glm::vec3 start = origin;
@@ -197,16 +228,28 @@ QVector<glm::vec3> AudioReflector::calculateReflections(const glm::vec3& origin,
 float distance;
 BoxFace face;
 const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
+ float currentAttenuation = 1.0f;
+ float totalDistance = 0.0f;
+ int bounceCount = 1;

- for (int i = 0; i < maxBounces; i++) {
+ while (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) {
 if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
 glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));
+
+ totalDistance += glm::distance(start, end);
+ float earDistance = glm::distance(end, earPosition);
+ float totalDistanceToEar = earDistance + distance;
+ currentAttenuation = getDistanceAttenuationCoefficient(totalDistanceToEar) * getBounceAttenuationCoefficient(bounceCount);

+ if (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) {
+ reflectionPoints.push_back(end);
+ glm::vec3 faceNormal = getFaceNormal(face);
+ direction = glm::normalize(glm::reflect(direction,faceNormal));
+ start = end;
+ bounceCount++;
+ }
+ } else {
+ currentAttenuation = 0.0f;
 }
 }
 return reflectionPoints;
@@ -234,6 +277,7 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector
 bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
+ bool wantStereo = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource);
 glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() :
 _myAvatar->getHead()->getPosition();
 glm::vec3 leftEarPosition = wantEarSeparation ?
_myAvatar->getHead()->getLeftEarPosition() :
@@ -254,12 +298,10 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector
 void echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples,
 unsigned int sampleTime, int sampleRate);
- QVector<glm::vec3> calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, int maxBounces);
+ QVector<glm::vec3> calculateReflections(const glm::vec3& earPosition, const glm::vec3& origin, const glm::vec3& originalDirection);
 void drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections);
 void calculateAllReflections();
 void reset();
+ float getDistanceAttenuationCoefficient(float distance);
 int _reflections;
@@ -73,6 +84,7 @@ private:
 float _minAttenuation;
 glm::vec3 _origin;
+ glm::quat _orientation;
 QVector<glm::vec3> _frontRightUpReflections;
 QVector<glm::vec3> _frontLeftUpReflections;
 QVector<glm::vec3> _backRightUpReflections;
@@ -89,6 +101,10 @@ QVector<glm::vec3> _downReflections;
 QMutex _mutex;
+
+ float _preDelay;
+ float _soundMsPerMeter;
+ float _distanceAttenuationScalingFactor;
};
diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp
index f0c7b27780..1a0adb5550 100644
--- a/interface/src/Menu.cpp
+++ b/interface/src/Menu.cpp
@@ -331,8 +331,8 @@ Menu::Menu() :
 QMenu* renderDebugMenu = developerMenu->addMenu("Render Debugging Tools");
- addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::PipelineWarnings, Qt::CTRL | Qt::SHIFT | Qt::Key_P);
- addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::SuppressShortTimings, Qt::CTRL | Qt::SHIFT | Qt::Key_S);
+ addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::PipelineWarnings);
+ addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::SuppressShortTimings);
 addCheckableActionToQMenuAndActionHash(renderDebugMenu,
 MenuOption::CullSharedFaces,
@@ -383,7 +383,15 @@ Menu::Menu() :
 addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingPreDelay,
 Qt::CTRL | Qt::SHIFT | Qt::Key_D,
 true);
-
+
+ addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingStereoSource,
+ Qt::CTRL | Qt::SHIFT | Qt::Key_S,
+ true);
+
+ addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingHeadOriented,
+ Qt::CTRL | Qt::SHIFT | Qt::Key_H,
+ true);
+
 addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter,
 Qt::CTRL | Qt::SHIFT | Qt::Key_F,
 false);
diff --git a/interface/src/Menu.h b/interface/src/Menu.h
index fbaf8b57a7..fd7873fae9 100644
--- a/interface/src/Menu.h
+++ b/interface/src/Menu.h
@@ -247,6 +247,8 @@ namespace MenuOption {
 const QString AudioSpatialProcessingIncudeOriginal = "Audio Spatial Processing includes Original";
 const QString AudioSpatialProcessingSeparateEars = "Audio Spatial Processing separates ears";
 const QString AudioSpatialProcessingPreDelay = "Audio Spatial Processing add Pre-Delay";
+ const QString AudioSpatialProcessingStereoSource = "Audio Spatial Processing Stereo Source";
+ const QString AudioSpatialProcessingHeadOriented = "Audio Spatial Processing Head Oriented";
 const QString EchoServerAudio = "Echo Server Audio";
 const QString EchoLocalAudio = "Echo Local Audio";
 const QString MuteAudio = "Mute Microphone";

From d198b04daf891655fb6fbff88eabbd3256882fc6 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Fri, 4 Apr 2014 17:29:50 -0700
Subject: [PATCH 20/64] lots of knobs and dials

---
 examples/audioReflectorTools.js | 208 ++++++++++++++++++++++++++++++++
 1 file changed, 208 insertions(+)
 create mode 100644 examples/audioReflectorTools.js
diff --git a/examples/audioReflectorTools.js b/examples/audioReflectorTools.js
new file mode 100644
index 0000000000..e19e757367
--- /dev/null
+++ b/examples/audioReflectorTools.js
@@ -0,0 +1,208 @@
+//
+// audioReflectorTools.js
+// hifi
+//
+// Created by Brad Hefta-Gaub on 2/14/14.
+// Copyright (c) 2014 HighFidelity, Inc. All rights reserved.
+//
+// This is an example script that demonstrates use of the Overlays class to build sliders that adjust the AudioReflector settings
+//
+//


+var delayScale = 100.0;
+var speedScale = 20;
+var factorScale = 5.0;

+// This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to
+// move the slider
+var delayY = 300;
+var delaySlider = Overlays.addOverlay("image", {
+ // alternate form of expressing bounds
+ bounds: { x: 100, y: delayY, width: 150, height: 35},
+ subImage: { x: 46, y: 0, width: 200, height: 71 },
+ imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
+ color: { red: 255, green: 255, blue: 255},
+ alpha: 1
+ });
+
+// This is the thumb of our slider
+var delayMinThumbX = 110;
+var delayMaxThumbX = delayMinThumbX + 110;
+var delayThumbX = (delayMinThumbX + delayMaxThumbX) / 2;
+var delayThumb = Overlays.addOverlay("image", {
+ x: delayThumbX,
+ y: delayY + 9,
+ width: 18,
+ height: 17,
+ imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
+ color: { red: 255, green: 255, blue: 255},
+ alpha: 1
+ });
+
+// This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to
+// move the slider
+var speedY = 350;
+var speedSlider = Overlays.addOverlay("image", {
+ // alternate form of expressing bounds
+ bounds: { x: 100, y: speedY, width: 150, height: 35},
+ subImage: { x: 46, y: 0, width: 200, height: 71 },
+ imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
+ color: { red: 255, green: 255, blue: 255},
+ alpha: 1
+ });
+
+// This is the thumb of our slider
+var speedMinThumbX = 110;
+var speedMaxThumbX = speedMinThumbX + 110;
+var speedThumbX = (speedMinThumbX + speedMaxThumbX) / 2;
+var speedThumb = Overlays.addOverlay("image", {
+ x: speedThumbX,
+ y: speedY+9,
+ width: 18,
+ height: 17,
+ imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
+ color: { red: 255, green: 255, blue: 255},
+ alpha: 1
+ });
+
+// This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to
+// move the slider
+var factorY = 400;
+var factorSlider = Overlays.addOverlay("image", {
+ // alternate form of expressing bounds
+ bounds: { x: 100, y: factorY, width: 150, height: 35},
+ subImage: { x: 46, y: 0, width: 200, height: 71 },
+ imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
+ color: { red: 255, green: 255, blue: 255},
+ alpha: 1
+ });
+
+// This is the thumb of our slider
+var factorMinThumbX = 110;
+var factorMaxThumbX = factorMinThumbX + 110;
+var factorThumbX = (factorMinThumbX + factorMaxThumbX) / 2;
+var factorThumb = Overlays.addOverlay("image", {
+ x: factorThumbX,
+ y: factorY+9,
+ width: 18,
+ height: 17,
+ imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
+ color: { red: 255, green: 255, blue: 255},
+ alpha: 1
+ });
+
+
+// When our script shuts down, we should clean up all of our overlays
+function scriptEnding() {
+ Overlays.deleteOverlay(factorThumb);
+ Overlays.deleteOverlay(factorSlider);
+ Overlays.deleteOverlay(speedThumb);
+ Overlays.deleteOverlay(speedSlider);
+ Overlays.deleteOverlay(delayThumb);
+ Overlays.deleteOverlay(delaySlider);
+}
+Script.scriptEnding.connect(scriptEnding);
+
+
+var count = 0;
+
+// Our update() function is called at approximately 60fps, and we will use it to animate our various overlays
+function update(deltaTime) {
+ count++;
+}
+Script.update.connect(update);
+
+
+// The slider is handled in the mouse event callbacks.
+var movingSliderDelay = false;
+var movingSliderSpeed = false;
+var movingSliderFactor = false;
+var thumbClickOffsetX = 0;
+function mouseMoveEvent(event) {
+ if (movingSliderDelay) {
+ newThumbX = event.x - thumbClickOffsetX;
+ if (newThumbX < delayMinThumbX) {
+ newThumbX = delayMinThumbX;
+ }
+ if (newThumbX > delayMaxThumbX) {
+ newThumbX = delayMaxThumbX;
+ }
+ Overlays.editOverlay(delayThumb, { x: newThumbX } );
+ var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
+ AudioReflector.setPreDelay(delay);
+ }
+ if (movingSliderSpeed) {
+ newThumbX = event.x - thumbClickOffsetX;
+ if (newThumbX < speedMinThumbX) {
+ newThumbX = speedMinThumbX;
+ }
+ if (newThumbX > speedMaxThumbX) {
+ newThumbX = speedMaxThumbX;
+ }
+ Overlays.editOverlay(speedThumb, { x: newThumbX } );
+ var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale;
+ AudioReflector.setSoundMsPerMeter(speed);
+ }
+ if (movingSliderFactor) {
+ newThumbX = event.x - thumbClickOffsetX;
+ if (newThumbX < factorMinThumbX) {
+ newThumbX = factorMinThumbX;
+ }
+ if (newThumbX > factorMaxThumbX) {
+ newThumbX = factorMaxThumbX;
+ }
+ Overlays.editOverlay(factorThumb, { x: newThumbX } );
+ var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
+ AudioReflector.setDistanceAttenuationScalingFactor(factor);
+ }
+}
+
+// we also handle click detection in our mousePressEvent()
+function mousePressEvent(event) {
+ var clickedOverlay = Overlays.getOverlayAtPoint({x: event.x, y: event.y});
+
+ // If the user clicked on the thumb, handle the slider logic
+ if (clickedOverlay == delayThumb) {
+ movingSliderDelay = true;
+ thumbClickOffsetX = event.x - delayThumbX;
+ }
+
+ // If the user clicked on the thumb, handle the slider logic
+ if (clickedOverlay == speedThumb) {
+ movingSliderSpeed = true;
+ thumbClickOffsetX = event.x - speedThumbX;
+ }
+
+ // If the user clicked on the thumb, handle the slider logic
+ if (clickedOverlay == factorThumb) {
+ print("movingSliderFactor...");
+ movingSliderFactor = true;
+ thumbClickOffsetX = event.x - factorThumbX;
+ }
+}
+function mouseReleaseEvent(event) {
+ if (movingSliderDelay) {
+ movingSliderDelay = false;
+ var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
+ AudioReflector.setPreDelay(delay);
+ delayThumbX = newThumbX;
+ }
+ if (movingSliderSpeed) {
+ movingSliderSpeed = false;
+ var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale;
+ AudioReflector.setSoundMsPerMeter(speed);
+ speedThumbX = newThumbX;
+ }
+ if (movingSliderFactor) {
+ movingSliderFactor = false;
+ var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
+ AudioReflector.setDistanceAttenuationScalingFactor(factor);
+ factorThumbX = newThumbX;
+ }
+}
+
+Controller.mouseMoveEvent.connect(mouseMoveEvent);
+Controller.mousePressEvent.connect(mousePressEvent);
+Controller.mouseReleaseEvent.connect(mouseReleaseEvent);

From
dfd6411a4fd83d3636ca8e3d7758d1b04f37e3a4 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Sun, 6 Apr 2014 10:11:11 -0700 Subject: [PATCH 21/64] add hooks for local audio echo, first cut at diffusion --- interface/src/Application.cpp | 3 +- interface/src/Audio.cpp | 9 ++- interface/src/Audio.h | 3 +- interface/src/AudioReflector.cpp | 134 +++++++++++++++++++++++++++---- interface/src/AudioReflector.h | 15 +++- 5 files changed, 142 insertions(+), 22 deletions(-) diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index ac5a374a1f..4a7da51887 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -1603,7 +1603,8 @@ void Application::init() { _audioReflector.setMyAvatar(getAvatar()); _audioReflector.setVoxels(_voxels.getTree()); _audioReflector.setAudio(getAudio()); - connect(getAudio(), &Audio::processSpatialAudio, &_audioReflector, &AudioReflector::processSpatialAudio,Qt::DirectConnection); + connect(getAudio(), &Audio::processInboundAudio, &_audioReflector, &AudioReflector::processInboundAudio,Qt::DirectConnection); + connect(getAudio(), &Audio::processLocalAudio, &_audioReflector, &AudioReflector::processLocalAudio,Qt::DirectConnection); } void Application::closeMirrorView() { diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 708bb72cc7..f7bb3f78c9 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -360,6 +360,11 @@ void Audio::handleAudioInput() { QByteArray inputByteArray = _inputDevice->readAll(); + // send our local loopback to any interested parties + if (_processSpatialAudio && !_muted && _audioOutput) { + emit processLocalAudio(_spatialAudioStart, inputByteArray, _inputFormat); + } + if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput) { // if this person wants local loopback add that to the locally injected audio @@ -367,7 +372,7 @@ void Audio::handleAudioInput() { // we didn't have the loopback output device going so set that up now _loopbackOutputDevice = _loopbackAudioOutput->start(); } - + if (_inputFormat == _outputFormat) { if (_loopbackOutputDevice) { _loopbackOutputDevice->write(inputByteArray); @@ -756,7 +761,7 @@ void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) { } // Send audio off for spatial processing - emit processSpatialAudio(sampleTime, buffer, _desiredOutputFormat); + emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat); // copy the samples we'll resample from the spatial audio ring buffer - this also // pushes the read pointer of the spatial audio ring buffer forwards diff --git a/interface/src/Audio.h b/interface/src/Audio.h index c3417ae891..e449d8f6c7 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -102,7 +102,8 @@ public slots: signals: bool muteToggled(); - void processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); + void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); + void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); private: diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 9a4da87014..6c4e78eae2 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -16,13 +16,16 @@ const float DEFAULT_PRE_DELAY = 20.0f; // this delay in msecs will always be add const float DEFAULT_MS_DELAY_PER_METER = 3.0f; const float MINIMUM_ATTENUATION_TO_REFLECT = 1.0f / 256.0f; const float 
DEFAULT_DISTANCE_SCALING_FACTOR = 2.0f; - +const float MAXIMUM_DELAY_MS = 1000.0 * 20.0f; // stop reflecting after path is this long +const int DEFAULT_DIFFUSION_FANOUT = 2; +const int ABSOLUTE_MAXIMUM_BOUNCE_COUNT = 10; AudioReflector::AudioReflector(QObject* parent) : QObject(parent), _preDelay(DEFAULT_PRE_DELAY), _soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER), - _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR) + _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR), + _diffusionFanout(DEFAULT_DIFFUSION_FANOUT) { reset(); } @@ -90,22 +93,27 @@ float getBounceAttenuationCoefficient(int bounceCount) { } glm::vec3 getFaceNormal(BoxFace face) { - float surfaceRandomness = randFloatInRange(0.99,1.0); + glm::vec3 slightlyRandomFaceNormal; + + float surfaceRandomness = randFloatInRange(0.99f,1.0f); float surfaceRemainder = (1.0f - surfaceRandomness)/2.0f; + float altRemainderSignA = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; + float altRemainderSignB = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; + if (face == MIN_X_FACE) { - return glm::vec3(-surfaceRandomness, surfaceRemainder, surfaceRemainder); + slightlyRandomFaceNormal = glm::vec3(-surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); } else if (face == MAX_X_FACE) { - return glm::vec3(surfaceRandomness, surfaceRemainder, surfaceRemainder); + slightlyRandomFaceNormal = glm::vec3(surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); } else if (face == MIN_Y_FACE) { - return glm::vec3(surfaceRemainder, -surfaceRandomness, surfaceRemainder); + slightlyRandomFaceNormal = glm::vec3(surfaceRemainder * altRemainderSignA, -surfaceRandomness, surfaceRemainder * altRemainderSignB); } else if (face == MAX_Y_FACE) { - return glm::vec3(surfaceRemainder, surfaceRandomness, surfaceRemainder); + slightlyRandomFaceNormal = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRandomness, surfaceRemainder * altRemainderSignB); } else if (face == MIN_Z_FACE) { - return glm::vec3(surfaceRemainder, surfaceRemainder, -surfaceRandomness); + slightlyRandomFaceNormal = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, -surfaceRandomness); } else if (face == MAX_Z_FACE) { - return glm::vec3(surfaceRemainder, surfaceRemainder, surfaceRandomness); + slightlyRandomFaceNormal = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, surfaceRandomness); } - return glm::vec3(0, 0, 0); //error case + return slightlyRandomFaceNormal; } void AudioReflector::reset() { @@ -214,13 +222,96 @@ void AudioReflector::calculateAllReflections() { quint64 end = usecTimestampNow(); reset(); - + //qDebug() << "Reflections recalculated in " << (end - start) << "usecs"; } } -QVector AudioReflector::calculateReflections(const glm::vec3& earPosition, const glm::vec3& origin, const glm::vec3& originalDirection) { +// TODO: add diffusion ratio. 
percentage of echo energy that diffuses +// so say that 50% of the energy that hits the echo point diffuses in fanout directions +void AudioReflector::calculateDiffusions(const glm::vec3& earPosition, const glm::vec3& origin, + const glm::vec3& thisReflection, float thisDistance, float thisAttenuation, int thisBounceCount, + BoxFace thisReflectionFace, QVector reflectionPoints) { + + //return; // do nothing + + QVector diffusionDirections; + + // diffusions fan out from random places on the semisphere of the collision point + for(int i = 0; i < _diffusionFanout; i++) { + glm::vec3 randomDirection; + + float surfaceRandomness = randFloatInRange(0.5f,1.0f); + float surfaceRemainder = (1.0f - surfaceRandomness)/2.0f; + float altRemainderSignA = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; + float altRemainderSignB = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; + + if (thisReflectionFace == MIN_X_FACE) { + randomDirection = glm::vec3(-surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); + } else if (thisReflectionFace == MAX_X_FACE) { + randomDirection = glm::vec3(surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); + } else if (thisReflectionFace == MIN_Y_FACE) { + randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, -surfaceRandomness, surfaceRemainder * altRemainderSignB); + } else if (thisReflectionFace == MAX_Y_FACE) { + randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRandomness, surfaceRemainder * altRemainderSignB); + } else if (thisReflectionFace == MIN_Z_FACE) { + randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, -surfaceRandomness); + } else if (thisReflectionFace == MAX_Z_FACE) { + randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, surfaceRandomness); + } + diffusionDirections.push_back(randomDirection); + } + + foreach(glm::vec3 direction, diffusionDirections) { + + glm::vec3 start = thisReflection; + OctreeElement* elementHit; + float distance; + BoxFace face; + const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point + float currentAttenuation = thisAttenuation; + float totalDistance = thisDistance; + float totalDelay = getDelayFromDistance(totalDistance); + int bounceCount = thisBounceCount; + + while (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS && bounceCount < ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { + if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { + glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); + + totalDistance += glm::distance(start, end); + float earDistance = glm::distance(end, earPosition); + float totalDistanceToEar = earDistance + distance; + totalDelay = getDelayFromDistance(totalDistanceToEar); + currentAttenuation = getDistanceAttenuationCoefficient(totalDistanceToEar) * getBounceAttenuationCoefficient(bounceCount); + + if (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS) { + reflectionPoints.push_back(end); + glm::vec3 faceNormal = getFaceNormal(face); + direction = glm::normalize(glm::reflect(direction,faceNormal)); + start = end; + bounceCount++; + + /* + // handle diffusion here + if (_diffusionFanout > 0 && bounceCount < ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { + glm::vec3 thisReflection = end; + calculateDiffusions(earPosition, origin, end, 
totalDistance, + currentAttenuation, bounceCount, face, reflectionPoints); + } + */ + } + } else { + currentAttenuation = 0.0f; + } + } + } +} + + +QVector AudioReflector::calculateReflections(const glm::vec3& earPosition, + const glm::vec3& origin, const glm::vec3& originalDirection) { + QVector reflectionPoints; glm::vec3 start = origin; glm::vec3 direction = originalDirection; @@ -230,23 +321,32 @@ QVector AudioReflector::calculateReflections(const glm::vec3& earPosi const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point float currentAttenuation = 1.0f; float totalDistance = 0.0f; + float totalDelay = 0.0f; int bounceCount = 1; - while (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) { + while (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS && bounceCount < ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); totalDistance += glm::distance(start, end); float earDistance = glm::distance(end, earPosition); float totalDistance = earDistance + distance; + totalDelay = getDelayFromDistance(totalDistance); currentAttenuation = getDistanceAttenuationCoefficient(totalDistance) * getBounceAttenuationCoefficient(bounceCount); - if (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) { + if (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS) { reflectionPoints.push_back(end); glm::vec3 faceNormal = getFaceNormal(face); direction = glm::normalize(glm::reflect(direction,faceNormal)); start = end; bounceCount++; + + // handle diffusion here + if (_diffusionFanout > 0) { + glm::vec3 thisReflection = end; + calculateDiffusions(earPosition, origin, end, totalDistance, + currentAttenuation, bounceCount, face, reflectionPoints); + } } } else { currentAttenuation = 0.0f; @@ -373,7 +473,11 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector calculateReflections(const glm::vec3& earPosition, const glm::vec3& origin, const glm::vec3& originalDirection); + void calculateDiffusions(const glm::vec3& earPosition, const glm::vec3& origin, + const glm::vec3& thisReflection, float thisDistance, float thisAttenuation, int thisBounceCount, + BoxFace thisReflectionFace, QVector reflectionPoints); + + void drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector& reflections); void calculateAllReflections(); @@ -105,6 +112,8 @@ private: float _preDelay; float _soundMsPerMeter; float _distanceAttenuationScalingFactor; + + int _diffusionFanout; // number of points of diffusion from each reflection point }; From a37921c1d5304194d8a41836965d0072b0786260 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Sun, 13 Apr 2014 19:09:07 -0700 Subject: [PATCH 22/64] move isNaN() to shared utils --- libraries/octree/src/ViewFrustum.cpp | 6 +----- libraries/shared/src/SharedUtil.cpp | 19 +++++++++++++++++++ libraries/shared/src/SharedUtil.h | 9 +++++++++ 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/libraries/octree/src/ViewFrustum.cpp b/libraries/octree/src/ViewFrustum.cpp index fa6873b093..bc617c9bc7 100644 --- a/libraries/octree/src/ViewFrustum.cpp +++ b/libraries/octree/src/ViewFrustum.cpp @@ -424,10 +424,6 @@ bool ViewFrustum::matches(const ViewFrustum& compareTo, bool debug) const { return result; } -bool isNaN(float f) { - return f != f; -} - bool ViewFrustum::isVerySimilar(const ViewFrustum& 
compareTo, bool debug) const {
     // Compute distance between the two positions
@@ -449,7 +445,7 @@ bool ViewFrustum::isVerySimilar(const ViewFrustum& compareTo, bool debug) const
     float angleEyeOffsetOrientation = compareTo._eyeOffsetOrientation == _eyeOffsetOrientation ? 0.0f : glm::degrees(glm::angle(dQEyeOffsetOrientation));
     if (isNaN(angleEyeOffsetOrientation)) {
-        angleOrientation = 0.0f;
+        angleEyeOffsetOrientation = 0.0f;
     }
 
     bool result =
diff --git a/libraries/shared/src/SharedUtil.cpp b/libraries/shared/src/SharedUtil.cpp
index efd5180d03..786302ae2a 100644
--- a/libraries/shared/src/SharedUtil.cpp
+++ b/libraries/shared/src/SharedUtil.cpp
@@ -661,3 +661,22 @@ glm::vec3 safeEulerAngles(const glm::quat& q) {
     }
 }
 
+bool isNaN(float f) {
+    return f != f;
+}
+
+
+bool isSimilarOrientation(const glm::quat& orientationA, const glm::quat& orientationB, float similarEnough) {
+    // Compute the angular distance between the two orientations
+    float angleOrientation = orientationA == orientationB ? 0.0f : glm::degrees(glm::angle(orientationA * glm::inverse(orientationB)));
+    if (isNaN(angleOrientation)) {
+        angleOrientation = 0.0f;
+    }
+    return (angleOrientation <= similarEnough);
+}
+
+bool isSimilarPosition(const glm::vec3& positionA, const glm::vec3& positionB, float similarEnough) {
+    // Compute the distance between the two points
+    float positionDistance = glm::distance(positionA, positionB);
+    return (positionDistance <= similarEnough);
+}
diff --git a/libraries/shared/src/SharedUtil.h b/libraries/shared/src/SharedUtil.h
index d8d686c63b..91b0054794 100644
--- a/libraries/shared/src/SharedUtil.h
+++ b/libraries/shared/src/SharedUtil.h
@@ -168,4 +168,13 @@ int unpackFloatVec3FromSignedTwoByteFixed(const unsigned char* sourceBuffer, glm
 /// \return vec3 with euler angles in radians
 glm::vec3 safeEulerAngles(const glm::quat& q);
 
+/// \return bool are two orientations similar to each other
+const float ORIENTATION_SIMILAR_ENOUGH = 5.0f; // 5 degrees in any direction
+bool isSimilarOrientation(const glm::quat& orientationA, const glm::quat& orientationB,
+                          float similarEnough = ORIENTATION_SIMILAR_ENOUGH);
+const float POSITION_SIMILAR_ENOUGH = 0.1f; // 0.1 meter
+bool isSimilarPosition(const glm::vec3& positionA, const glm::vec3& positionB, float similarEnough = POSITION_SIMILAR_ENOUGH);
+
+bool isNaN(float f);
+
 #endif /* defined(__hifi__SharedUtil__) */

From 38dbc56d1ad8ffb0aa0bc659a46e388099a616db Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Sun, 13 Apr 2014 19:09:28 -0700
Subject: [PATCH 23/64] first cut at new version of audio reflector

---
 interface/src/AudioReflector.cpp | 377 ++++++++++++++++++++++++++++++-
 interface/src/AudioReflector.h   |  81 ++++++-
 2 files changed, 445 insertions(+), 13 deletions(-)

diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index 6c4e78eae2..e870e01539 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -17,15 +17,22 @@ const float DEFAULT_MS_DELAY_PER_METER = 3.0f;
 const float MINIMUM_ATTENUATION_TO_REFLECT = 1.0f / 256.0f;
 const float DEFAULT_DISTANCE_SCALING_FACTOR = 2.0f;
 const float MAXIMUM_DELAY_MS = 1000.0 * 20.0f; // stop reflecting after path is this long
-const int DEFAULT_DIFFUSION_FANOUT = 2;
+const int DEFAULT_DIFFUSION_FANOUT = 5;
 const int ABSOLUTE_MAXIMUM_BOUNCE_COUNT = 10;
+const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
+
+const float DEFAULT_ABSORPTION_RATIO = 0.125; // 12.5% is absorbed
+const float DEFAULT_DIFFUSION_RATIO 
= 0.125; // 12.5% is diffused + AudioReflector::AudioReflector(QObject* parent) : QObject(parent), _preDelay(DEFAULT_PRE_DELAY), _soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER), _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR), - _diffusionFanout(DEFAULT_DIFFUSION_FANOUT) + _diffusionFanout(DEFAULT_DIFFUSION_FANOUT), + _absorptionRatio(DEFAULT_ABSORPTION_RATIO), + _diffusionRatio(DEFAULT_DIFFUSION_RATIO) { reset(); } @@ -37,7 +44,7 @@ void AudioReflector::render() { } if (_audio->getProcessSpatialAudio()) { - drawRays(); + newDrawRays(); } } @@ -187,7 +194,8 @@ void AudioReflector::calculateAllReflections() { _origin = _myAvatar->getHead()->getPosition(); glm::vec3 averageEarPosition = _myAvatar->getHead()->getPosition(); - + _listenerPosition = averageEarPosition; +qDebug() << "_listenerPosition:" << _listenerPosition.x << "," << _listenerPosition.y << "," << _listenerPosition.z; _orientation = orientation; glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT); glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP); @@ -340,13 +348,6 @@ QVector AudioReflector::calculateReflections(const glm::vec3& earPosi direction = glm::normalize(glm::reflect(direction,faceNormal)); start = end; bounceCount++; - - // handle diffusion here - if (_diffusionFanout > 0) { - glm::vec3 thisReflection = end; - calculateDiffusions(earPosition, origin, end, totalDistance, - currentAttenuation, bounceCount, face, reflectionPoints); - } } } else { currentAttenuation = 0.0f; @@ -478,6 +479,10 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray } void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { + return; // + +// DO NOTHING.... + //quint64 start = usecTimestampNow(); _maxDelay = 0; @@ -650,3 +655,353 @@ void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, co glEnable(GL_LIGHTING); // ?? } + + + +AudioPath::AudioPath(const glm::vec3& origin, const glm::vec3& direction, float attenuation, float delay, int bounceCount) : + startPoint(origin), + startDirection(direction), + startDelay(delay), + startAttenuation(attenuation), + + lastPoint(origin), + lastDirection(direction), + lastDistance(0.0f), + lastDelay(delay), + lastAttenuation(attenuation), + bounceCount(bounceCount), + + finalized(false), + reflections() +{ +} + + +void AudioReflector::addSoundSource(const glm::vec3& origin, const glm::vec3& initialDirection, + float initialAttenuation, float initialDelay) { + + AudioPath* path = new AudioPath(origin, initialDirection, initialAttenuation, initialDelay, 0); + _audioPaths.push_back(path); +} + +void AudioReflector::newCalculateAllReflections() { + // only recalculate when we've moved... + // TODO: what about case where new voxels are added in front of us??? + bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented); + glm::quat orientation = wantHeadOrientation ? 
_myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation(); + glm::vec3 origin = _myAvatar->getHead()->getPosition(); + glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition(); + + bool shouldRecalc = _audiblePoints.size() == 0 + || !isSimilarPosition(origin, _origin) + || !isSimilarOrientation(orientation, _orientation) + || !isSimilarPosition(listenerPosition, _listenerPosition); + + if (shouldRecalc) { + + /* + qDebug() << "_audiblePoints.size()=" << _audiblePoints.size(); + qDebug() << "isSimilarPosition(origin, _origin)=" << isSimilarPosition(origin, _origin); + qDebug() << "isSimilarPosition(listenerPosition, _listenerPosition)=" << isSimilarPosition(listenerPosition, _listenerPosition); + qDebug() << "isSimilarOrientation(orientation, _orientation)=" << isSimilarOrientation(orientation, _orientation); + if (!isSimilarOrientation(orientation, _orientation)) { + qDebug() << " orientation=" << orientation.x << "," << orientation.y << "," + << orientation.y << "," << orientation.w; + + qDebug() << " _orientation=" << _orientation.x << "," << _orientation.y << "," + << _orientation.y << "," << _orientation.w; + } + */ + + QMutexLocker locker(&_mutex); + quint64 start = usecTimestampNow(); + _origin = origin; + _orientation = orientation; + _listenerPosition = listenerPosition; + + anylizePaths(); // actually does the work + + quint64 end = usecTimestampNow(); + //reset(); + } +} + +void AudioReflector::newDrawRays() { + newCalculateAllReflections(); + + const glm::vec3 RED(1,0,0); + const glm::vec3 GREEN(0,1,0); + + int diffusionNumber = 0; + + QMutexLocker locker(&_mutex); + foreach(AudioPath* const& path, _audioPaths) { + + // if this is an original reflection, draw it in RED + if (path->startPoint == _origin) { + drawPath(path, RED); + } else { + diffusionNumber++; +//qDebug() << "drawing diffusion path:" << diffusionNumber << "length:" << path->reflections.size(); + drawPath(path, GREEN); + } + } +} + +void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) { + glm::vec3 start = path->startPoint; + glm::vec3 color = originalColor; + const float COLOR_ADJUST_PER_BOUNCE = 0.75f; + + foreach (glm::vec3 end, path->reflections) { + drawVector(start, end, color); + start = end; + color = color * COLOR_ADJUST_PER_BOUNCE; + } +} + + +void AudioReflector::anylizePaths() { + +qDebug() << "AudioReflector::anylizePaths()..."; + + // clear our _audioPaths + foreach(AudioPath* const& path, _audioPaths) { + delete path; + } + _audioPaths.clear(); + + _audiblePoints.clear(); // clear our audible points + + // add our initial paths + glm::vec3 averageEarPosition = _myAvatar->getHead()->getPosition(); + glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT); + glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP); + glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT); + glm::vec3 left = -right; + glm::vec3 down = -up; + glm::vec3 back = -front; + glm::vec3 frontRightUp = glm::normalize(front + right + up); + glm::vec3 frontLeftUp = glm::normalize(front + left + up); + glm::vec3 backRightUp = glm::normalize(back + right + up); + glm::vec3 backLeftUp = glm::normalize(back + left + up); + glm::vec3 frontRightDown = glm::normalize(front + right + down); + glm::vec3 frontLeftDown = glm::normalize(front + left + down); + glm::vec3 backRightDown = glm::normalize(back + right + down); + glm::vec3 backLeftDown = glm::normalize(back + left + down); + + float initialAttenuation = 1.0f; + + addSoundSource(_origin, right, initialAttenuation, 
_preDelay); + addSoundSource(_origin, front, initialAttenuation, _preDelay); + addSoundSource(_origin, up, initialAttenuation, _preDelay); + addSoundSource(_origin, down, initialAttenuation, _preDelay); + addSoundSource(_origin, back, initialAttenuation, _preDelay); + addSoundSource(_origin, left, initialAttenuation, _preDelay); + + addSoundSource(_origin, frontRightUp, initialAttenuation, _preDelay); + addSoundSource(_origin, frontLeftUp, initialAttenuation, _preDelay); + addSoundSource(_origin, backRightUp, initialAttenuation, _preDelay); + addSoundSource(_origin, backLeftUp, initialAttenuation, _preDelay); + addSoundSource(_origin, frontRightDown, initialAttenuation, _preDelay); + addSoundSource(_origin, frontLeftDown, initialAttenuation, _preDelay); + addSoundSource(_origin, backRightDown, initialAttenuation, _preDelay); + addSoundSource(_origin, backLeftDown, initialAttenuation, _preDelay); + + // loop through all our + int steps = 0; + int acitvePaths = _audioPaths.size(); // when we start, all paths are active + while(acitvePaths > 0) { + acitvePaths = anylizePathsSingleStep(); + steps++; + qDebug() << "acitvePaths=" << acitvePaths << "steps=" << steps << "_audioPaths.size()=" << _audioPaths.size(); + } +} + +int AudioReflector::anylizePathsSingleStep() { + // iterate all the active sound paths, calculate one step per active path + + int activePaths = 0; + foreach(AudioPath* const& path, _audioPaths) { + + bool isDiffusion = (path->startPoint != _origin); + + /* + qDebug() << "ray intersection... " + << " startPoint=[" << path->startPoint.x << "," << path->startPoint.y << "," << path->startPoint.z << "]" + << " _origin=[" << _origin.x << "," << _origin.y << "," << _origin.z << "]" + << " bouceCount= " << path->bounceCount + << " isDiffusion=" << isDiffusion; + */ + + glm::vec3 start = path->lastPoint; + glm::vec3 direction = path->lastDirection; + OctreeElement* elementHit; // output from findRayIntersection + float distance; // output from findRayIntersection + BoxFace face; // output from findRayIntersection + + float currentAttenuation = path->lastAttenuation; + float currentDelay = path->lastDelay; // start with our delay so far + float pathDistance = path->lastDistance; + float totalDelay = path->lastDelay; // start with our delay so far + unsigned int bounceCount = path->bounceCount; + + if (!path->finalized) { + activePaths++; + + // quick hack to stop early reflections right away... + //if (!isDiffusion && path->bounceCount > 1) { + // path->finalized = true; + // qDebug() << "stopping reflections on first bounce!"; + //} else + if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { + path->finalized = true; + if (isDiffusion) { + qDebug() << "diffusion bounceCount too high!"; + } + } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { + glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); + + pathDistance += glm::distance(start, end); + + + /* + qDebug() << "ray intersection... " + << " startPoint=[" << path->startPoint.x << "," << path->startPoint.y << "," << path->startPoint.z << "]" + << " bouceCount= " << path->bounceCount + << " end=[" << end.x << "," << end.y << "," << end.z << "]" + << " pathDistance=" << pathDistance; + */ + + + // We aren't using this... should we be???? 
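// (An illustrative aside, not part of the patch: the to-listener distance computed just
// below is what turns a path into an audible delay. With getDelayFromDistance() scaling
// at DEFAULT_MS_DELAY_PER_METER (3 ms per meter), the total delay of a reflection is the
// delay along the bounce path plus the final leg back to the listener; a minimal sketch:
//
//     float toListener = glm::distance(end, _listenerPosition);         // final leg home
//     float delayMs = getDelayFromDistance(pathDistance + toListener);  // (10m + 5m) * 3 = 45 ms
//
// so a ray that has traveled 10 meters and reflects 5 meters from the listener arrives
// roughly 45 ms after the dry signal, plus any configured pre-delay.)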
+ float toListenerDistance = glm::distance(end, _listenerPosition); + float totalDistance = toListenerDistance + pathDistance; + + // adjust our current delay by just the delay from the most recent ray + currentDelay += getDelayFromDistance(distance); + + // adjust our previous attenuation based on the distance traveled in last ray + currentAttenuation *= getDistanceAttenuationCoefficient(distance); + + // now we know the current attenuation for the "perfect" reflection case, but we now incorporate + // our surface materials to determine how much of this ray is absorbed, reflected, and diffused + SurfaceCharacteristics material = getSurfaceCharacteristics(elementHit); + + float reflectiveAttenuation = currentAttenuation * material.reflectiveRatio; + float totalDiffusionAttenuation = currentAttenuation * material.diffusionRatio; + float partialDiffusionAttenuation = totalDiffusionAttenuation / _diffusionFanout; + + // total delay includes the bounce back to listener + totalDelay = getDelayFromDistance(totalDistance); + float toListenerAttenuation = getDistanceAttenuationCoefficient(toListenerDistance); + //qDebug() << "toListenerDistance=" << toListenerDistance; + //qDebug() << "toListenerAttenuation=" << toListenerAttenuation; + + // if our resulting partial diffusion attenuation, is still above our minimum attenuation + // then we add new paths for each diffusion point + if ((partialDiffusionAttenuation * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT + && totalDelay < MAXIMUM_DELAY_MS) { + + // add sound sources for the normal + glm::vec3 faceNormal = getFaceNormal(face); + addSoundSource(end, faceNormal, partialDiffusionAttenuation, currentDelay); + + // diffusions fan out from random places on the semisphere of the collision point + for(int i = 1; i < _diffusionFanout; i++) { + glm::vec3 randomDirection; + + float surfaceRandomness = randFloatInRange(0.5f,1.0f); + float surfaceRemainder = (1.0f - surfaceRandomness)/2.0f; + float altRemainderSignA = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; + float altRemainderSignB = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; + + if (face == MIN_X_FACE) { + randomDirection = glm::vec3(-surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); + } else if (face == MAX_X_FACE) { + randomDirection = glm::vec3(surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); + } else if (face == MIN_Y_FACE) { + randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, -surfaceRandomness, surfaceRemainder * altRemainderSignB); + } else if (face == MAX_Y_FACE) { + randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRandomness, surfaceRemainder * altRemainderSignB); + } else if (face == MIN_Z_FACE) { + randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, -surfaceRandomness); + } else if (face == MAX_Z_FACE) { + randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, surfaceRandomness); + } + + randomDirection = glm::normalize(randomDirection); + + /* + qDebug() << "DIFFUSION... addSoundSource()... 
partialDiffusionAttenuation=" << partialDiffusionAttenuation << "\n" << + " MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT << "\n" << + " previous direction=[" << direction.x << "," << direction.y << "," << direction.z << "]" << "\n" << + " randomDirection=[" << randomDirection.x << "," << randomDirection.y << "," << randomDirection.z << "]" << "\n" << + " end=[" << end.x << "," << end.y << "," << end.z << "]"; + */ + + // add sound sources for these diffusions + addSoundSource(end, randomDirection, partialDiffusionAttenuation, currentDelay); + } + } + + // if our reflective attenuation is above our minimum, then add our reflection point and + // allow our path to continue + if (isDiffusion) { + qDebug() << "checking diffusion"; + qDebug() << "reflectiveAttenuation=" << reflectiveAttenuation; + qDebug() << "totalDiffusionAttenuation=" << totalDiffusionAttenuation; + qDebug() << "toListenerAttenuation=" << toListenerAttenuation; + qDebug() << "(reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation=" << ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation); + } + + if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT + && totalDelay < MAXIMUM_DELAY_MS) { + + // add this location, as the reflective attenuation as well as the total diffusion attenuation + AudioPoint point = { end, totalDelay, reflectiveAttenuation + totalDiffusionAttenuation }; + _audiblePoints.push_back(point); + + // add this location to the path points, so we can visualize it + path->reflections.push_back(end); + + // now, if our reflective attenuation is over our minimum then keep going... + if (reflectiveAttenuation * toListenerAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) { + glm::vec3 faceNormal = getFaceNormal(face); + path->lastDirection = glm::normalize(glm::reflect(direction,faceNormal)); + path->lastPoint = end; + path->lastAttenuation = reflectiveAttenuation; + path->lastDelay = currentDelay; + path->lastDistance = pathDistance; + path->bounceCount++; + +//qDebug() << "whichPath=" << activePaths << "path->lastAttenuation=" << path->lastAttenuation << "path->lastDistance=" << path->lastDistance; + + } else { + path->finalized = true; // if we're too quiet, then we're done + } + } else { + path->finalized = true; // if we're too quiet, then we're done + if (isDiffusion) { + qDebug() << "diffusion too quiet!"; + } + } + } else { +//qDebug() << "whichPath=" << activePaths << "path->bounceCount=" << path->bounceCount << "ray missed..."; + path->finalized = true; // if it doesn't intersect, then it is finished + if (isDiffusion) { + qDebug() << "diffusion doesn't intersect!"; + } + } + } + } + return activePaths; +} + +SurfaceCharacteristics AudioReflector::getSurfaceCharacteristics(OctreeElement* elementHit) { + float reflectiveRatio = (1.0f - (_absorptionRatio + _diffusionRatio)); + SurfaceCharacteristics result = { reflectiveRatio, _absorptionRatio, _diffusionRatio }; + return result; +} + + diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index a2cc39d20e..8ae5413b40 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -16,6 +16,41 @@ #include "Audio.h" #include "avatar/MyAvatar.h" +class AudioPath { +public: + AudioPath(const glm::vec3& origin = glm::vec3(0), const glm::vec3& direction = glm::vec3(0), float attenuation = 1.0f, + float delay = 0.0f, int bounceCount = 0); + glm::vec3 startPoint; + glm::vec3 startDirection; + float 
startDelay;
+    float startAttenuation;
+
+    glm::vec3 lastPoint;
+    glm::vec3 lastDirection;
+    float lastDistance;
+    float lastDelay;
+    float lastAttenuation;
+    unsigned int bounceCount;
+
+    bool finalized;
+    QVector<glm::vec3> reflections;
+};
+
+class AudioPoint {
+public:
+    glm::vec3 location;
+    float delay;
+    float attenuation;
+};
+
+class SurfaceCharacteristics {
+public:
+    float reflectiveRatio;
+    float absorptionRatio;
+    float diffusionRatio;
+};
+
+
 class AudioReflector : public QObject {
     Q_OBJECT
 public:
@@ -58,40 +93,51 @@ private:
     MyAvatar* _myAvatar; // access to listener
     Audio* _audio; // access to audio API
 
+    // Helpers for drawing
     void drawRays();
     void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);
 
+    // OLD helper for playing audio
     void echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples,
                          unsigned int sampleTime, int sampleRate);
 
+    // OLD helper for calculating reflections
     QVector<glm::vec3> calculateReflections(const glm::vec3& earPosition, const glm::vec3& origin, const glm::vec3& originalDirection);
     void calculateDiffusions(const glm::vec3& earPosition, const glm::vec3& origin,
                              const glm::vec3& thisReflection, float thisDistance, float thisAttenuation, int thisBounceCount,
                              BoxFace thisReflectionFace, QVector<glm::vec3> reflectionPoints);
 
+    // OLD helper for drawing reflections
     void drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections);
 
+    // OLD helper for calculating reflections
     void calculateAllReflections();
+
+    // resets statistics
     void reset();
+
+    // helper for generically calculating attenuation based on distance
     float getDistanceAttenuationCoefficient(float distance);
 
+    // statistics
     int _reflections;
-    int _delayCount;
     float _totalDelay;
     float _averageDelay;
     float _maxDelay;
     float _minDelay;
-    int _attenuationCount;
     float _totalAttenuation;
     float _averageAttenuation;
     float _maxAttenuation;
     float _minAttenuation;
 
+    glm::vec3 _listenerPosition;
     glm::vec3 _origin;
     glm::quat _orientation;
+
+    // old way of doing this...
     QVector<glm::vec3> _frontRightUpReflections;
     QVector<glm::vec3> _frontLeftUpReflections;
     QVector<glm::vec3> _backRightUpReflections;
@@ -107,6 +153,32 @@ private:
     QVector<glm::vec3> _upReflections;
     QVector<glm::vec3> _downReflections;
 
+
+    // NOTE: Here's the new way: we will keep an array of AudioPaths and, on each pass, loop over all of the
+    // currently calculating audio paths, tracing one ray per path. If that ray doesn't reflect, or reaches a
+    // maximum distance/attenuation, then the path is considered finalized.
+    // If the ray hits a surface, then, based on the characteristics of that surface, it will calculate the new
+    // attenuation, path length, and delay for the primary path. For surfaces that have diffusion, it will also
+    // create a fanout number of new paths; those new paths will have an origin of the reflection point, and an
+    // initial attenuation of their diffusion ratio. Those new paths will be added to the active audio paths,
+    // and will be analyzed on the next loop. 
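// (An illustrative aside, not part of the patch: the driver for the note above is
// anylizePaths() / anylizePathsSingleStep(). Each pass traces one ray segment per
// unfinalized path, and diffusion can add new paths while the loop is running, so the
// analysis simply iterates until no path remains active; a minimal sketch:
//
//     int activePaths = _audioPaths.size(); // at the start, every path is active
//     while (activePaths > 0) {
//         activePaths = anylizePathsSingleStep();
//     }
//
// A path finalizes when its ray misses all geometry, its attenuation falls below
// MINIMUM_ATTENUATION_TO_REFLECT, its total delay passes MAXIMUM_DELAY_MS, or it
// exceeds ABSOLUTE_MAXIMUM_BOUNCE_COUNT.)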
+    QVector<AudioPath*> _audioPaths;
+    QVector<AudioPoint> _audiblePoints;
+
+    // adds a sound source to begin an audio path trace; these can be the initial sound sources with their
+    // directional properties, as well as diffusion sound sources
+    void addSoundSource(const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation, float initialDelay);
+
+    // helpers that handle the audio path analysis
+    int anylizePathsSingleStep();
+    void anylizePaths();
+    void newDrawRays();
+    void drawPath(AudioPath* path, const glm::vec3& originalColor);
+    void newCalculateAllReflections();
+
+    // return the surface characteristics of the element we hit
+    SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit);
+
+    QMutex _mutex;
 
     float _preDelay;
@@ -114,6 +186,11 @@
     float _distanceAttenuationScalingFactor;
 
     int _diffusionFanout; // number of points of diffusion from each reflection point
+
+    // all elements have the same material for now...
+    float _absorptionRatio;
+    float _diffusionRatio;
+    float _reflectiveRatio;
 };

From 030de00e882814821cb9904d22830edc8353c15b Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Sun, 13 Apr 2014 19:11:25 -0700
Subject: [PATCH 24/64] first cut at new version of audio reflector

---
 interface/interface_en.ts        | 12 ++++++------
 interface/src/AudioReflector.cpp |  3 ++-
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/interface/interface_en.ts b/interface/interface_en.ts
index 43ec129c99..8a8a877a32 100644
--- a/interface/interface_en.ts
+++ b/interface/interface_en.ts
@@ -14,12 +14,12 @@
 [this hunk only updates <location> line numbers for the "Open Script" and "JavaScript Files (*.js)" entries]
@@ -113,18 +113,18 @@ Menu
 [this hunk only updates <location> line numbers for the "Open .ini config file", "Text files (*.ini)", and "Save .ini config file" entries]

diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index e870e01539..6e0b72c9fa 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -770,7 +770,6 @@ qDebug() << "AudioReflector::anylizePaths()...";
         delete path;
     }
     _audioPaths.clear();
-
     _audiblePoints.clear(); // clear our audible points
 
     // add our initial paths
@@ -816,6 +815,8 @@ qDebug() << "AudioReflector::anylizePaths()...";
         steps++;
         qDebug() << "acitvePaths=" << acitvePaths << "steps=" << steps << "_audioPaths.size()=" << _audioPaths.size();
     }
+
+    qDebug() << "_audiblePoints.size()=" << _audiblePoints.size();
 }

From 22f7facc4e15b1291076ec9751778c069e091e77 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Sun, 13 Apr 2014 19:41:54 -0700
Subject: [PATCH 25/64] first cut at new version of audio reflector

---
 interface/src/AudioReflector.cpp | 117 +++++++++++++++++++++++++++++--
 interface/src/AudioReflector.h   |   4 ++
 2 files changed, 116 insertions(+), 5 deletions(-)

diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index 6e0b72c9fa..89cc678a0e 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -44,7 +44,8 @@ void AudioReflector::render() {
     }
 
     if (_audio->getProcessSpatialAudio()) {
-        newDrawRays();
+        //newDrawRays();
+        drawRays();
     }
 }
 
@@ -474,15 +475,121 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections,
+void AudioReflector::injectAudiblePoint(const AudioPoint& audiblePoint,
+                        const QByteArray& samples, unsigned int sampleTime, int sampleRate) {
+
+    bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
+    bool wantStereo = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource);
+    glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() :
+                        _myAvatar->getHead()->getPosition();
+    glm::vec3 leftEarPosition = wantEarSeparation ? 
_myAvatar->getHead()->getLeftEarPosition() : + _myAvatar->getHead()->getPosition(); + + int totalNumberOfSamples = samples.size() / sizeof(int16_t); + int totalNumberOfStereoSamples = samples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS); + + const int16_t* originalSamplesData = (const int16_t*)samples.constData(); + QByteArray attenuatedLeftSamples; + QByteArray attenuatedRightSamples; + attenuatedLeftSamples.resize(samples.size()); + attenuatedRightSamples.resize(samples.size()); + + int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data(); + int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data(); + + // calculate the distance to the ears + float rightEarDistance = glm::distance(audiblePoint.location, rightEarPosition); + float leftEarDistance = glm::distance(audiblePoint.location, leftEarPosition); + + float rightEarDelayMsecs = getDelayFromDistance(rightEarDistance) + audiblePoint.delay; + float leftEarDelayMsecs = getDelayFromDistance(leftEarDistance) + audiblePoint.delay; + + _totalDelay += rightEarDelayMsecs + leftEarDelayMsecs; + _delayCount += 2; + _maxDelay = std::max(_maxDelay,rightEarDelayMsecs); + _maxDelay = std::max(_maxDelay,leftEarDelayMsecs); + _minDelay = std::min(_minDelay,rightEarDelayMsecs); + _minDelay = std::min(_minDelay,leftEarDelayMsecs); + + int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND; + int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND; + + //qDebug() << "leftTotalDistance=" << leftTotalDistance << "rightTotalDistance=" << rightTotalDistance; + //qDebug() << "leftEarDelay=" << leftEarDelay << "rightEarDelay=" << rightEarDelay; + + float rightEarAttenuation = getDistanceAttenuationCoefficient(rightEarDistance) * audiblePoint.attenuation; + float leftEarAttenuation = getDistanceAttenuationCoefficient(leftEarDistance) * audiblePoint.attenuation; + + _totalAttenuation += rightEarAttenuation + leftEarAttenuation; + _attenuationCount += 2; + _maxAttenuation = std::max(_maxAttenuation,rightEarAttenuation); + _maxAttenuation = std::max(_maxAttenuation,leftEarAttenuation); + _minAttenuation = std::min(_minAttenuation,rightEarAttenuation); + _minAttenuation = std::min(_minAttenuation,leftEarAttenuation); + + // run through the samples, and attenuate them + for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) { + int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS]; + int16_t rightSample = leftSample; + if (wantStereo) { + rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1]; + } + + //qDebug() << "leftSample=" << leftSample << "rightSample=" << rightSample; + attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation; + attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0; + + attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0; + attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation; + + //qDebug() << "attenuated... 
leftSample=" << (leftSample * leftEarAttenuation) << "rightSample=" << (rightSample * rightEarAttenuation); + } + + // now inject the attenuated array with the appropriate delay + + unsigned int sampleTimeLeft = sampleTime + leftEarDelay; + unsigned int sampleTimeRight = sampleTime + rightEarDelay; + + //qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight; + + _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples); + _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples); +} void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { // nothing yet, but will do local reflections too... } void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { - return; // - -// DO NOTHING.... - + //newEchoAudio(sampleTime, samples, format); + oldEchoAudio(sampleTime, samples, format); +} + +void AudioReflector::newEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { + //quint64 start = usecTimestampNow(); + + _maxDelay = 0; + _maxAttenuation = 0.0f; + _minDelay = std::numeric_limits::max(); + _minAttenuation = std::numeric_limits::max(); + _totalDelay = 0.0f; + _delayCount = 0; + _totalAttenuation = 0.0f; + _attenuationCount = 0; + + QMutexLocker locker(&_mutex); + + foreach(const AudioPoint& audiblePoint, _audiblePoints) { + injectAudiblePoint(audiblePoint, samples, sampleTime, format.sampleRate()); + } + + _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount; + _averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount; + + //quint64 end = usecTimestampNow(); + //qDebug() << "AudioReflector::addSamples()... 
elapsed=" << (end - start); +} + +void AudioReflector::oldEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { //quint64 start = usecTimestampNow(); _maxDelay = 0; diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 8ae5413b40..3e8f06e8a7 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -174,6 +174,10 @@ private: void newDrawRays(); void drawPath(AudioPath* path, const glm::vec3& originalColor); void newCalculateAllReflections(); + + void injectAudiblePoint(const AudioPoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate); + void oldEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); + void newEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); // return the surface characteristics of the element we hit SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit); From ddd2b0843cac71b97b43efd7119b329f95adeba4 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Sun, 13 Apr 2014 20:43:40 -0700 Subject: [PATCH 26/64] more work on new diffusion model --- examples/audioReflectorTools.js | 65 ++++++++++++- interface/src/Application.cpp | 5 +- interface/src/AudioReflector.cpp | 157 +++++++++---------------------- interface/src/Menu.cpp | 4 + interface/src/Menu.h | 1 + 5 files changed, 116 insertions(+), 116 deletions(-) diff --git a/examples/audioReflectorTools.js b/examples/audioReflectorTools.js index e19e757367..99baf7260c 100644 --- a/examples/audioReflectorTools.js +++ b/examples/audioReflectorTools.js @@ -11,12 +11,13 @@ var delayScale = 100.0; +var fanoutScale = 10.0; var speedScale = 20; var factorScale = 5.0; // This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to // move the slider -var delayY = 300; +var delayY = 250; var delaySlider = Overlays.addOverlay("image", { // alternate form of expressing bounds bounds: { x: 100, y: delayY, width: 150, height: 35}, @@ -36,10 +37,37 @@ var delayThumb = Overlays.addOverlay("image", { width: 18, height: 17, imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 255, green: 0, blue: 0}, + alpha: 1 + }); + +// This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to +// move the slider +var fanoutY = 300; +var fanoutSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: fanoutY, width: 150, height: 35}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", color: { red: 255, green: 255, blue: 255}, alpha: 1 }); +// This is the thumb of our slider +var fanoutMinThumbX = 110; +var fanoutMaxThumbX = fanoutMinThumbX + 110; +var fanoutThumbX = (fanoutMinThumbX + fanoutMaxThumbX) / 2; +var fanoutThumb = Overlays.addOverlay("image", { + x: fanoutThumbX, + y: fanoutY + 9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 255, green: 255, blue: 0}, + alpha: 1 + }); + + // This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to // move the slider var speedY = 350; @@ -62,7 +90,7 @@ var speedThumb = Overlays.addOverlay("image", { width: 18, height: 17, imageURL: 
"https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", - color: { red: 255, green: 255, blue: 255}, + color: { red: 0, green: 255, blue: 0}, alpha: 1 }); @@ -88,7 +116,7 @@ var factorThumb = Overlays.addOverlay("image", { width: 18, height: 17, imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", - color: { red: 255, green: 255, blue: 255}, + color: { red: 0, green: 0, blue: 255}, alpha: 1 }); @@ -101,6 +129,8 @@ function scriptEnding() { Overlays.deleteOverlay(speedSlider); Overlays.deleteOverlay(delayThumb); Overlays.deleteOverlay(delaySlider); + Overlays.deleteOverlay(fanoutThumb); + Overlays.deleteOverlay(fanoutSlider); } Script.scriptEnding.connect(scriptEnding); @@ -116,6 +146,7 @@ Script.update.connect(update); // The slider is handled in the mouse event callbacks. var movingSliderDelay = false; +var movingSliderFanout = false; var movingSliderSpeed = false; var movingSliderFactor = false; var thumbClickOffsetX = 0; @@ -130,8 +161,22 @@ function mouseMoveEvent(event) { } Overlays.editOverlay(delayThumb, { x: newThumbX } ); var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale; + print("delay="+delay); AudioReflector.setPreDelay(delay); } + if (movingSliderFanout) { + newThumbX = event.x - thumbClickOffsetX; + if (newThumbX < fanoutMinThumbX) { + newThumbX = fanoutMinThumbX; + } + if (newThumbX > fanoutMaxThumbX) { + newThumbX = fanoutMaxThumbX; + } + Overlays.editOverlay(fanoutThumb, { x: newThumbX } ); + var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale); + print("fanout="+fanout); + AudioReflector.setDiffusionFanout(fanout); + } if (movingSliderSpeed) { newThumbX = event.x - thumbClickOffsetX; if (newThumbX < speedMinThumbX) { @@ -167,7 +212,11 @@ function mousePressEvent(event) { movingSliderDelay = true; thumbClickOffsetX = event.x - delayThumbX; } - + // If the user clicked on the thumb, handle the slider logic + if (clickedOverlay == fanoutThumb) { + movingSliderFanout = true; + thumbClickOffsetX = event.x - fanoutThumbX; + } // If the user clicked on the thumb, handle the slider logic if (clickedOverlay == speedThumb) { movingSliderSpeed = true; @@ -185,9 +234,17 @@ function mouseReleaseEvent(event) { if (movingSliderDelay) { movingSliderDelay = false; var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale; + print("delay="+delay); AudioReflector.setPreDelay(delay); delayThumbX = newThumbX; } + if (movingSliderFanout) { + movingSliderFanout = false; + var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale); + print("fanout="+fanout); + AudioReflector.setDiffusionFanout(fanout); + fanoutThumbX = newThumbX; + } if (movingSliderSpeed) { movingSliderSpeed = false; var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale; diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index 4a7da51887..e3e59bbab6 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -2854,8 +2854,11 @@ void Application::displayStats() { verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT); + float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? 
+ _audioReflector.getPreDelay() : 0.0f; + sprintf(reflectionsStatus, "Delay: pre: %f, average %f, max %f, min %f, speed: %f", - _audioReflector.getDelayFromDistance(0.0f), + preDelay, _audioReflector.getAverageDelayMsecs(), _audioReflector.getMaxDelayMsecs(), _audioReflector.getMinDelayMsecs(), diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 89cc678a0e..28e9f6161b 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -44,8 +44,11 @@ void AudioReflector::render() { } if (_audio->getProcessSpatialAudio()) { - //newDrawRays(); - drawRays(); + if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)) { + newDrawRays(); + } else { + drawRays(); + } } } @@ -59,11 +62,12 @@ void AudioReflector::render() { float AudioReflector::getDelayFromDistance(float distance) { float delay = (_soundMsPerMeter * distance); - if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) { + if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) && + !Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)) { + delay += _preDelay; } - return delay; } @@ -237,87 +241,6 @@ qDebug() << "_listenerPosition:" << _listenerPosition.x << "," << _listenerPosit } } -// TODO: add diffusion ratio. percentage of echo energy that diffuses -// so say that 50% of the energy that hits the echo point diffuses in fanout directions -void AudioReflector::calculateDiffusions(const glm::vec3& earPosition, const glm::vec3& origin, - const glm::vec3& thisReflection, float thisDistance, float thisAttenuation, int thisBounceCount, - BoxFace thisReflectionFace, QVector reflectionPoints) { - - //return; // do nothing - - QVector diffusionDirections; - - // diffusions fan out from random places on the semisphere of the collision point - for(int i = 0; i < _diffusionFanout; i++) { - glm::vec3 randomDirection; - - float surfaceRandomness = randFloatInRange(0.5f,1.0f); - float surfaceRemainder = (1.0f - surfaceRandomness)/2.0f; - float altRemainderSignA = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; - float altRemainderSignB = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? 
-1.0 : 1.0; - - if (thisReflectionFace == MIN_X_FACE) { - randomDirection = glm::vec3(-surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); - } else if (thisReflectionFace == MAX_X_FACE) { - randomDirection = glm::vec3(surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); - } else if (thisReflectionFace == MIN_Y_FACE) { - randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, -surfaceRandomness, surfaceRemainder * altRemainderSignB); - } else if (thisReflectionFace == MAX_Y_FACE) { - randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRandomness, surfaceRemainder * altRemainderSignB); - } else if (thisReflectionFace == MIN_Z_FACE) { - randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, -surfaceRandomness); - } else if (thisReflectionFace == MAX_Z_FACE) { - randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, surfaceRandomness); - } - diffusionDirections.push_back(randomDirection); - } - - foreach(glm::vec3 direction, diffusionDirections) { - - glm::vec3 start = thisReflection; - OctreeElement* elementHit; - float distance; - BoxFace face; - const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point - float currentAttenuation = thisAttenuation; - float totalDistance = thisDistance; - float totalDelay = getDelayFromDistance(totalDistance); - int bounceCount = thisBounceCount; - - while (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS && bounceCount < ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { - if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { - glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); - - totalDistance += glm::distance(start, end); - float earDistance = glm::distance(end, earPosition); - float totalDistanceToEar = earDistance + distance; - totalDelay = getDelayFromDistance(totalDistanceToEar); - currentAttenuation = getDistanceAttenuationCoefficient(totalDistanceToEar) * getBounceAttenuationCoefficient(bounceCount); - - if (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS) { - reflectionPoints.push_back(end); - glm::vec3 faceNormal = getFaceNormal(face); - direction = glm::normalize(glm::reflect(direction,faceNormal)); - start = end; - bounceCount++; - - /* - // handle diffusion here - if (_diffusionFanout > 0 && bounceCount < ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { - glm::vec3 thisReflection = end; - calculateDiffusions(earPosition, origin, end, totalDistance, - currentAttenuation, bounceCount, face, reflectionPoints); - } - */ - } - } else { - currentAttenuation = 0.0f; - } - } - } -} - - QVector AudioReflector::calculateReflections(const glm::vec3& earPosition, const glm::vec3& origin, const glm::vec3& originalDirection) { @@ -560,8 +483,11 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray } void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { - //newEchoAudio(sampleTime, samples, format); - oldEchoAudio(sampleTime, samples, format); + if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)) { + newEchoAudio(sampleTime, samples, format); + } else { + oldEchoAudio(sampleTime, samples, format); + } } void AudioReflector::newEchoAudio(unsigned int sampleTime, const 
QByteArray& samples, const QAudioFormat& format) { @@ -829,7 +755,6 @@ void AudioReflector::newCalculateAllReflections() { anylizePaths(); // actually does the work quint64 end = usecTimestampNow(); - //reset(); } } @@ -898,21 +823,23 @@ qDebug() << "AudioReflector::anylizePaths()..."; float initialAttenuation = 1.0f; - addSoundSource(_origin, right, initialAttenuation, _preDelay); - addSoundSource(_origin, front, initialAttenuation, _preDelay); - addSoundSource(_origin, up, initialAttenuation, _preDelay); - addSoundSource(_origin, down, initialAttenuation, _preDelay); - addSoundSource(_origin, back, initialAttenuation, _preDelay); - addSoundSource(_origin, left, initialAttenuation, _preDelay); + float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? _preDelay : 0.0f; + + addSoundSource(_origin, right, initialAttenuation, preDelay); + addSoundSource(_origin, front, initialAttenuation, preDelay); + addSoundSource(_origin, up, initialAttenuation, preDelay); + addSoundSource(_origin, down, initialAttenuation, preDelay); + addSoundSource(_origin, back, initialAttenuation, preDelay); + addSoundSource(_origin, left, initialAttenuation, preDelay); - addSoundSource(_origin, frontRightUp, initialAttenuation, _preDelay); - addSoundSource(_origin, frontLeftUp, initialAttenuation, _preDelay); - addSoundSource(_origin, backRightUp, initialAttenuation, _preDelay); - addSoundSource(_origin, backLeftUp, initialAttenuation, _preDelay); - addSoundSource(_origin, frontRightDown, initialAttenuation, _preDelay); - addSoundSource(_origin, frontLeftDown, initialAttenuation, _preDelay); - addSoundSource(_origin, backRightDown, initialAttenuation, _preDelay); - addSoundSource(_origin, backLeftDown, initialAttenuation, _preDelay); + addSoundSource(_origin, frontRightUp, initialAttenuation, preDelay); + addSoundSource(_origin, frontLeftUp, initialAttenuation, preDelay); + addSoundSource(_origin, backRightUp, initialAttenuation, preDelay); + addSoundSource(_origin, backLeftUp, initialAttenuation, preDelay); + addSoundSource(_origin, frontRightDown, initialAttenuation, preDelay); + addSoundSource(_origin, frontLeftDown, initialAttenuation, preDelay); + addSoundSource(_origin, backRightDown, initialAttenuation, preDelay); + addSoundSource(_origin, backLeftDown, initialAttenuation, preDelay); // loop through all our int steps = 0; @@ -920,9 +847,11 @@ qDebug() << "AudioReflector::anylizePaths()..."; while(acitvePaths > 0) { acitvePaths = anylizePathsSingleStep(); steps++; - qDebug() << "acitvePaths=" << acitvePaths << "steps=" << steps << "_audioPaths.size()=" << _audioPaths.size(); + //qDebug() << "acitvePaths=" << acitvePaths << "steps=" << steps << "_audioPaths.size()=" << _audioPaths.size(); } + _reflections = _audiblePoints.size(); + qDebug() << "_audiblePoints.size()=" << _audiblePoints.size(); } @@ -964,9 +893,11 @@ int AudioReflector::anylizePathsSingleStep() { //} else if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { path->finalized = true; + /* if (isDiffusion) { qDebug() << "diffusion bounceCount too high!"; } + */ } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); @@ -998,7 +929,7 @@ int AudioReflector::anylizePathsSingleStep() { float reflectiveAttenuation = currentAttenuation * material.reflectiveRatio; float totalDiffusionAttenuation = currentAttenuation * material.diffusionRatio; - float partialDiffusionAttenuation = totalDiffusionAttenuation / 
_diffusionFanout; + float partialDiffusionAttenuation = _diffusionFanout < 1 ? 0.0f : totalDiffusionAttenuation / _diffusionFanout; // total delay includes the bounce back to listener totalDelay = getDelayFromDistance(totalDistance); @@ -1012,11 +943,11 @@ int AudioReflector::anylizePathsSingleStep() { && totalDelay < MAXIMUM_DELAY_MS) { // add sound sources for the normal - glm::vec3 faceNormal = getFaceNormal(face); - addSoundSource(end, faceNormal, partialDiffusionAttenuation, currentDelay); + //glm::vec3 faceNormal = getFaceNormal(face); + //addSoundSource(end, faceNormal, partialDiffusionAttenuation, currentDelay); // diffusions fan out from random places on the semisphere of the collision point - for(int i = 1; i < _diffusionFanout; i++) { + for(int i = 0; i < _diffusionFanout; i++) { glm::vec3 randomDirection; float surfaceRandomness = randFloatInRange(0.5f,1.0f); @@ -1055,6 +986,7 @@ int AudioReflector::anylizePathsSingleStep() { // if our reflective attenuation is above our minimum, then add our reflection point and // allow our path to continue + /* if (isDiffusion) { qDebug() << "checking diffusion"; qDebug() << "reflectiveAttenuation=" << reflectiveAttenuation; @@ -1062,6 +994,7 @@ int AudioReflector::anylizePathsSingleStep() { qDebug() << "toListenerAttenuation=" << toListenerAttenuation; qDebug() << "(reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation=" << ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation); } + */ if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS) { @@ -1082,24 +1015,26 @@ int AudioReflector::anylizePathsSingleStep() { path->lastDelay = currentDelay; path->lastDistance = pathDistance; path->bounceCount++; - -//qDebug() << "whichPath=" << activePaths << "path->lastAttenuation=" << path->lastAttenuation << "path->lastDistance=" << path->lastDistance; - } else { path->finalized = true; // if we're too quiet, then we're done } } else { path->finalized = true; // if we're too quiet, then we're done + /* if (isDiffusion) { qDebug() << "diffusion too quiet!"; } + */ } } else { -//qDebug() << "whichPath=" << activePaths << "path->bounceCount=" << path->bounceCount << "ray missed..."; + //qDebug() << "whichPath=" << activePaths << "path->bounceCount=" << path->bounceCount << "ray missed..."; path->finalized = true; // if it doesn't intersect, then it is finished + + /* if (isDiffusion) { qDebug() << "diffusion doesn't intersect!"; } + */ } } } diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index 1a0adb5550..1bdbabec44 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -392,6 +392,10 @@ Menu::Menu() : Qt::CTRL | Qt::SHIFT | Qt::Key_H, true); + addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingWithDiffusions, + Qt::CTRL | Qt::SHIFT | Qt::Key_W, + true); + addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter, Qt::CTRL | Qt::SHIFT | Qt::Key_F, false); diff --git a/interface/src/Menu.h b/interface/src/Menu.h index fd7873fae9..6e42e21b06 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -249,6 +249,7 @@ namespace MenuOption { const QString AudioSpatialProcessingPreDelay = "Audio Spatial Processing add Pre-Delay"; const QString AudioSpatialProcessingStereoSource = "Audio Spatial Processing Stereo Source"; const QString AudioSpatialProcessingHeadOriented = "Audio Spatial Processing Head Oriented"; + const QString 
AudioSpatialProcessingWithDiffusions = "Audio Spatial Processing With Diffusions"; const QString EchoServerAudio = "Echo Server Audio"; const QString EchoLocalAudio = "Echo Local Audio"; const QString MuteAudio = "Mute Microphone"; From d3a9c6940cb7e44ec24e4e961490c290dbd4bb65 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Mon, 14 Apr 2014 10:12:08 -0700 Subject: [PATCH 27/64] some audio reflection cleanup --- interface/src/AudioReflector.cpp | 144 ++++--------------------------- interface/src/ui/Stats.cpp | 111 +++++++++++++----------- 2 files changed, 77 insertions(+), 178 deletions(-) diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 28e9f6161b..e3c5e19b2a 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -510,6 +510,7 @@ void AudioReflector::newEchoAudio(unsigned int sampleTime, const QByteArray& sam _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount; _averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount; + _reflections = _audiblePoints.size(); //quint64 end = usecTimestampNow(); //qDebug() << "AudioReflector::addSamples()... elapsed=" << (end - start); @@ -554,125 +555,26 @@ void AudioReflector::oldEchoAudio(unsigned int sampleTime, const QByteArray& sam } void AudioReflector::drawRays() { - //qDebug() << "AudioReflector::drawRays()"; - calculateAllReflections(); const glm::vec3 RED(1,0,0); - const glm::vec3 GREEN(0,1,0); - const glm::vec3 BLUE(0,0,1); - const glm::vec3 PURPLE(1,0,1); - const glm::vec3 YELLOW(1,1,0); - const glm::vec3 CYAN(0,1,1); - const glm::vec3 DARK_RED(0.8f,0.2f,0.2f); - const glm::vec3 DARK_GREEN(0.2f,0.8f,0.2f); - const glm::vec3 DARK_BLUE(0.2f,0.2f,0.8f); - const glm::vec3 DARK_PURPLE(0.8f,0.2f,0.8f); - const glm::vec3 DARK_YELLOW(0.8f,0.8f,0.2f); - const glm::vec3 DARK_CYAN(0.2f,0.8f,0.8f); - const glm::vec3 WHITE(1,1,1); - const glm::vec3 GRAY(0.5f,0.5f,0.5f); - glm::vec3 frontRightUpColor = RED; - glm::vec3 frontLeftUpColor = GREEN; - glm::vec3 backRightUpColor = BLUE; - glm::vec3 backLeftUpColor = CYAN; - glm::vec3 frontRightDownColor = PURPLE; - glm::vec3 frontLeftDownColor = YELLOW; - glm::vec3 backRightDownColor = WHITE; - glm::vec3 backLeftDownColor = DARK_RED; - glm::vec3 frontColor = GRAY; - glm::vec3 backColor = DARK_GREEN; - glm::vec3 leftColor = DARK_BLUE; - glm::vec3 rightColor = DARK_CYAN; - glm::vec3 upColor = DARK_PURPLE; - glm::vec3 downColor = DARK_YELLOW; - - // attempt to determine insidness/outsideness based on number of directional rays that reflect - bool inside = false; - - bool blockedUp = (_frontRightUpReflections.size() > 0) && - (_frontLeftUpReflections.size() > 0) && - (_backRightUpReflections.size() > 0) && - (_backLeftUpReflections.size() > 0) && - (_upReflections.size() > 0); - - bool blockedDown = (_frontRightDownReflections.size() > 0) && - (_frontLeftDownReflections.size() > 0) && - (_backRightDownReflections.size() > 0) && - (_backLeftDownReflections.size() > 0) && - (_downReflections.size() > 0); - - bool blockedFront = (_frontRightUpReflections.size() > 0) && - (_frontLeftUpReflections.size() > 0) && - (_frontRightDownReflections.size() > 0) && - (_frontLeftDownReflections.size() > 0) && - (_frontReflections.size() > 0); - - bool blockedBack = (_backRightUpReflections.size() > 0) && - (_backLeftUpReflections.size() > 0) && - (_backRightDownReflections.size() > 0) && - (_backLeftDownReflections.size() > 0) && - (_backReflections.size() > 0); - - bool blockedLeft = (_frontLeftUpReflections.size() > 0) 
&& - (_backLeftUpReflections.size() > 0) && - (_frontLeftDownReflections.size() > 0) && - (_backLeftDownReflections.size() > 0) && - (_leftReflections.size() > 0); - - bool blockedRight = (_frontRightUpReflections.size() > 0) && - (_backRightUpReflections.size() > 0) && - (_frontRightDownReflections.size() > 0) && - (_backRightDownReflections.size() > 0) && - (_rightReflections.size() > 0); - - inside = blockedUp && blockedDown && blockedFront && blockedBack && blockedLeft && blockedRight; - - if (inside) { - frontRightUpColor = RED; - frontLeftUpColor = RED; - backRightUpColor = RED; - backLeftUpColor = RED; - frontRightDownColor = RED; - frontLeftDownColor = RED; - backRightDownColor = RED; - backLeftDownColor = RED; - frontColor = RED; - backColor = RED; - leftColor = RED; - rightColor = RED; - upColor = RED; - downColor = RED; - } - QMutexLocker locker(&_mutex); - drawReflections(_origin, frontRightUpColor, _frontRightUpReflections); - drawReflections(_origin, frontLeftUpColor, _frontLeftUpReflections); - drawReflections(_origin, backRightUpColor, _backRightUpReflections); - drawReflections(_origin, backLeftUpColor, _backLeftUpReflections); - drawReflections(_origin, frontRightDownColor, _frontRightDownReflections); - drawReflections(_origin, frontLeftDownColor, _frontLeftDownReflections); - drawReflections(_origin, backRightDownColor, _backRightDownReflections); - drawReflections(_origin, backLeftDownColor, _backLeftDownReflections); - drawReflections(_origin, frontColor, _frontReflections); - drawReflections(_origin, backColor, _backReflections); - drawReflections(_origin, leftColor, _leftReflections); - drawReflections(_origin, rightColor, _rightReflections); - drawReflections(_origin, upColor, _upReflections); - drawReflections(_origin, downColor, _downReflections); - - /* - qDebug() << "_reflections:" << _reflections - << "_averageDelay:" << _averageDelay - << "_maxDelay:" << _maxDelay - << "_minDelay:" << _minDelay; - - qDebug() << "_averageAttenuation:" << _averageAttenuation - << "_maxAttenuation:" << _maxAttenuation - << "_minAttenuation:" << _minAttenuation; - */ + drawReflections(_origin, RED, _frontRightUpReflections); + drawReflections(_origin, RED, _frontLeftUpReflections); + drawReflections(_origin, RED, _backRightUpReflections); + drawReflections(_origin, RED, _backLeftUpReflections); + drawReflections(_origin, RED, _frontRightDownReflections); + drawReflections(_origin, RED, _frontLeftDownReflections); + drawReflections(_origin, RED, _backRightDownReflections); + drawReflections(_origin, RED, _backLeftDownReflections); + drawReflections(_origin, RED, _frontReflections); + drawReflections(_origin, RED, _backReflections); + drawReflections(_origin, RED, _leftReflections); + drawReflections(_origin, RED, _rightReflections); + drawReflections(_origin, RED, _upReflections); + drawReflections(_origin, RED, _downReflections); } void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) { @@ -774,7 +676,6 @@ void AudioReflector::newDrawRays() { drawPath(path, RED); } else { diffusionNumber++; -//qDebug() << "drawing diffusion path:" << diffusionNumber << "length:" << path->reflections.size(); drawPath(path, GREEN); } } @@ -794,9 +695,6 @@ void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) { void AudioReflector::anylizePaths() { - -qDebug() << "AudioReflector::anylizePaths()..."; - // clear our _audioPaths foreach(AudioPath* const& path, _audioPaths) { delete path; @@ -847,12 +745,8 @@ qDebug() << 
"AudioReflector::anylizePaths()..."; while(acitvePaths > 0) { acitvePaths = anylizePathsSingleStep(); steps++; - //qDebug() << "acitvePaths=" << acitvePaths << "steps=" << steps << "_audioPaths.size()=" << _audioPaths.size(); } - _reflections = _audiblePoints.size(); - - qDebug() << "_audiblePoints.size()=" << _audiblePoints.size(); } int AudioReflector::anylizePathsSingleStep() { @@ -863,14 +757,6 @@ int AudioReflector::anylizePathsSingleStep() { bool isDiffusion = (path->startPoint != _origin); - /* - qDebug() << "ray intersection... " - << " startPoint=[" << path->startPoint.x << "," << path->startPoint.y << "," << path->startPoint.z << "]" - << " _origin=[" << _origin.x << "," << _origin.y << "," << _origin.z << "]" - << " bouceCount= " << path->bounceCount - << " isDiffusion=" << isDiffusion; - */ - glm::vec3 start = path->lastPoint; glm::vec3 direction = path->lastDirection; OctreeElement* elementHit; // output from findRayIntersection diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp index c540877262..befa710903 100644 --- a/interface/src/ui/Stats.cpp +++ b/interface/src/ui/Stats.cpp @@ -293,11 +293,7 @@ void Stats::display( glm::vec3 avatarPos = myAvatar->getPosition(); lines = _expanded ? 5 : 3; - if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) { - lines += 3; // spatial audio processing adds 3 extra lines - } - - + drawBackground(backgroundColor, horizontalOffset, 0, _geoStatsWidth, lines * STATS_PELS_PER_LINE + 10); horizontalOffset += 5; @@ -338,49 +334,6 @@ void Stats::display( verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, downloads.str().c_str(), color); - - if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) { - const AudioReflector* audioReflector = Application::getInstance()->getAudioReflector(); - - - // add some reflection stats - char reflectionsStatus[128]; - - sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s", - audioReflector->getReflections(), - (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal) - ? "with" : "without"), - (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars) - ? "two" : "one"), - (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource) - ? "stereo" : "mono") - ); - - verticalOffset += STATS_PELS_PER_LINE; - drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); - - float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? 
- audioReflector->getPreDelay() : 0.0f; - - sprintf(reflectionsStatus, "Delay: pre: %f, average %f, max %f, min %f, speed: %f", - preDelay, - audioReflector->getAverageDelayMsecs(), - audioReflector->getMaxDelayMsecs(), - audioReflector->getMinDelayMsecs(), - audioReflector->getSoundMsPerMeter()); - - verticalOffset += STATS_PELS_PER_LINE; - drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); - - sprintf(reflectionsStatus, "Attenuation: average %f, max %f, min %f, distance scale: %f", - audioReflector->getAverageAttenuation(), - audioReflector->getMaxAttenuation(), - audioReflector->getMinAttenuation(), - audioReflector->getDistanceAttenuationScalingFactor()); - - verticalOffset += STATS_PELS_PER_LINE; - drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); - } } verticalOffset = 0; @@ -389,6 +342,10 @@ void Stats::display( VoxelSystem* voxels = Application::getInstance()->getVoxels(); lines = _expanded ? 12 : 3; + if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) { + lines += 5; // spatial audio processing adds 1 spacing line and 4 extra lines of info + } + drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10); horizontalOffset += 5; @@ -545,5 +502,61 @@ void Stats::display( voxelStats << "LOD: You can see " << qPrintable(displayLODDetails.trimmed()); verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, (char*)voxelStats.str().c_str(), color); - } + } + + + if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) { + verticalOffset += STATS_PELS_PER_LINE; // space one line... + + const AudioReflector* audioReflector = Application::getInstance()->getAudioReflector(); + + // add some reflection stats + char reflectionsStatus[128]; + + sprintf(reflectionsStatus, "Reflections: %d, Diffusion: %s, Original: %s, Ears: %s, Source: %s", + audioReflector->getReflections(), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions) + ? "yes" : "no"), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal) + ? "included" : "silent"), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars) + ? "two" : "one"), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource) + ? "stereo" : "mono") + ); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? 
+ audioReflector->getPreDelay() : 0.0f; + + sprintf(reflectionsStatus, "Delay: pre: %6.3f, average %6.3f, max %6.3f, min %6.3f, speed: %6.3f", + preDelay, + audioReflector->getAverageDelayMsecs(), + audioReflector->getMaxDelayMsecs(), + audioReflector->getMinDelayMsecs(), + audioReflector->getSoundMsPerMeter()); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, distance scale: %5.3f", + audioReflector->getAverageAttenuation(), + audioReflector->getMaxAttenuation(), + audioReflector->getMinAttenuation(), + audioReflector->getDistanceAttenuationScalingFactor()); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + bool diffusionEnabled = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); + int fanout = diffusionEnabled ? audioReflector->getDiffusionFanout() : 0; + sprintf(reflectionsStatus, "Diffusion: %s, Fanout: %d", (diffusionEnabled ? "yes" : "no"), fanout); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + } + } From 5630ed65af40a0a74731ba5b819d96e94da2af0b Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Mon, 14 Apr 2014 10:35:55 -0700 Subject: [PATCH 28/64] more cleanup --- interface/src/AudioReflector.cpp | 159 ++++++++++--------------------- interface/src/AudioReflector.h | 4 +- 2 files changed, 50 insertions(+), 113 deletions(-) diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index e3c5e19b2a..726cbfac57 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -163,44 +163,14 @@ void AudioReflector::calculateAllReflections() { bool shouldRecalc = _reflections == 0 || _myAvatar->getHead()->getPosition() != _origin || (orientation != _orientation); - /* - qDebug() << "wantHeadOrientation=" << wantHeadOrientation; - - qDebug(" _myAvatar->getHead()->getPosition()=%f,%f,%f", - _myAvatar->getHead()->getPosition().x, - _myAvatar->getHead()->getPosition().y, - _myAvatar->getHead()->getPosition().z); - - qDebug(" _origin=%f,%f,%f", - _origin.x, - _origin.y, - _origin.z); - - qDebug(" orientation=%f,%f,%f,%f", - orientation.x, - orientation.y, - orientation.z, - orientation.w); - - qDebug(" _orientation=%f,%f,%f,%f", - _orientation.x, - _orientation.y, - _orientation.z, - _orientation.w); - */ if (shouldRecalc) { - //qDebug() << "origin or orientation has changed..."; - QMutexLocker locker(&_mutex); - - - quint64 start = usecTimestampNow(); + quint64 start = usecTimestampNow(); _origin = _myAvatar->getHead()->getPosition(); glm::vec3 averageEarPosition = _myAvatar->getHead()->getPosition(); _listenerPosition = averageEarPosition; -qDebug() << "_listenerPosition:" << _listenerPosition.x << "," << _listenerPosition.y << "," << _listenerPosition.z; _orientation = orientation; glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT); glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP); @@ -235,9 +205,11 @@ qDebug() << "_listenerPosition:" << _listenerPosition.x << "," << _listenerPosit quint64 end = usecTimestampNow(); reset(); - - //qDebug() << "Reflections recalculated in " << (end - start) << "usecs"; - + + const bool wantDebugging = false; + if (wantDebugging) { + qDebug() << "calculateAllReflections() elapsed=" << (end - start); + } } } @@ -633,30 +605,17 
@@ void AudioReflector::newCalculateAllReflections() { || !isSimilarPosition(listenerPosition, _listenerPosition); if (shouldRecalc) { - - /* - qDebug() << "_audiblePoints.size()=" << _audiblePoints.size(); - qDebug() << "isSimilarPosition(origin, _origin)=" << isSimilarPosition(origin, _origin); - qDebug() << "isSimilarPosition(listenerPosition, _listenerPosition)=" << isSimilarPosition(listenerPosition, _listenerPosition); - qDebug() << "isSimilarOrientation(orientation, _orientation)=" << isSimilarOrientation(orientation, _orientation); - if (!isSimilarOrientation(orientation, _orientation)) { - qDebug() << " orientation=" << orientation.x << "," << orientation.y << "," - << orientation.y << "," << orientation.w; - - qDebug() << " _orientation=" << _orientation.x << "," << _orientation.y << "," - << _orientation.y << "," << _orientation.w; - } - */ - QMutexLocker locker(&_mutex); quint64 start = usecTimestampNow(); _origin = origin; _orientation = orientation; _listenerPosition = listenerPosition; - - anylizePaths(); // actually does the work - + analyzePaths(); // actually does the work quint64 end = usecTimestampNow(); + const bool wantDebugging = false; + if (wantDebugging) { + qDebug() << "newCalculateAllReflections() elapsed=" << (end - start); + } } } @@ -694,7 +653,7 @@ void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) { } -void AudioReflector::anylizePaths() { +void AudioReflector::analyzePaths() { // clear our _audioPaths foreach(AudioPath* const& path, _audioPaths) { delete path; @@ -703,7 +662,6 @@ void AudioReflector::anylizePaths() { _audiblePoints.clear(); // clear our audible points // add our initial paths - glm::vec3 averageEarPosition = _myAvatar->getHead()->getPosition(); glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT); glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP); glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT); @@ -739,22 +697,23 @@ void AudioReflector::anylizePaths() { addSoundSource(_origin, backRightDown, initialAttenuation, preDelay); addSoundSource(_origin, backLeftDown, initialAttenuation, preDelay); - // loop through all our + // loop through all our audio paths and keep analyzing them until they complete int steps = 0; int acitvePaths = _audioPaths.size(); // when we start, all paths are active while(acitvePaths > 0) { - acitvePaths = anylizePathsSingleStep(); + acitvePaths = analyzePathsSingleStep(); steps++; } _reflections = _audiblePoints.size(); } -int AudioReflector::anylizePathsSingleStep() { +int AudioReflector::analyzePathsSingleStep() { // iterate all the active sound paths, calculate one step per active path int activePaths = 0; foreach(AudioPath* const& path, _audioPaths) { + bool wantExtraDebuggging = false; bool isDiffusion = (path->startPoint != _origin); glm::vec3 start = path->lastPoint; @@ -767,37 +726,28 @@ int AudioReflector::anylizePathsSingleStep() { float currentDelay = path->lastDelay; // start with our delay so far float pathDistance = path->lastDistance; float totalDelay = path->lastDelay; // start with our delay so far - unsigned int bounceCount = path->bounceCount; if (!path->finalized) { activePaths++; - // quick hack to stop early reflections right away... 
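The path-analysis loop being cleaned up in these hunks boils down to a simple recurrence: each active path is advanced one bounce at a time, accumulating propagation delay and multiplying in per-bounce attenuation, until one of three cutoffs retires it. A minimal standalone sketch of that recurrence (the names and parameters below are illustrative, not the actual AudioReflector members):

struct PathState {
    float attenuation = 1.0f;  // running product of per-bounce reflective ratios
    float delayMs = 0.0f;      // accumulated propagation delay, in milliseconds
    int bounceCount = 0;
    bool finalized = false;
};

// Advance a path by one bounce segment, then retire it once it is inaudible, arrives
// too late, or has bounced too many times -- the same three cutoffs these hunks use.
// Illustrative sketch only: none of these identifiers are the real AudioReflector API.
void stepPathOnce(PathState& path, float segmentMeters, float reflectiveRatio,
                  float soundMsPerMeter,        // ~3.0f: sound travels roughly 1 ms per foot
                  float minAudibleAttenuation,  // cf. MINIMUM_ATTENUATION_TO_REFLECT
                  float maxDelayMs,             // cf. MAXIMUM_DELAY_MS
                  int maxBounces) {             // cf. ABSOLUTE_MAXIMUM_BOUNCE_COUNT
    path.delayMs += segmentMeters * soundMsPerMeter;
    path.attenuation *= reflectiveRatio;
    path.bounceCount++;
    if (path.attenuation <= minAudibleAttenuation || path.delayMs >= maxDelayMs
            || path.bounceCount > maxBounces) {
        path.finalized = true;
    }
}
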
- //if (!isDiffusion && path->bounceCount > 1) { - // path->finalized = true; - // qDebug() << "stopping reflections on first bounce!"; - //} else if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { path->finalized = true; - /* - if (isDiffusion) { + if (wantExtraDebuggging && isDiffusion) { qDebug() << "diffusion bounceCount too high!"; } - */ } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); pathDistance += glm::distance(start, end); - /* - qDebug() << "ray intersection... " - << " startPoint=[" << path->startPoint.x << "," << path->startPoint.y << "," << path->startPoint.z << "]" - << " bouceCount= " << path->bounceCount - << " end=[" << end.x << "," << end.y << "," << end.z << "]" - << " pathDistance=" << pathDistance; - */ - + if (wantExtraDebuggging) { + qDebug() << "ray intersection... " + << " startPoint=[" << path->startPoint.x << "," << path->startPoint.y << "," << path->startPoint.z << "]" + << " bouceCount= " << path->bounceCount + << " end=[" << end.x << "," << end.y << "," << end.z << "]" + << " pathDistance=" << pathDistance; + } // We aren't using this... should we be???? float toListenerDistance = glm::distance(end, _listenerPosition); @@ -820,67 +770,60 @@ int AudioReflector::anylizePathsSingleStep() { // total delay includes the bounce back to listener totalDelay = getDelayFromDistance(totalDistance); float toListenerAttenuation = getDistanceAttenuationCoefficient(toListenerDistance); - //qDebug() << "toListenerDistance=" << toListenerDistance; - //qDebug() << "toListenerAttenuation=" << toListenerAttenuation; // if our resulting partial diffusion attenuation, is still above our minimum attenuation // then we add new paths for each diffusion point if ((partialDiffusionAttenuation * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS) { - // add sound sources for the normal - //glm::vec3 faceNormal = getFaceNormal(face); - //addSoundSource(end, faceNormal, partialDiffusionAttenuation, currentDelay); - // diffusions fan out from random places on the semisphere of the collision point for(int i = 0; i < _diffusionFanout; i++) { - glm::vec3 randomDirection; + glm::vec3 diffusion; - float surfaceRandomness = randFloatInRange(0.5f,1.0f); - float surfaceRemainder = (1.0f - surfaceRandomness)/2.0f; - float altRemainderSignA = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; - float altRemainderSignB = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; + float randomness = randFloatInRange(0.5f,1.0f); + float remainder = (1.0f - randomness)/2.0f; + float remainderSignA = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; + float remainderSignB = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? 
-1.0 : 1.0; if (face == MIN_X_FACE) { - randomDirection = glm::vec3(-surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); + diffusion = glm::vec3(-randomness, remainder * remainderSignA, remainder * remainderSignB); } else if (face == MAX_X_FACE) { - randomDirection = glm::vec3(surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); + diffusion = glm::vec3(randomness, remainder * remainderSignA, remainder * remainderSignB); } else if (face == MIN_Y_FACE) { - randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, -surfaceRandomness, surfaceRemainder * altRemainderSignB); + diffusion = glm::vec3(remainder * remainderSignA, -randomness, remainder * remainderSignB); } else if (face == MAX_Y_FACE) { - randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRandomness, surfaceRemainder * altRemainderSignB); + diffusion = glm::vec3(remainder * remainderSignA, randomness, remainder * remainderSignB); } else if (face == MIN_Z_FACE) { - randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, -surfaceRandomness); + diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -randomness); } else if (face == MAX_Z_FACE) { - randomDirection = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, surfaceRandomness); + diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, randomness); } - randomDirection = glm::normalize(randomDirection); + diffusion = glm::normalize(diffusion); - /* - qDebug() << "DIFFUSION... addSoundSource()... partialDiffusionAttenuation=" << partialDiffusionAttenuation << "\n" << - " MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT << "\n" << - " previous direction=[" << direction.x << "," << direction.y << "," << direction.z << "]" << "\n" << - " randomDirection=[" << randomDirection.x << "," << randomDirection.y << "," << randomDirection.z << "]" << "\n" << - " end=[" << end.x << "," << end.y << "," << end.z << "]"; - */ + if (wantExtraDebuggging) { + qDebug() << "DIFFUSION... addSoundSource()... 
" << + " partialDiffusionAttenuation=" << partialDiffusionAttenuation << "\n" << + " MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT << "\n" << + " direction=[" << direction.x << "," << direction.y << "," << direction.z << "]\n" << + " diffusion=[" << diffusion.x << "," << diffusion.y << "," << diffusion.z << "]\n" << + " end=[" << end.x << "," << end.y << "," << end.z << "]"; + } // add sound sources for these diffusions - addSoundSource(end, randomDirection, partialDiffusionAttenuation, currentDelay); + addSoundSource(end, diffusion, partialDiffusionAttenuation, currentDelay); } } // if our reflective attenuation is above our minimum, then add our reflection point and // allow our path to continue - /* - if (isDiffusion) { + if (wantExtraDebuggging && isDiffusion) { qDebug() << "checking diffusion"; qDebug() << "reflectiveAttenuation=" << reflectiveAttenuation; qDebug() << "totalDiffusionAttenuation=" << totalDiffusionAttenuation; qDebug() << "toListenerAttenuation=" << toListenerAttenuation; qDebug() << "(reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation=" << ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation); } - */ if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS) { @@ -906,21 +849,15 @@ int AudioReflector::anylizePathsSingleStep() { } } else { path->finalized = true; // if we're too quiet, then we're done - /* - if (isDiffusion) { + if (wantExtraDebuggging && isDiffusion) { qDebug() << "diffusion too quiet!"; } - */ } } else { - //qDebug() << "whichPath=" << activePaths << "path->bounceCount=" << path->bounceCount << "ray missed..."; path->finalized = true; // if it doesn't intersect, then it is finished - - /* - if (isDiffusion) { + if (wantExtraDebuggging && isDiffusion) { qDebug() << "diffusion doesn't intersect!"; } - */ } } } diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 3e8f06e8a7..02e43c4b4f 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -169,8 +169,8 @@ private: void addSoundSource(const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation, float initialDelay); // helper that handles audioPath analysis - int anylizePathsSingleStep(); - void anylizePaths(); + int analyzePathsSingleStep(); + void analyzePaths(); void newDrawRays(); void drawPath(AudioPath* path, const glm::vec3& originalColor); void newCalculateAllReflections(); From 0d308e7cb99a88a79c0df5aed158e26ad4b09f5d Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Mon, 14 Apr 2014 11:58:49 -0700 Subject: [PATCH 29/64] more cleanup add support for enable/disable rendering of paths --- interface/src/Audio.cpp | 2 - interface/src/AudioReflector.cpp | 105 ++++++++++++++++++++----------- interface/src/AudioReflector.h | 8 ++- interface/src/Menu.cpp | 4 ++ interface/src/Menu.h | 1 + interface/src/ui/Stats.cpp | 8 +-- 6 files changed, 85 insertions(+), 43 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 77b9fbb16b..2cc02b1368 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -822,8 +822,6 @@ void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) { // add the next numNetworkOutputSamples from each QByteArray // in our _localInjectionByteArrays QVector to the localInjectedSamples - - if (Menu::getInstance()->isOptionChecked(MenuOption::LowPassFilter)) { int channels = _desiredOutputFormat.channelCount(); int 
filterSamples = numNetworkOutputSamples / channels;
diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index 726cbfac57..80eebb990d 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -39,12 +39,24 @@ AudioReflector::AudioReflector(QObject* parent) :
 
 
 void AudioReflector::render() {
-    if (!_myAvatar) {
-        return; // exit early if not set up correctly
+    
+    // if we're not set up yet, or we're not processing spatial audio, then exit early
+    if (!_myAvatar || !_audio->getProcessSpatialAudio()) {
+        return;
+    }
+
+    bool withDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
+
+    // Even if we're not rendering, use this as a chance to recalculate our reflections
+    if (withDiffusions) {
+        newCalculateAllReflections();
+    } else {
+        calculateAllReflections();
     }
 
-    if (_audio->getProcessSpatialAudio()) {
-        if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)) {
+    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingRenderPaths)) {
+        // here's where we actually render
+        if (withDiffusions) {
             newDrawRays();
         } else {
             drawRays();
@@ -52,8 +64,6 @@ void AudioReflector::render() {
         }
     }
 }
 
-
-
 // delay = 1ms per foot
 // = 3ms per meter
 // attenuation =
@@ -62,6 +72,9 @@
 float AudioReflector::getDelayFromDistance(float distance) {
     float delay = (_soundMsPerMeter * distance);
 
+    // NOTE: kind of hacky; the old code (which didn't handle diffusions) assumes that this function
+    // will add in any and all pre delay. But the new method (which includes diffusions) handles pre delay
+    // on its own. So we only add in pre delay if the pre delay is enabled, and we're not in diffusion mode
     if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) &&
         !Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)) {
@@ -71,17 +84,6 @@ float AudioReflector::getDelayFromDistance(float distance) {
     return delay;
 }
 
-// **option 1**: this is what we're using
-const float PER_BOUNCE_ATTENUATION_FACTOR = 0.5f;
-
-// **option 2**: we're not using these
-//const float BOUNCE_ATTENUATION_FACTOR = 0.125f;
-// each bounce we adjust our attenuation by this factor, the result is an asymptotically decreasing attenuation...
-// 0.125, 0.25, 0.5, ...
-//const float PER_BOUNCE_ATTENUATION_ADJUSTMENT = 2.0f;
-// we don't grow larger than this, which means by the 4th bounce we don't get that much less quiet
-//const float MAX_BOUNCE_ATTENUATION = 0.99f;
-
 float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
     const float DISTANCE_SCALE = 2.5f;
     const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
@@ -100,8 +102,12 @@ float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
     return distanceCoefficient;
 }
 
-float getBounceAttenuationCoefficient(int bounceCount) {
-    return PER_BOUNCE_ATTENUATION_FACTOR * bounceCount;
+// This is used in the "old" model without diffusions... 
it's essentially the amount of energy that is reflected on each bounce +float AudioReflector::getBounceAttenuationCoefficient(int bounceCount) { + // now we know the current attenuation for the "perfect" reflection case, but we now incorporate + // our surface materials to determine how much of this ray is absorbed, reflected, and diffused + SurfaceCharacteristics material = getSurfaceCharacteristics(); + return material.reflectiveRatio * bounceCount; } glm::vec3 getFaceNormal(BoxFace face) { @@ -130,6 +136,7 @@ glm::vec3 getFaceNormal(BoxFace face) { void AudioReflector::reset() { _reflections = 0; + _diffusionPathCount = 0; _averageAttenuation = 0.0f; _maxAttenuation = 0.0f; _minAttenuation = 0.0f; @@ -151,7 +158,6 @@ void AudioReflector::reset() { _rightReflections.size() + _upReflections.size() + _downReflections.size(); - } void AudioReflector::calculateAllReflections() { @@ -161,7 +167,9 @@ void AudioReflector::calculateAllReflections() { bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented); glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation(); - bool shouldRecalc = _reflections == 0 || _myAvatar->getHead()->getPosition() != _origin || (orientation != _orientation); + bool shouldRecalc = _reflections == 0 + || !isSimilarPosition(_myAvatar->getHead()->getPosition(), _origin) + || !isSimilarOrientation(orientation, _orientation); if (shouldRecalc) { QMutexLocker locker(&_mutex); @@ -236,7 +244,8 @@ QVector AudioReflector::calculateReflections(const glm::vec3& earPosi float earDistance = glm::distance(end, earPosition); float totalDistance = earDistance + distance; totalDelay = getDelayFromDistance(totalDistance); - currentAttenuation = getDistanceAttenuationCoefficient(totalDistance) * getBounceAttenuationCoefficient(bounceCount); + currentAttenuation = getDistanceAttenuationCoefficient(totalDistance) * + getBounceAttenuationCoefficient(bounceCount); if (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS) { reflectionPoints.push_back(end); @@ -409,9 +418,6 @@ void AudioReflector::injectAudiblePoint(const AudioPoint& audiblePoint, int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND; int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND; - //qDebug() << "leftTotalDistance=" << leftTotalDistance << "rightTotalDistance=" << rightTotalDistance; - //qDebug() << "leftEarDelay=" << leftEarDelay << "rightEarDelay=" << rightEarDelay; - float rightEarAttenuation = getDistanceAttenuationCoefficient(rightEarDistance) * audiblePoint.attenuation; float leftEarAttenuation = getDistanceAttenuationCoefficient(leftEarDistance) * audiblePoint.attenuation; @@ -463,8 +469,6 @@ void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArr } void AudioReflector::newEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { - //quint64 start = usecTimestampNow(); - _maxDelay = 0; _maxAttenuation = 0.0f; _minDelay = std::numeric_limits::max(); @@ -483,9 +487,12 @@ void AudioReflector::newEchoAudio(unsigned int sampleTime, const QByteArray& sam _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount; _averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount; _reflections = _audiblePoints.size(); - - //quint64 end = usecTimestampNow(); - //qDebug() << "AudioReflector::addSamples()... 
elapsed=" << (end - start); + _diffusionPathCount = countDiffusionPaths(); + + if (_reflections == 0) { + _minDelay = 0.0f; + _minAttenuation = 0.0f; + } } void AudioReflector::oldEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { @@ -521,14 +528,29 @@ void AudioReflector::oldEchoAudio(unsigned int sampleTime, const QByteArray& sam _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount; _averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount; + _reflections = _frontRightUpReflections.size() + + _frontLeftUpReflections.size() + + _backRightUpReflections.size() + + _backLeftUpReflections.size() + + _frontRightDownReflections.size() + + _frontLeftDownReflections.size() + + _backRightDownReflections.size() + + _backLeftDownReflections.size() + + _frontReflections.size() + + _backReflections.size() + + _leftReflections.size() + + _rightReflections.size() + + _upReflections.size() + + _downReflections.size(); + _diffusionPathCount = 0; - //quint64 end = usecTimestampNow(); - //qDebug() << "AudioReflector::addSamples()... elapsed=" << (end - start); + if (_reflections == 0) { + _minDelay = 0.0f; + _minAttenuation = 0.0f; + } } void AudioReflector::drawRays() { - calculateAllReflections(); - const glm::vec3 RED(1,0,0); QMutexLocker locker(&_mutex); @@ -620,8 +642,6 @@ void AudioReflector::newCalculateAllReflections() { } void AudioReflector::newDrawRays() { - newCalculateAllReflections(); - const glm::vec3 RED(1,0,0); const glm::vec3 GREEN(0,1,0); @@ -705,6 +725,19 @@ void AudioReflector::analyzePaths() { steps++; } _reflections = _audiblePoints.size(); + _diffusionPathCount = countDiffusionPaths(); +} + +int AudioReflector::countDiffusionPaths() { + int diffusionCount = 0; + + foreach(AudioPath* const& path, _audioPaths) { + // if this is NOT an original reflection then it's a diffusion path + if (path->startPoint != _origin) { + diffusionCount++; + } + } + return diffusionCount; } int AudioReflector::analyzePathsSingleStep() { diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 02e43c4b4f..931942da3f 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -70,6 +70,7 @@ public: float getMinDelayMsecs() const { return _minDelay; } float getMinAttenuation() const { return _minAttenuation; } float getDelayFromDistance(float distance); + int getDiffusionPathCount() const { return _diffusionPathCount; } void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); @@ -119,9 +120,13 @@ private: // helper for generically calculating attenuation based on distance float getDistanceAttenuationCoefficient(float distance); + + // helper for generically calculating attenuation based on bounce count, used in old/non-diffusion mode + float getBounceAttenuationCoefficient(int bounceCount); // statistics int _reflections; + int _diffusionPathCount; int _delayCount; float _totalDelay; float _averageDelay; @@ -174,13 +179,14 @@ private: void newDrawRays(); void drawPath(AudioPath* path, const glm::vec3& originalColor); void newCalculateAllReflections(); + int countDiffusionPaths(); void injectAudiblePoint(const AudioPoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate); void oldEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); void newEchoAudio(unsigned 
int sampleTime, const QByteArray& samples, const QAudioFormat& format); // return the surface characteristics of the element we hit - SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit); + SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit = NULL); QMutex _mutex; diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index eb23d227bf..baa9aa314b 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -411,6 +411,10 @@ Menu::Menu() : addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingWithDiffusions, Qt::CTRL | Qt::SHIFT | Qt::Key_W, true); + + addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingRenderPaths, + Qt::CTRL | Qt::SHIFT | Qt::Key_R, + true); addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter, Qt::CTRL | Qt::SHIFT | Qt::Key_F, diff --git a/interface/src/Menu.h b/interface/src/Menu.h index 9a659e894d..fe68d78269 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -244,6 +244,7 @@ namespace MenuOption { const QString AudioSpatialProcessingStereoSource = "Audio Spatial Processing Stereo Source"; const QString AudioSpatialProcessingHeadOriented = "Audio Spatial Processing Head Oriented"; const QString AudioSpatialProcessingWithDiffusions = "Audio Spatial Processing With Diffusions"; + const QString AudioSpatialProcessingRenderPaths = "Audio Spatial Processing Render Paths"; const QString Avatars = "Avatars"; const QString Bandwidth = "Bandwidth Display"; const QString BandwidthDetails = "Bandwidth Details"; diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp index befa710903..83b700484c 100644 --- a/interface/src/ui/Stats.cpp +++ b/interface/src/ui/Stats.cpp @@ -513,10 +513,8 @@ void Stats::display( // add some reflection stats char reflectionsStatus[128]; - sprintf(reflectionsStatus, "Reflections: %d, Diffusion: %s, Original: %s, Ears: %s, Source: %s", + sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s", audioReflector->getReflections(), - (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions) - ? "yes" : "no"), (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal) ? "included" : "silent"), (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars) @@ -552,7 +550,9 @@ void Stats::display( bool diffusionEnabled = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); int fanout = diffusionEnabled ? audioReflector->getDiffusionFanout() : 0; - sprintf(reflectionsStatus, "Diffusion: %s, Fanout: %d", (diffusionEnabled ? "yes" : "no"), fanout); + int diffusionPaths = diffusionEnabled ? audioReflector->getDiffusionPathCount() : 0; + sprintf(reflectionsStatus, "Diffusion: %s, Fanout: %d, Paths: %d", + (diffusionEnabled ? 
"yes" : "no"), fanout, diffusionPaths); verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); From 5e42daaa5160c793811f25618390da21be685590 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Mon, 14 Apr 2014 12:32:23 -0700 Subject: [PATCH 30/64] fix predelay in the diffusion case --- examples/audioReflectorTools.js | 2 +- interface/src/AudioReflector.cpp | 20 ++++++++++++++------ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/examples/audioReflectorTools.js b/examples/audioReflectorTools.js index 99baf7260c..711f2a8e1f 100644 --- a/examples/audioReflectorTools.js +++ b/examples/audioReflectorTools.js @@ -10,7 +10,7 @@ // -var delayScale = 100.0; +var delayScale = 500.0; var fanoutScale = 10.0; var speedScale = 20; var factorScale = 5.0; diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 80eebb990d..b6143f8744 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -407,7 +407,14 @@ void AudioReflector::injectAudiblePoint(const AudioPoint& audiblePoint, float rightEarDelayMsecs = getDelayFromDistance(rightEarDistance) + audiblePoint.delay; float leftEarDelayMsecs = getDelayFromDistance(leftEarDistance) + audiblePoint.delay; - + +/* +qDebug() << "injectAudiblePoint()... "; +qDebug() << " audiblePoint.delay=" << audiblePoint.delay; +qDebug() << " rightEarDelayMsecs=" << rightEarDelayMsecs; +qDebug() << " leftEarDelayMsecs=" << leftEarDelayMsecs; +*/ + _totalDelay += rightEarDelayMsecs + leftEarDelayMsecs; _delayCount += 2; _maxDelay = std::max(_maxDelay,rightEarDelayMsecs); @@ -700,7 +707,7 @@ void AudioReflector::analyzePaths() { float initialAttenuation = 1.0f; float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? _preDelay : 0.0f; - + addSoundSource(_origin, right, initialAttenuation, preDelay); addSoundSource(_origin, front, initialAttenuation, preDelay); addSoundSource(_origin, up, initialAttenuation, preDelay); @@ -758,7 +765,6 @@ int AudioReflector::analyzePathsSingleStep() { float currentAttenuation = path->lastAttenuation; float currentDelay = path->lastDelay; // start with our delay so far float pathDistance = path->lastDistance; - float totalDelay = path->lastDelay; // start with our delay so far if (!path->finalized) { activePaths++; @@ -784,7 +790,6 @@ int AudioReflector::analyzePathsSingleStep() { // We aren't using this... should we be???? float toListenerDistance = glm::distance(end, _listenerPosition); - float totalDistance = toListenerDistance + pathDistance; // adjust our current delay by just the delay from the most recent ray currentDelay += getDelayFromDistance(distance); @@ -801,7 +806,7 @@ int AudioReflector::analyzePathsSingleStep() { float partialDiffusionAttenuation = _diffusionFanout < 1 ? 
0.0f : totalDiffusionAttenuation / _diffusionFanout;
 
                     // total delay includes the bounce back to listener
-                    totalDelay = getDelayFromDistance(totalDistance);
+                    float totalDelay = currentDelay + getDelayFromDistance(toListenerDistance);
                     float toListenerAttenuation = getDistanceAttenuationCoefficient(toListenerDistance);
 
                     // if our resulting partial diffusion attenuation, is still above our minimum attenuation
@@ -862,7 +867,10 @@ int AudioReflector::analyzePathsSingleStep() {
                         && totalDelay < MAXIMUM_DELAY_MS) {
 
                         // add this location, as the reflective attenuation as well as the total diffusion attenuation
-                        AudioPoint point = { end, totalDelay, reflectiveAttenuation + totalDiffusionAttenuation };
+                        // NOTE: we add the delay to the audible point, not back to the listener. The additional delay
+                        // and attenuation to the listener are recalculated at the point where we actually inject the
+                        // audio so that it can be adjusted to ear position
+                        AudioPoint point = { end, currentDelay, reflectiveAttenuation + totalDiffusionAttenuation };
                         _audiblePoints.push_back(point);
 
                         // add this location to the path points, so we can visualize it

From 8a6da915aeb361610e26e78a300ffa35770f26d4 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Mon, 14 Apr 2014 13:25:01 -0700
Subject: [PATCH 31/64] fix predelay in the diffusion case

---
 interface/src/AudioReflector.cpp | 105 ++++++++++++++++++++-----------
 interface/src/AudioReflector.h   |   2 +
 interface/src/Menu.cpp           |   8 ++-
 interface/src/Menu.h             |   1 +
 4 files changed, 76 insertions(+), 40 deletions(-)

diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index b6143f8744..31ceaa2594 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -32,7 +32,8 @@ AudioReflector::AudioReflector(QObject* parent) :
     _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
     _diffusionFanout(DEFAULT_DIFFUSION_FANOUT),
     _absorptionRatio(DEFAULT_ABSORPTION_RATIO),
-    _diffusionRatio(DEFAULT_DIFFUSION_RATIO)
+    _diffusionRatio(DEFAULT_DIFFUSION_RATIO),
+    _withDiffusion(false)
 {
     reset();
 }
@@ -110,28 +111,30 @@ float AudioReflector::getBounceAttenuationCoefficient(int bounceCount) {
     return material.reflectiveRatio * bounceCount;
 }
 
-glm::vec3 getFaceNormal(BoxFace face) {
-    glm::vec3 slightlyRandomFaceNormal;
+glm::vec3 AudioReflector::getFaceNormal(BoxFace face) {
+    bool wantSlightRandomness = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces);
 
-    float surfaceRandomness = randFloatInRange(0.99f,1.0f);
-    float surfaceRemainder = (1.0f - surfaceRandomness)/2.0f;
-    float altRemainderSignA = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0;
-    float altRemainderSignB = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0;
+    glm::vec3 faceNormal;
+
+    float normalLength = wantSlightRandomness ? randFloatInRange(0.99f,1.0f) : 1.0f;
+    float remainder = (1.0f - normalLength)/2.0f;
+    float remainderSignA = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0;
+    float remainderSignB = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? 
-1.0 : 1.0; if (face == MIN_X_FACE) { - slightlyRandomFaceNormal = glm::vec3(-surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); + faceNormal = glm::vec3(-normalLength, remainder * remainderSignA, remainder * remainderSignB); } else if (face == MAX_X_FACE) { - slightlyRandomFaceNormal = glm::vec3(surfaceRandomness, surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB); + faceNormal = glm::vec3(normalLength, remainder * remainderSignA, remainder * remainderSignB); } else if (face == MIN_Y_FACE) { - slightlyRandomFaceNormal = glm::vec3(surfaceRemainder * altRemainderSignA, -surfaceRandomness, surfaceRemainder * altRemainderSignB); + faceNormal = glm::vec3(remainder * remainderSignA, -normalLength, remainder * remainderSignB); } else if (face == MAX_Y_FACE) { - slightlyRandomFaceNormal = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRandomness, surfaceRemainder * altRemainderSignB); + faceNormal = glm::vec3(remainder * remainderSignA, normalLength, remainder * remainderSignB); } else if (face == MIN_Z_FACE) { - slightlyRandomFaceNormal = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, -surfaceRandomness); + faceNormal = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -normalLength); } else if (face == MAX_Z_FACE) { - slightlyRandomFaceNormal = glm::vec3(surfaceRemainder * altRemainderSignA, surfaceRemainder * altRemainderSignB, surfaceRandomness); + faceNormal = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, normalLength); } - return slightlyRandomFaceNormal; + return faceNormal; } void AudioReflector::reset() { @@ -166,20 +169,30 @@ void AudioReflector::calculateAllReflections() { // TODO: what about case where new voxels are added in front of us??? bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented); glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation(); - + glm::vec3 origin = _myAvatar->getHead()->getPosition(); + glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition(); + + bool withDiffusion = false; // this is the non-diffusion mode. + bool shouldRecalc = _reflections == 0 - || !isSimilarPosition(_myAvatar->getHead()->getPosition(), _origin) - || !isSimilarOrientation(orientation, _orientation); + || !isSimilarPosition(origin, _origin) + || !isSimilarOrientation(orientation, _orientation) + || !isSimilarPosition(listenerPosition, _listenerPosition) + || (withDiffusion != _withDiffusion); if (shouldRecalc) { + +qDebug() << "RECALC...... 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; + QMutexLocker locker(&_mutex); quint64 start = usecTimestampNow(); - _origin = _myAvatar->getHead()->getPosition(); - glm::vec3 averageEarPosition = _myAvatar->getHead()->getPosition(); - _listenerPosition = averageEarPosition; + _origin = origin; + _listenerPosition = listenerPosition; _orientation = orientation; + _withDiffusion = withDiffusion; + glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT); glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP); glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT); @@ -195,20 +208,20 @@ void AudioReflector::calculateAllReflections() { glm::vec3 backRightDown = glm::normalize(back + right + down); glm::vec3 backLeftDown = glm::normalize(back + left + down); - _frontRightUpReflections = calculateReflections(averageEarPosition, _origin, frontRightUp); - _frontLeftUpReflections = calculateReflections(averageEarPosition, _origin, frontLeftUp); - _backRightUpReflections = calculateReflections(averageEarPosition, _origin, backRightUp); - _backLeftUpReflections = calculateReflections(averageEarPosition, _origin, backLeftUp); - _frontRightDownReflections = calculateReflections(averageEarPosition, _origin, frontRightDown); - _frontLeftDownReflections = calculateReflections(averageEarPosition, _origin, frontLeftDown); - _backRightDownReflections = calculateReflections(averageEarPosition, _origin, backRightDown); - _backLeftDownReflections = calculateReflections(averageEarPosition, _origin, backLeftDown); - _frontReflections = calculateReflections(averageEarPosition, _origin, front); - _backReflections = calculateReflections(averageEarPosition, _origin, back); - _leftReflections = calculateReflections(averageEarPosition, _origin, left); - _rightReflections = calculateReflections(averageEarPosition, _origin, right); - _upReflections = calculateReflections(averageEarPosition, _origin, up); - _downReflections = calculateReflections(averageEarPosition, _origin, down); + _frontRightUpReflections = calculateReflections(listenerPosition, _origin, frontRightUp); + _frontLeftUpReflections = calculateReflections(listenerPosition, _origin, frontLeftUp); + _backRightUpReflections = calculateReflections(listenerPosition, _origin, backRightUp); + _backLeftUpReflections = calculateReflections(listenerPosition, _origin, backLeftUp); + _frontRightDownReflections = calculateReflections(listenerPosition, _origin, frontRightDown); + _frontLeftDownReflections = calculateReflections(listenerPosition, _origin, frontLeftDown); + _backRightDownReflections = calculateReflections(listenerPosition, _origin, backRightDown); + _backLeftDownReflections = calculateReflections(listenerPosition, _origin, backLeftDown); + _frontReflections = calculateReflections(listenerPosition, _origin, front); + _backReflections = calculateReflections(listenerPosition, _origin, back); + _leftReflections = calculateReflections(listenerPosition, _origin, left); + _rightReflections = calculateReflections(listenerPosition, _origin, right); + _upReflections = calculateReflections(listenerPosition, _origin, up); + _downReflections = calculateReflections(listenerPosition, _origin, down); quint64 end = usecTimestampNow(); @@ -237,7 +250,7 @@ QVector AudioReflector::calculateReflections(const glm::vec3& earPosi int bounceCount = 1; while (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS && bounceCount < ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { - if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { + 
if (_voxels->findRayIntersection(start, direction, elementHit, distance, face, Octree::Lock)) { glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); totalDistance += glm::distance(start, end); @@ -627,18 +640,26 @@ void AudioReflector::newCalculateAllReflections() { glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation(); glm::vec3 origin = _myAvatar->getHead()->getPosition(); glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition(); + + bool withDiffusion = true; // this is the diffusion mode. - bool shouldRecalc = _audiblePoints.size() == 0 + // _audiblePoints.size() == 0 ?? + bool shouldRecalc = _reflections == 0 || !isSimilarPosition(origin, _origin) || !isSimilarOrientation(orientation, _orientation) - || !isSimilarPosition(listenerPosition, _listenerPosition); + || !isSimilarPosition(listenerPosition, _listenerPosition) + || (withDiffusion != _withDiffusion); if (shouldRecalc) { + +qDebug() << "RECALC...... !!!!!!!!!!!!!!!!!!!!!!!!!!!!"; + QMutexLocker locker(&_mutex); quint64 start = usecTimestampNow(); _origin = origin; _orientation = orientation; _listenerPosition = listenerPosition; + _withDiffusion = withDiffusion; analyzePaths(); // actually does the work quint64 end = usecTimestampNow(); const bool wantDebugging = false; @@ -774,7 +795,7 @@ int AudioReflector::analyzePathsSingleStep() { if (wantExtraDebuggging && isDiffusion) { qDebug() << "diffusion bounceCount too high!"; } - } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { + } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face, Octree::Lock)) { glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); pathDistance += glm::distance(start, end); @@ -893,12 +914,20 @@ int AudioReflector::analyzePathsSingleStep() { if (wantExtraDebuggging && isDiffusion) { qDebug() << "diffusion too quiet!"; } + + if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) { + qDebug() << "too quiet!"; + } + if (totalDelay >= MAXIMUM_DELAY_MS) { + qDebug() << "too much delay!"; + } } } else { path->finalized = true; // if it doesn't intersect, then it is finished if (wantExtraDebuggging && isDiffusion) { qDebug() << "diffusion doesn't intersect!"; } + qDebug() << "doesn't intersect!"; } } } diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 931942da3f..6cbb4b433e 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -180,6 +180,7 @@ private: void drawPath(AudioPath* path, const glm::vec3& originalColor); void newCalculateAllReflections(); int countDiffusionPaths(); + glm::vec3 getFaceNormal(BoxFace face); void injectAudiblePoint(const AudioPoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate); void oldEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); @@ -202,6 +203,7 @@ private: float _diffusionRatio; float _reflectiveRatio; + bool _withDiffusion; }; diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index baa9aa314b..a1a74bc6ce 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -359,7 +359,7 @@ Menu::Menu() : addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::ShowCulledSharedFaces, - Qt::CTRL | Qt::SHIFT | Qt::Key_X, + 0, false, appInstance->getVoxels(), SLOT(showCulledSharedFaces())); @@ -415,7 +415,11 @@ Menu::Menu() : 
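Elsewhere in this patch, both calculateAllReflections() and newCalculateAllReflections() converge on the same recalculation guard: the expensive ray casting is redone only when the listener has actually moved or turned beyond a small tolerance, or the diffusion mode has toggled. A sketch of that dirty-check pattern, assuming glm as used elsewhere in this codebase (the helper names and epsilon values below are illustrative, not the engine's actual isSimilarPosition/isSimilarOrientation):

#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

// Positions are "similar" when within a small world-space distance of each other.
// (Illustrative epsilon; the real helpers live elsewhere in the codebase.)
static bool similarPosition(const glm::vec3& a, const glm::vec3& b, float epsilon = 0.01f) {
    return glm::distance(a, b) < epsilon;
}

// Unit quaternions q and -q encode the same rotation, so compare |dot| against 1.
static bool similarOrientation(const glm::quat& a, const glm::quat& b, float epsilon = 0.001f) {
    return std::fabs(glm::dot(a, b)) > 1.0f - epsilon;
}

// Recalculate only when something audible actually changed.
static bool shouldRecalc(int cachedReflections,
                         const glm::vec3& origin, const glm::vec3& cachedOrigin,
                         const glm::quat& orientation, const glm::quat& cachedOrientation,
                         bool withDiffusion, bool cachedWithDiffusion) {
    return cachedReflections == 0
        || !similarPosition(origin, cachedOrigin)
        || !similarOrientation(orientation, cachedOrientation)
        || (withDiffusion != cachedWithDiffusion);
}
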
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingRenderPaths, Qt::CTRL | Qt::SHIFT | Qt::Key_R, true); - + + addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces, + Qt::CTRL | Qt::SHIFT | Qt::Key_X, + true); + addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter, Qt::CTRL | Qt::SHIFT | Qt::Key_F, false); diff --git a/interface/src/Menu.h b/interface/src/Menu.h index fe68d78269..578d4e8dc6 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -245,6 +245,7 @@ namespace MenuOption { const QString AudioSpatialProcessingHeadOriented = "Audio Spatial Processing Head Oriented"; const QString AudioSpatialProcessingWithDiffusions = "Audio Spatial Processing With Diffusions"; const QString AudioSpatialProcessingRenderPaths = "Audio Spatial Processing Render Paths"; + const QString AudioSpatialProcessingSlightlyRandomSurfaces = "Audio Spatial Processing Slightly Random Surfaces"; const QString Avatars = "Avatars"; const QString Bandwidth = "Bandwidth Display"; const QString BandwidthDetails = "Bandwidth Details"; From 588231cebfa22cd4a9685be901568b8d391b9882 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Mon, 14 Apr 2014 15:36:20 -0700 Subject: [PATCH 32/64] reconcile old and new implementations so that 0 fan out produces same results --- interface/src/AudioReflector.cpp | 78 +++++++++++++++++++++++++------- interface/src/AudioReflector.h | 10 ++-- 2 files changed, 67 insertions(+), 21 deletions(-) diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 31ceaa2594..fdafa3d762 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -108,7 +108,7 @@ float AudioReflector::getBounceAttenuationCoefficient(int bounceCount) { // now we know the current attenuation for the "perfect" reflection case, but we now incorporate // our surface materials to determine how much of this ray is absorbed, reflected, and diffused SurfaceCharacteristics material = getSurfaceCharacteristics(); - return material.reflectiveRatio * bounceCount; + return powf(material.reflectiveRatio, bounceCount); } glm::vec3 AudioReflector::getFaceNormal(BoxFace face) { @@ -183,6 +183,21 @@ void AudioReflector::calculateAllReflections() { if (shouldRecalc) { qDebug() << "RECALC...... !!!!!!!!!!!!!!!!!!!!!!!!!!!!"; +if (_reflections == 0) { + qDebug() << "RECALC...... No reflections!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; +} +if (!isSimilarPosition(origin, _origin)) { + qDebug() << "RECALC...... origin changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; +} +if (!isSimilarOrientation(orientation, _orientation)) { + qDebug() << "RECALC...... orientation changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; +} +if (!isSimilarPosition(listenerPosition, _listenerPosition)) { + qDebug() << "RECALC...... listenerPosition changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; +} +if (withDiffusion != _withDiffusion) { + qDebug() << "RECALC...... withDiffusion changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; +} QMutexLocker locker(&_mutex); @@ -208,6 +223,7 @@ qDebug() << "RECALC...... 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; glm::vec3 backRightDown = glm::normalize(back + right + down); glm::vec3 backLeftDown = glm::normalize(back + left + down); + _rightReflections = calculateReflections(listenerPosition, _origin, right); _frontRightUpReflections = calculateReflections(listenerPosition, _origin, frontRightUp); _frontLeftUpReflections = calculateReflections(listenerPosition, _origin, frontLeftUp); _backRightUpReflections = calculateReflections(listenerPosition, _origin, backRightUp); @@ -219,7 +235,6 @@ qDebug() << "RECALC...... !!!!!!!!!!!!!!!!!!!!!!!!!!!!"; _frontReflections = calculateReflections(listenerPosition, _origin, front); _backReflections = calculateReflections(listenerPosition, _origin, back); _leftReflections = calculateReflections(listenerPosition, _origin, left); - _rightReflections = calculateReflections(listenerPosition, _origin, right); _upReflections = calculateReflections(listenerPosition, _origin, up); _downReflections = calculateReflections(listenerPosition, _origin, down); @@ -352,6 +367,14 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector= MAXIMUM_DELAY_MS) { - qDebug() << "too much delay!"; - } } } else { path->finalized = true; // if it doesn't intersect, then it is finished - if (wantExtraDebuggging && isDiffusion) { - qDebug() << "diffusion doesn't intersect!"; - } - qDebug() << "doesn't intersect!"; } } } From 839df97687fba31eafdafdfeefbcbdc3f49a1c3e Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Mon, 14 Apr 2014 16:46:25 -0700 Subject: [PATCH 34/64] make audio sliders initialize to default starting points --- examples/audioReflectorTools.js | 92 ++++++++++++++++++++++++++----- 1 file changed, 79 insertions(+), 13 deletions(-) diff --git a/examples/audioReflectorTools.js b/examples/audioReflectorTools.js index 711f2a8e1f..ae9f43fddb 100644 --- a/examples/audioReflectorTools.js +++ b/examples/audioReflectorTools.js @@ -10,17 +10,33 @@ // -var delayScale = 500.0; +var delayScale = 100.0; var fanoutScale = 10.0; var speedScale = 20; var factorScale = 5.0; +var topY = 250; +var sliderHeight = 35; + // This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to // move the slider -var delayY = 250; +var delayY = topY; +topY += sliderHeight; +var delayLabel = Overlays.addOverlay("text", { + x: 40, + y: delayY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 0, blue: 0}, + topMargin: 12, + leftMargin: 5, + text: "Delay:" + }); + var delaySlider = Overlays.addOverlay("image", { // alternate form of expressing bounds - bounds: { x: 100, y: delayY, width: 150, height: 35}, + bounds: { x: 100, y: delayY, width: 150, height: sliderHeight}, subImage: { x: 46, y: 0, width: 200, height: 71 }, imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", color: { red: 255, green: 255, blue: 255}, @@ -30,7 +46,7 @@ var delaySlider = Overlays.addOverlay("image", { // This is the thumb of our slider var delayMinThumbX = 110; var delayMaxThumbX = delayMinThumbX + 110; -var delayThumbX = (delayMinThumbX + delayMaxThumbX) / 2; +var delayThumbX = delayMinThumbX + ((delayMaxThumbX - delayMinThumbX) * (AudioReflector.getPreDelay() / delayScale)); var delayThumb = Overlays.addOverlay("image", { x: delayThumbX, y: delayY + 9, width: 18, height: 17, imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", color: { red: 255, green: 255, blue: 255}, alpha: 1 }); // This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to 
// move the slider -var fanoutY = 300; +var fanoutY = topY; +topY += sliderHeight; + +var fanoutLabel = Overlays.addOverlay("text", { + x: 40, + y: fanoutY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 0, blue: 0}, + topMargin: 12, + leftMargin: 5, + text: "Fanout:" + }); + var fanoutSlider = Overlays.addOverlay("image", { // alternate form of expressing bounds - bounds: { x: 100, y: fanoutY, width: 150, height: 35}, + bounds: { x: 100, y: fanoutY, width: 150, height: sliderHeight}, subImage: { x: 46, y: 0, width: 200, height: 71 }, imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", color: { red: 255, green: 255, blue: 255}, @@ -56,7 +86,7 @@ var fanoutSlider = Overlays.addOverlay("image", { // This is the thumb of our slider var fanoutMinThumbX = 110; var fanoutMaxThumbX = fanoutMinThumbX + 110; -var fanoutThumbX = (fanoutMinThumbX + fanoutMaxThumbX) / 2; +var fanoutThumbX = fanoutMinThumbX + ((fanoutMaxThumbX - fanoutMinThumbX) * (AudioReflector.getDiffusionFanout() / fanoutScale)); var fanoutThumb = Overlays.addOverlay("image", { x: fanoutThumbX, y: fanoutY + 9, @@ -70,10 +100,24 @@ var fanoutThumb = Overlays.addOverlay("image", { // This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to // move the slider -var speedY = 350; +var speedY = topY; +topY += sliderHeight; + +var speedLabel = Overlays.addOverlay("text", { + x: 40, + y: speedY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 0, blue: 0}, + topMargin: 6, + leftMargin: 5, + text: "Speed\nin ms/m:" + }); + var speedSlider = Overlays.addOverlay("image", { // alternate form of expressing bounds - bounds: { x: 100, y: speedY, width: 150, height: 35}, + bounds: { x: 100, y: speedY, width: 150, height: sliderHeight}, subImage: { x: 46, y: 0, width: 200, height: 71 }, imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", color: { red: 255, green: 255, blue: 255}, @@ -83,7 +127,7 @@ var speedSlider = Overlays.addOverlay("image", { // This is the thumb of our slider var speedMinThumbX = 110; var speedMaxThumbX = speedMinThumbX + 110; -var speedThumbX = (speedMinThumbX + speedMaxThumbX) / 2; +var speedThumbX = speedMinThumbX + ((speedMaxThumbX - speedMinThumbX) * (AudioReflector.getSoundMsPerMeter() / speedScale)); var speedThumb = Overlays.addOverlay("image", { x: speedThumbX, y: speedY+9, @@ -96,10 +140,25 @@ var speedThumb = Overlays.addOverlay("image", { // This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to // move the slider -var factorY = 400; +var factorY = topY; +topY += sliderHeight; + +var factorLabel = Overlays.addOverlay("text", { + x: 40, + y: factorY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 0, blue: 0}, + topMargin: 6, + leftMargin: 5, + text: "Attenuation\nFactor:" + }); + + var factorSlider = Overlays.addOverlay("image", { // alternate form of expressing bounds - bounds: { x: 100, y: factorY, width: 150, height: 35}, + bounds: { x: 100, y: factorY, width: 150, height: sliderHeight}, subImage: { x: 46, y: 0, width: 200, height: 71 }, imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", color: { red: 255, green: 255, blue: 255}, @@ -109,7 +168,7 @@ var factorSlider = Overlays.addOverlay("image", { // This is the 
thumb of our slider var factorMinThumbX = 110; var factorMaxThumbX = factorMinThumbX + 110; -var factorThumbX = (factorMinThumbX + factorMaxThumbX) / 2; +var factorThumbX = factorMinThumbX + ((factorMaxThumbX - factorMinThumbX) * (AudioReflector.getDistanceAttenuationScalingFactor() / factorScale)); var factorThumb = Overlays.addOverlay("image", { x: factorThumbX, y: factorY+9, @@ -123,12 +182,19 @@ var factorThumb = Overlays.addOverlay("image", { // When our script shuts down, we should clean up all of our overlays function scriptEnding() { + Overlays.deleteOverlay(factorLabel); Overlays.deleteOverlay(factorThumb); Overlays.deleteOverlay(factorSlider); + + Overlays.deleteOverlay(speedLabel); Overlays.deleteOverlay(speedThumb); Overlays.deleteOverlay(speedSlider); + + Overlays.deleteOverlay(delayLabel); Overlays.deleteOverlay(delayThumb); Overlays.deleteOverlay(delaySlider); + + Overlays.deleteOverlay(fanoutLabel); Overlays.deleteOverlay(fanoutThumb); Overlays.deleteOverlay(fanoutSlider); } From 7448e0ab16342212cd9d9153e9b5da8ae53a066b Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Mon, 14 Apr 2014 20:28:20 -0700 Subject: [PATCH 35/64] move to sub menu --- interface/src/Menu.cpp | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index a1a74bc6ce..e7c5b84a42 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -382,48 +382,51 @@ Menu::Menu() : false, appInstance->getAudio(), SLOT(toggleToneInjection())); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessing, + + addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter, + Qt::CTRL | Qt::SHIFT | Qt::Key_F, + false); + + QMenu* spatialAudioMenu = audioDebugMenu->addMenu("Spatial Audio"); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessing, Qt::CTRL | Qt::SHIFT | Qt::Key_M, false, appInstance->getAudio(), SLOT(toggleAudioSpatialProcessing())); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingIncudeOriginal, + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingIncudeOriginal, Qt::CTRL | Qt::SHIFT | Qt::Key_O, true); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingSeparateEars, + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingSeparateEars, Qt::CTRL | Qt::SHIFT | Qt::Key_E, true); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingPreDelay, + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingPreDelay, Qt::CTRL | Qt::SHIFT | Qt::Key_D, true); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingStereoSource, + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingStereoSource, Qt::CTRL | Qt::SHIFT | Qt::Key_S, true); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingHeadOriented, + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingHeadOriented, Qt::CTRL | Qt::SHIFT | Qt::Key_H, true); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingWithDiffusions, + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingWithDiffusions, Qt::CTRL | Qt::SHIFT | Qt::Key_W, true); - 
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingRenderPaths, + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingRenderPaths, Qt::CTRL | Qt::SHIFT | Qt::Key_R, true); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces, + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces, Qt::CTRL | Qt::SHIFT | Qt::Key_X, true); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter, - Qt::CTRL | Qt::SHIFT | Qt::Key_F, - false); - addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel, Qt::CTRL | Qt::SHIFT | Qt::Key_V, this, From 1ed5512925873dd664220204dbb2ded78b3731c0 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Mon, 14 Apr 2014 21:02:09 -0700 Subject: [PATCH 36/64] remove old mode, replaced by new mode --- interface/src/AudioReflector.cpp | 508 +++---------------------------- interface/src/AudioReflector.h | 54 +--- interface/src/ui/Stats.cpp | 6 +- 3 files changed, 44 insertions(+), 524 deletions(-) diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index fe1c26950a..d9dc678886 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -35,7 +35,14 @@ AudioReflector::AudioReflector(QObject* parent) : _diffusionRatio(DEFAULT_DIFFUSION_RATIO), _withDiffusion(false) { - reset(); + _reflections = 0; + _diffusionPathCount = 0; + _averageAttenuation = 0.0f; + _maxAttenuation = 0.0f; + _minAttenuation = 0.0f; + _averageDelay = 0; + _maxDelay = 0; + _minDelay = 0; } @@ -45,46 +52,27 @@ void AudioReflector::render() { if (!_myAvatar || !_audio->getProcessSpatialAudio()) { return; } - - bool withDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); - - // Even if we're not rendering, use this as a chance to recalculate our reflections - if (withDiffusions) { - newCalculateAllReflections(); - } else { - calculateAllReflections(); - } + // use this opportunity to calculate our reflections + calculateAllReflections(); + + // only render if we've been asked to do so if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingRenderPaths)) { - // here's where we actually render - if (withDiffusions) { - newDrawRays(); - } else { - drawRays(); - } + drawRays(); } } // delay = 1ms per foot // = 3ms per meter -// attenuation = -// BOUNCE_ATTENUATION_FACTOR [0.5] * (1/(1+distance)) float AudioReflector::getDelayFromDistance(float distance) { float delay = (_soundMsPerMeter * distance); - - // NOTE: kind of hacky, the old code (which didn't handle diffusions, assumes that this function - // will add in any and all pre delay. But the new method (which includes diffusions) handles pre delay - // on it's own. 
So we only add in pre delay if the pre delay is enabled, and we're not in diffusion mode - if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) && - !Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)) { - + if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) { delay += _preDelay; } - return delay; } +// attenuation = from the Audio Mixer float AudioReflector::getDistanceAttenuationCoefficient(float distance) { const float DISTANCE_SCALE = 2.5f; const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f; @@ -103,14 +91,6 @@ float AudioReflector::getDistanceAttenuationCoefficient(float distance) { return distanceCoefficient; } -// This is used in the "old" model with diffusions... it's essentially the amount of energy that is reflected on each bounce -float AudioReflector::getBounceAttenuationCoefficient(int bounceCount) { - // now we know the current attenuation for the "perfect" reflection case, but we now incorporate - // our surface materials to determine how much of this ray is absorbed, reflected, and diffused - SurfaceCharacteristics material = getSurfaceCharacteristics(); - return powf(material.reflectiveRatio, bounceCount); -} - glm::vec3 AudioReflector::getFaceNormal(BoxFace face) { bool wantSlightRandomness = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces); @@ -137,285 +117,10 @@ glm::vec3 AudioReflector::getFaceNormal(BoxFace face) { return faceNormal; } -void AudioReflector::reset() { - _reflections = 0; - _diffusionPathCount = 0; - _averageAttenuation = 0.0f; - _maxAttenuation = 0.0f; - _minAttenuation = 0.0f; - _averageDelay = 0; - _maxDelay = 0; - _minDelay = 0; - - _reflections = _frontRightUpReflections.size() + - _frontLeftUpReflections.size() + - _backRightUpReflections.size() + - _backLeftUpReflections.size() + - _frontRightDownReflections.size() + - _frontLeftDownReflections.size() + - _backRightDownReflections.size() + - _backLeftDownReflections.size() + - _frontReflections.size() + - _backReflections.size() + - _leftReflections.size() + - _rightReflections.size() + - _upReflections.size() + - _downReflections.size(); -} - -void AudioReflector::calculateAllReflections() { - - // only recalculate when we've moved... - // TODO: what about case where new voxels are added in front of us??? - bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented); - glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation(); - glm::vec3 origin = _myAvatar->getHead()->getPosition(); - glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition(); - - bool withDiffusion = false; // this is the non-diffusion mode. - - bool shouldRecalc = _reflections == 0 - || !isSimilarPosition(origin, _origin) - || !isSimilarOrientation(orientation, _orientation) - || !isSimilarPosition(listenerPosition, _listenerPosition) - || (withDiffusion != _withDiffusion); - - if (shouldRecalc) { - -qDebug() << "RECALC...... !!!!!!!!!!!!!!!!!!!!!!!!!!!!"; -if (_reflections == 0) { - qDebug() << "RECALC...... No reflections!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; -} -if (!isSimilarPosition(origin, _origin)) { - qDebug() << "RECALC...... origin changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; -} -if (!isSimilarOrientation(orientation, _orientation)) { - qDebug() << "RECALC...... 
orientation changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; -} -if (!isSimilarPosition(listenerPosition, _listenerPosition)) { - qDebug() << "RECALC...... listenerPosition changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; -} -if (withDiffusion != _withDiffusion) { - qDebug() << "RECALC...... withDiffusion changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; -} - - QMutexLocker locker(&_mutex); - - quint64 start = usecTimestampNow(); - - _origin = origin; - _listenerPosition = listenerPosition; - _orientation = orientation; - _withDiffusion = withDiffusion; - - glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT); - glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP); - glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT); - glm::vec3 left = -right; - glm::vec3 down = -up; - glm::vec3 back = -front; - glm::vec3 frontRightUp = glm::normalize(front + right + up); - glm::vec3 frontLeftUp = glm::normalize(front + left + up); - glm::vec3 backRightUp = glm::normalize(back + right + up); - glm::vec3 backLeftUp = glm::normalize(back + left + up); - glm::vec3 frontRightDown = glm::normalize(front + right + down); - glm::vec3 frontLeftDown = glm::normalize(front + left + down); - glm::vec3 backRightDown = glm::normalize(back + right + down); - glm::vec3 backLeftDown = glm::normalize(back + left + down); - - _rightReflections = calculateReflections(listenerPosition, _origin, right); - _frontRightUpReflections = calculateReflections(listenerPosition, _origin, frontRightUp); - _frontLeftUpReflections = calculateReflections(listenerPosition, _origin, frontLeftUp); - _backRightUpReflections = calculateReflections(listenerPosition, _origin, backRightUp); - _backLeftUpReflections = calculateReflections(listenerPosition, _origin, backLeftUp); - _frontRightDownReflections = calculateReflections(listenerPosition, _origin, frontRightDown); - _frontLeftDownReflections = calculateReflections(listenerPosition, _origin, frontLeftDown); - _backRightDownReflections = calculateReflections(listenerPosition, _origin, backRightDown); - _backLeftDownReflections = calculateReflections(listenerPosition, _origin, backLeftDown); - _frontReflections = calculateReflections(listenerPosition, _origin, front); - _backReflections = calculateReflections(listenerPosition, _origin, back); - _leftReflections = calculateReflections(listenerPosition, _origin, left); - _upReflections = calculateReflections(listenerPosition, _origin, up); - _downReflections = calculateReflections(listenerPosition, _origin, down); - - quint64 end = usecTimestampNow(); - - reset(); - - const bool wantDebugging = false; - if (wantDebugging) { - qDebug() << "calculateAllReflections() elapsed=" << (end - start); - } - } -} - -QVector AudioReflector::calculateReflections(const glm::vec3& earPosition, - const glm::vec3& origin, const glm::vec3& originalDirection) { - - QVector reflectionPoints; - glm::vec3 start = origin; - glm::vec3 direction = originalDirection; - OctreeElement* elementHit; - float distance; - BoxFace face; - const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point - float currentAttenuation = 1.0f; - float totalDistance = 0.0f; - float totalDelay = 0.0f; - int bounceCount = 1; - - while (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS && bounceCount < ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { - if (_voxels->findRayIntersection(start, direction, elementHit, distance, face, Octree::Lock)) { - glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); - 
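// (Aside for readers, not a line of the patch: SLIGHTLY_SHORT backs the hit
// point off by 0.1% along the ray, so the next ray cast starts just inside the
// reflection point and cannot immediately re-hit the face it bounced off. The
// while loop above stops when the surviving energy drops below
// MINIMUM_ATTENUATION_TO_REFLECT, the accumulated delay passes MAXIMUM_DELAY_MS,
// or bounceCount reaches ABSOLUTE_MAXIMUM_BOUNCE_COUNT. Note how PATCH 32's
// powf(reflectiveRatio, bounceCount) makes that energy decay geometrically:
// at a ratio of 0.5 it is 0.5, 0.25, 0.125 after one, two, three bounces,
// where the earlier linear form 0.5 * bounceCount wrongly grew with every
// bounce.)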
- totalDistance += glm::distance(start, end); - float earDistance = glm::distance(end, earPosition); - float totalDistance = earDistance + distance; - totalDelay = getDelayFromDistance(totalDistance); - currentAttenuation = getDistanceAttenuationCoefficient(totalDistance) * - getBounceAttenuationCoefficient(bounceCount); - - if (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS) { - reflectionPoints.push_back(end); - glm::vec3 faceNormal = getFaceNormal(face); - direction = glm::normalize(glm::reflect(direction,faceNormal)); - start = end; - bounceCount++; - } - } else { - currentAttenuation = 0.0f; - } - } - return reflectionPoints; -} - - -void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector& reflections) { - - glm::vec3 start = origin; - glm::vec3 color = originalColor; - const float COLOR_ADJUST_PER_BOUNCE = 0.75f; - - foreach (glm::vec3 end, reflections) { - drawVector(start, end, color); - start = end; - color = color * COLOR_ADJUST_PER_BOUNCE; - } -} - // set up our buffers for our attenuated and delayed samples const int NUMBER_OF_CHANNELS = 2; - -void AudioReflector::echoReflections(const glm::vec3& origin, const QVector& reflections, const QByteArray& samples, - unsigned int sampleTime, int sampleRate) { - - bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars); - bool wantStereo = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource); - glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() : - _myAvatar->getHead()->getPosition(); - glm::vec3 leftEarPosition = wantEarSeparation ? _myAvatar->getHead()->getLeftEarPosition() : - _myAvatar->getHead()->getPosition(); - glm::vec3 start = origin; - - int totalNumberOfSamples = samples.size() / sizeof(int16_t); - int totalNumberOfStereoSamples = samples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS); - - const int16_t* originalSamplesData = (const int16_t*)samples.constData(); - QByteArray attenuatedLeftSamples; - QByteArray attenuatedRightSamples; - attenuatedLeftSamples.resize(samples.size()); - attenuatedRightSamples.resize(samples.size()); - - int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data(); - int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data(); - - float rightDistance = 0; - float leftDistance = 0; - int bounceCount = 0; - - foreach (glm::vec3 end, reflections) { - bounceCount++; - - rightDistance += glm::distance(start, end); - leftDistance += glm::distance(start, end); - - // calculate the distance to the ears - float rightEarDistance = glm::distance(end, rightEarPosition); - float leftEarDistance = glm::distance(end, leftEarPosition); - - float rightTotalDistance = rightEarDistance + rightDistance; - float leftTotalDistance = leftEarDistance + leftDistance; - - float rightEarDelayMsecs = getDelayFromDistance(rightTotalDistance); - float leftEarDelayMsecs = getDelayFromDistance(leftTotalDistance); - - _totalDelay += rightEarDelayMsecs + leftEarDelayMsecs; - _delayCount += 2; - _maxDelay = std::max(_maxDelay,rightEarDelayMsecs); - _maxDelay = std::max(_maxDelay,leftEarDelayMsecs); - _minDelay = std::min(_minDelay,rightEarDelayMsecs); - _minDelay = std::min(_minDelay,leftEarDelayMsecs); - - int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND; - int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND; - - //qDebug() << 
"leftTotalDistance=" << leftTotalDistance << "rightTotalDistance=" << rightTotalDistance; - //qDebug() << "leftEarDelay=" << leftEarDelay << "rightEarDelay=" << rightEarDelay; - - float bounceAttenuation = getBounceAttenuationCoefficient(bounceCount); - float rightEarAttenuation = getDistanceAttenuationCoefficient(rightTotalDistance) * bounceAttenuation; - float leftEarAttenuation = getDistanceAttenuationCoefficient(leftTotalDistance) * bounceAttenuation; - - /* - qDebug() << "audible point..."; - qDebug() << " bounceCount=" << bounceCount; - qDebug() << " bounceAttenuation=" << bounceAttenuation; - qDebug() << " rightEarAttenuation=" << rightEarAttenuation; - qDebug() << " leftEarAttenuation=" << leftEarAttenuation; - */ - - _totalAttenuation += rightEarAttenuation + leftEarAttenuation; - _attenuationCount += 2; - _maxAttenuation = std::max(_maxAttenuation,rightEarAttenuation); - _maxAttenuation = std::max(_maxAttenuation,leftEarAttenuation); - _minAttenuation = std::min(_minAttenuation,rightEarAttenuation); - _minAttenuation = std::min(_minAttenuation,leftEarAttenuation); - - // run through the samples, and attenuate them - for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) { - int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS]; - int16_t rightSample = leftSample; - if (wantStereo) { - rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1]; - } - - //qDebug() << "leftSample=" << leftSample << "rightSample=" << rightSample; - attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation; - attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0; - - attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0; - attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation; - - //qDebug() << "attenuated... leftSample=" << (leftSample * leftEarAttenuation) << "rightSample=" << (rightSample * rightEarAttenuation); - } - - // now inject the attenuated array with the appropriate delay - - unsigned int sampleTimeLeft = sampleTime + leftEarDelay; - unsigned int sampleTimeRight = sampleTime + rightEarDelay; - - //qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight; - - _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples); - _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples); - - - start = end; - } -} - -void AudioReflector::injectAudiblePoint(const AudioPoint& audiblePoint, +void AudioReflector::injectAudiblePoint(const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate) { bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars); @@ -444,13 +149,6 @@ void AudioReflector::injectAudiblePoint(const AudioPoint& audiblePoint, float rightEarDelayMsecs = getDelayFromDistance(rightEarDistance) + audiblePoint.delay; float leftEarDelayMsecs = getDelayFromDistance(leftEarDistance) + audiblePoint.delay; -/* -qDebug() << "injectAudiblePoint()... 
"; -qDebug() << " audiblePoint.delay=" << audiblePoint.delay; -qDebug() << " rightEarDelayMsecs=" << rightEarDelayMsecs; -qDebug() << " leftEarDelayMsecs=" << leftEarDelayMsecs; -*/ - _totalDelay += rightEarDelayMsecs + leftEarDelayMsecs; _delayCount += 2; _maxDelay = std::max(_maxDelay,rightEarDelayMsecs); @@ -464,13 +162,6 @@ qDebug() << " leftEarDelayMsecs=" << leftEarDelayMsecs; float rightEarAttenuation = audiblePoint.attenuation * getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance); float leftEarAttenuation = audiblePoint.attenuation * getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance); - /* - qDebug() << "audible point..."; - qDebug() << " audiblePoint.attenuation=" << audiblePoint.attenuation; - qDebug() << " rightEarAttenuation=" << rightEarAttenuation; - qDebug() << " leftEarAttenuation=" << leftEarAttenuation; - */ - _totalAttenuation += rightEarAttenuation + leftEarAttenuation; _attenuationCount += 2; _maxAttenuation = std::max(_maxAttenuation,rightEarAttenuation); @@ -486,23 +177,17 @@ qDebug() << " leftEarDelayMsecs=" << leftEarDelayMsecs; rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1]; } - //qDebug() << "leftSample=" << leftSample << "rightSample=" << rightSample; attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation; attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0; attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0; attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation; - - //qDebug() << "attenuated... leftSample=" << (leftSample * leftEarAttenuation) << "rightSample=" << (rightSample * rightEarAttenuation); } // now inject the attenuated array with the appropriate delay - unsigned int sampleTimeLeft = sampleTime + leftEarDelay; unsigned int sampleTimeRight = sampleTime + rightEarDelay; - //qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight; - _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples); _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples); } @@ -511,14 +196,10 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray } void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { - if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)) { - newEchoAudio(sampleTime, samples, format); - } else { - oldEchoAudio(sampleTime, samples, format); - } + echoAudio(sampleTime, samples, format); } -void AudioReflector::newEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { +void AudioReflector::echoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { _maxDelay = 0; _maxAttenuation = 0.0f; _minDelay = std::numeric_limits::max(); @@ -530,7 +211,7 @@ void AudioReflector::newEchoAudio(unsigned int sampleTime, const QByteArray& sam QMutexLocker locker(&_mutex); - foreach(const AudioPoint& audiblePoint, _audiblePoints) { + foreach(const AudiblePoint& audiblePoint, _audiblePoints) { injectAudiblePoint(audiblePoint, samples, sampleTime, format.sampleRate()); } @@ -545,84 +226,8 @@ void AudioReflector::newEchoAudio(unsigned int sampleTime, const QByteArray& sam } } -void AudioReflector::oldEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { - //quint64 start = 
usecTimestampNow(); - - _maxDelay = 0; - _maxAttenuation = 0.0f; - _minDelay = std::numeric_limits::max(); - _minAttenuation = std::numeric_limits::max(); - _totalDelay = 0.0f; - _delayCount = 0; - _totalAttenuation = 0.0f; - _attenuationCount = 0; - - QMutexLocker locker(&_mutex); - - echoReflections(_origin, _frontRightUpReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _frontLeftUpReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _backRightUpReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _backLeftUpReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _frontRightDownReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _frontLeftDownReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _backRightDownReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _backLeftDownReflections, samples, sampleTime, format.sampleRate()); - - echoReflections(_origin, _frontReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _backReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _leftReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _rightReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _upReflections, samples, sampleTime, format.sampleRate()); - echoReflections(_origin, _downReflections, samples, sampleTime, format.sampleRate()); - - - _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount; - _averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount; - _reflections = _frontRightUpReflections.size() + - _frontLeftUpReflections.size() + - _backRightUpReflections.size() + - _backLeftUpReflections.size() + - _frontRightDownReflections.size() + - _frontLeftDownReflections.size() + - _backRightDownReflections.size() + - _backLeftDownReflections.size() + - _frontReflections.size() + - _backReflections.size() + - _leftReflections.size() + - _rightReflections.size() + - _upReflections.size() + - _downReflections.size(); - _diffusionPathCount = 0; - - if (_reflections == 0) { - _minDelay = 0.0f; - _minAttenuation = 0.0f; - } -} - -void AudioReflector::drawRays() { - const glm::vec3 RED(1,0,0); - - QMutexLocker locker(&_mutex); - - drawReflections(_origin, RED, _frontRightUpReflections); - drawReflections(_origin, RED, _frontLeftUpReflections); - drawReflections(_origin, RED, _backRightUpReflections); - drawReflections(_origin, RED, _backLeftUpReflections); - drawReflections(_origin, RED, _frontRightDownReflections); - drawReflections(_origin, RED, _frontLeftDownReflections); - drawReflections(_origin, RED, _backRightDownReflections); - drawReflections(_origin, RED, _backLeftDownReflections); - drawReflections(_origin, RED, _frontReflections); - drawReflections(_origin, RED, _backReflections); - drawReflections(_origin, RED, _leftReflections); - drawReflections(_origin, RED, _rightReflections); - drawReflections(_origin, RED, _upReflections); - drawReflections(_origin, RED, _downReflections); -} - void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) { - glDisable(GL_LIGHTING); // ?? 
+ glDisable(GL_LIGHTING); glLineWidth(2.0); // Draw the vector itself @@ -632,7 +237,7 @@ void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, co glVertex3f(end.x, end.y, end.z); glEnd(); - glEnable(GL_LIGHTING); // ?? + glEnable(GL_LIGHTING); } @@ -657,7 +262,6 @@ AudioPath::AudioPath(const glm::vec3& origin, const glm::vec3& direction, { } - void AudioReflector::addSoundSource(const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation, float initialDelay, float initialDistance) { @@ -665,7 +269,7 @@ void AudioReflector::addSoundSource(const glm::vec3& origin, const glm::vec3& in _audioPaths.push_back(path); } -void AudioReflector::newCalculateAllReflections() { +void AudioReflector::calculateAllReflections() { // only recalculate when we've moved... // TODO: what about case where new voxels are added in front of us??? bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented); @@ -673,19 +277,15 @@ void AudioReflector::newCalculateAllReflections() { glm::vec3 origin = _myAvatar->getHead()->getPosition(); glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition(); - bool withDiffusion = true; // this is the diffusion mode. + bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); - // _audiblePoints.size() == 0 ?? - bool shouldRecalc = _reflections == 0 + bool shouldRecalc = _reflections == 0 || !isSimilarPosition(origin, _origin) || !isSimilarOrientation(orientation, _orientation) || !isSimilarPosition(listenerPosition, _listenerPosition) || (withDiffusion != _withDiffusion); if (shouldRecalc) { - -qDebug() << "RECALC...... !!!!!!!!!!!!!!!!!!!!!!!!!!!!"; - QMutexLocker locker(&_mutex); quint64 start = usecTimestampNow(); _origin = origin; @@ -701,7 +301,7 @@ qDebug() << "RECALC...... !!!!!!!!!!!!!!!!!!!!!!!!!!!!"; } } -void AudioReflector::newDrawRays() { +void AudioReflector::drawRays() { const glm::vec3 RED(1,0,0); const glm::vec3 GREEN(0,1,0); @@ -805,8 +405,8 @@ int AudioReflector::analyzePathsSingleStep() { int activePaths = 0; foreach(AudioPath* const& path, _audioPaths) { - bool wantExtraDebuggging = false; - bool isDiffusion = (path->startPoint != _origin); + //bool wantExtraDebuggging = false; + //bool isDiffusion = (path->startPoint != _origin); glm::vec3 start = path->lastPoint; glm::vec3 direction = path->lastDirection; @@ -823,22 +423,10 @@ int AudioReflector::analyzePathsSingleStep() { if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { path->finalized = true; - if (wantExtraDebuggging && isDiffusion) { - qDebug() << "diffusion bounceCount too high!"; - } } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face, Octree::Lock)) { glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); pathDistance += glm::distance(start, end); - - - if (wantExtraDebuggging) { - qDebug() << "ray intersection... " - << " startPoint=[" << path->startPoint.x << "," << path->startPoint.y << "," << path->startPoint.z << "]" - << " bouceCount= " << path->bounceCount - << " end=[" << end.x << "," << end.y << "," << end.z << "]" - << " pathDistance=" << pathDistance; - } // We aren't using this... should we be???? 
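// (Clarifying the stale question above: toListenerDistance is used just below.
// The return leg to the listener adds getDelayFromDistance(toListenerDistance)
// to totalDelay and contributes the toListenerAttenuation factor, and those two
// values gate whether this bounce is kept as an audible point at all; the
// precise per-ear delay and attenuation are recomputed in injectAudiblePoint()
// once the actual ear positions are known.)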
float toListenerDistance = glm::distance(end, _listenerPosition); @@ -846,16 +434,17 @@ int AudioReflector::analyzePathsSingleStep() { // adjust our current delay by just the delay from the most recent ray currentDelay += getDelayFromDistance(distance); - // adjust our previous attenuation based on the distance traveled in last ray - //float distanceAttenuation = getDistanceAttenuationCoefficient(pathDistance); - // now we know the current attenuation for the "perfect" reflection case, but we now incorporate // our surface materials to determine how much of this ray is absorbed, reflected, and diffused SurfaceCharacteristics material = getSurfaceCharacteristics(elementHit); float reflectiveAttenuation = currentReflectiveAttenuation * material.reflectiveRatio; float totalDiffusionAttenuation = currentReflectiveAttenuation * material.diffusionRatio; - float partialDiffusionAttenuation = _diffusionFanout < 1 ? 0.0f : totalDiffusionAttenuation / _diffusionFanout; + + bool wantDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); + int fanout = wantDiffusions ? _diffusionFanout : 0; + + float partialDiffusionAttenuation = fanout < 1 ? 0.0f : totalDiffusionAttenuation / fanout; // total delay includes the bounce back to listener float totalDelay = currentDelay + getDelayFromDistance(toListenerDistance); @@ -867,7 +456,7 @@ int AudioReflector::analyzePathsSingleStep() { && totalDelay < MAXIMUM_DELAY_MS) { // diffusions fan out from random places on the semisphere of the collision point - for(int i = 0; i < _diffusionFanout; i++) { + for(int i = 0; i < fanout; i++) { glm::vec3 diffusion; float randomness = randFloatInRange(0.5f,1.0f); @@ -891,15 +480,6 @@ int AudioReflector::analyzePathsSingleStep() { diffusion = glm::normalize(diffusion); - if (wantExtraDebuggging) { - qDebug() << "DIFFUSION... addSoundSource()... " << - " partialDiffusionAttenuation=" << partialDiffusionAttenuation << "\n" << - " MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT << "\n" << - " direction=[" << direction.x << "," << direction.y << "," << direction.z << "]\n" << - " diffusion=[" << diffusion.x << "," << diffusion.y << "," << diffusion.z << "]\n" << - " end=[" << end.x << "," << end.y << "," << end.z << "]"; - } - // add sound sources for these diffusions addSoundSource(end, diffusion, partialDiffusionAttenuation, currentDelay, pathDistance); } @@ -907,16 +487,6 @@ int AudioReflector::analyzePathsSingleStep() { // if our reflective attenuation is above our minimum, then add our reflection point and // allow our path to continue - if (wantExtraDebuggging && isDiffusion) { - qDebug() << "checking diffusion"; - qDebug() << "reflectiveAttenuation=" << reflectiveAttenuation; - qDebug() << "totalDiffusionAttenuation=" << totalDiffusionAttenuation; - qDebug() << "toListenerAttenuation=" << toListenerAttenuation; - qDebug() << "(reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation=" << ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation); - } - - // we used to use... ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT - if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS) { @@ -924,18 +494,8 @@ int AudioReflector::analyzePathsSingleStep() { // NOTE: we add the delay to the audible point, not back to the listener. 
The additional delay // and attenuation to the listener is recalculated at the point where we actually inject the // audio so that it can be adjusted to ear position - AudioPoint point = { end, currentDelay, - (reflectiveAttenuation + totalDiffusionAttenuation), - pathDistance}; + AudiblePoint point = {end, currentDelay, (reflectiveAttenuation + totalDiffusionAttenuation), pathDistance}; - /* - qDebug() << "audible point..."; - qDebug() << " reflectiveAttenuation=" << reflectiveAttenuation; - qDebug() << " toListenerAttenuation=" << toListenerAttenuation; - qDebug() << " likely attenuation=" << (reflectiveAttenuation * toListenerAttenuation); - qDebug() << " totalDiffusionAttenuation=" << totalDiffusionAttenuation; - */ - _audiblePoints.push_back(point); // add this location to the path points, so we can visualize it diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 9e5756e62c..54df80e938 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -36,7 +36,7 @@ public: QVector<glm::vec3> reflections; }; -class AudioPoint { +class AudiblePoint { public: glm::vec3 location; float delay; // includes total delay including pre delay to the point of the audible location, not to the listener's ears @@ -96,35 +96,11 @@ private: Audio* _audio; // access to audio API // Helpers for drawing - void drawRays(); void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color); - // OLD helper for playing audio - void echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples, - unsigned int sampleTime, int sampleRate); - - // OLD helper for calculating reflections - QVector<glm::vec3> calculateReflections(const glm::vec3& earPosition, const glm::vec3& origin, const glm::vec3& originalDirection); - void calculateDiffusions(const glm::vec3& earPosition, const glm::vec3& origin, - const glm::vec3& thisReflection, float thisDistance, float thisAttenuation, int thisBounceCount, - BoxFace thisReflectionFace, QVector<glm::vec3> reflectionPoints); - - - // OLD helper for drawing refections - void drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections); - - // OLD helper for calculating reflections - void calculateAllReflections(); - - // resets statistics - void reset(); - // helper for generically calculating attenuation based on distance float getDistanceAttenuationCoefficient(float distance); - // helper for generically calculating attenuation based on bounce count, used in old/non-diffusion mode - float getBounceAttenuationCoefficient(int bounceCount); - // statistics int _reflections; int _diffusionPathCount; @@ -143,23 +119,6 @@ private: glm::vec3 _origin; glm::quat _orientation; - // old way of doing this... - QVector<glm::vec3> _frontRightUpReflections; - QVector<glm::vec3> _frontLeftUpReflections; - QVector<glm::vec3> _backRightUpReflections; - QVector<glm::vec3> _backLeftUpReflections; - QVector<glm::vec3> _frontRightDownReflections; - QVector<glm::vec3> _frontLeftDownReflections; - QVector<glm::vec3> _backRightDownReflections; - QVector<glm::vec3> _backLeftDownReflections; - QVector<glm::vec3> _frontReflections; - QVector<glm::vec3> _backReflections; - QVector<glm::vec3> _leftReflections; - QVector<glm::vec3> _rightReflections; - QVector<glm::vec3> _upReflections; - QVector<glm::vec3> _downReflections; - - // NOTE: Here's the new way, we will have an array of AudioPaths, we will loop on all of our currently calculating audio // paths, and calculate one ray per path. If that ray doesn't reflect, or reaches a max distance/attenuation, then it // is considered finalized. 
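As context for the NOTE above: patch 36 consolidates everything onto this path-tracing model, where analyzePathsSingleStep() advances every active path by one ray cast and splits the surviving energy by surface material. A condensed, self-contained sketch of that per-hit split follows; the struct shapes and names are assumptions drawn from the patch, not its exact code:

    // Assumed shape of the patch's material description.
    struct SurfaceCharacteristics { float reflectiveRatio, diffusionRatio, absorptionRatio; };
    struct EnergySplit { float reflected; float perDiffusionPath; };

    // On each ray hit, split the path's remaining energy three ways.
    EnergySplit splitEnergy(float energy, const SurfaceCharacteristics& material, int fanout) {
        float reflected = energy * material.reflectiveRatio;      // continues this path
        float diffused = energy * material.diffusionRatio;        // shared by spawned diffusion paths
        float perPath = (fanout < 1) ? 0.0f : diffused / fanout;  // each child path starts with this
        // the absorbed remainder (absorptionRatio) simply disappears; with
        // fanout == 0 this reduces to the old pure-reflection behavior, which
        // is what patch 32's "0 fan out produces same results" refers to
        return { reflected, perPath };
    }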
@@ -168,7 +127,7 @@ private: // fanout number of new paths, those new paths will have an origin of the reflection point, and an initial attenuation // of their diffusion ratio. Those new paths will be added to the active audio paths, and be analyzed for the next loop. QVector<AudioPath*> _audioPaths; - QVector<AudioPoint> _audiblePoints; + QVector<AudiblePoint> _audiblePoints; // adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties, // as well as diffusion sound sources @@ -178,15 +137,14 @@ private: // helper that handles audioPath analysis int analyzePathsSingleStep(); void analyzePaths(); - void newDrawRays(); + void drawRays(); void drawPath(AudioPath* path, const glm::vec3& originalColor); - void newCalculateAllReflections(); + void calculateAllReflections(); int countDiffusionPaths(); glm::vec3 getFaceNormal(BoxFace face); - void injectAudiblePoint(const AudioPoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate); - void oldEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); - void newEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); + void injectAudiblePoint(const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate); + void echoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); // return the surface characteristics of the element we hit SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit = NULL); diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp index 83b700484c..df47c9ad88 100644 --- a/interface/src/ui/Stats.cpp +++ b/interface/src/ui/Stats.cpp @@ -513,14 +513,16 @@ void Stats::display( // add some reflection stats char reflectionsStatus[128]; - sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s", + sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s, Normals: %s", audioReflector->getReflections(), (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal) ? "included" : "silent"), (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars) ? "two" : "one"), (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource) ? "stereo" : "mono"), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces) ? 
"random" : "regular") ); verticalOffset += STATS_PELS_PER_LINE; From 8a16986294bb601e1e9a5d1689c4404589e4c9af Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 09:58:35 -0700 Subject: [PATCH 37/64] add sliders for reflection, diffusion, absorption --- examples/audioReflectorTools.js | 297 ++++++++++++++++++++++++++++--- interface/src/AudioReflector.cpp | 26 ++- interface/src/AudioReflector.h | 10 +- interface/src/ui/Stats.cpp | 14 +- 4 files changed, 309 insertions(+), 38 deletions(-) diff --git a/examples/audioReflectorTools.js b/examples/audioReflectorTools.js index ae9f43fddb..0467a4f937 100644 --- a/examples/audioReflectorTools.js +++ b/examples/audioReflectorTools.js @@ -14,12 +14,62 @@ var delayScale = 100.0; var fanoutScale = 10.0; var speedScale = 20; var factorScale = 5.0; +var reflectiveScale = 100.0; +var diffusionScale = 100.0; +var absorptionScale = 100.0; + +// these three properties are bound together, if you change one, the others will also change +var reflectiveRatio = AudioReflector.getReflectiveRatio(); +var diffusionRatio = AudioReflector.getDiffusionRatio(); +var absorptionRatio = AudioReflector.getAbsorptionRatio(); + +var reflectiveThumbX; +var diffusionThumbX; +var absorptionThumbX; + +function setReflectiveRatio(reflective) { + var total = diffusionRatio + absorptionRatio + (reflective / reflectiveScale); + diffusionRatio = diffusionRatio / total; + absorptionRatio = absorptionRatio / total; + reflectiveRatio = (reflective / reflectiveScale) / total; + updateRatioValues(); +} + +function setDiffusionRatio(diffusion) { + var total = (diffusion / diffusionScale) + absorptionRatio + reflectiveRatio; + diffusionRatio = (diffusion / diffusionScale) / total; + absorptionRatio = absorptionRatio / total; + reflectiveRatio = reflectiveRatio / total; + updateRatioValues(); +} + +function setAbsorptionRatio(absorption) { + var total = diffusionRatio + (absorption / absorptionScale) + reflectiveRatio; + diffusionRatio = diffusionRatio / total; + absorptionRatio = (absorption / absorptionScale) / total; + reflectiveRatio = reflectiveRatio / total; + updateRatioValues(); +} + +function updateRatioSliders() { + reflectiveThumbX = reflectiveMinThumbX + ((reflectiveMaxThumbX - reflectiveMinThumbX) * reflectiveRatio); + diffusionThumbX = diffusionMinThumbX + ((diffusionMaxThumbX - diffusionMinThumbX) * diffusionRatio); + absorptionThumbX = absorptionMinThumbX + ((absorptionMaxThumbX - absorptionMinThumbX) * absorptionRatio); + + Overlays.editOverlay(reflectiveThumb, { x: reflectiveThumbX } ); + Overlays.editOverlay(diffusionThumb, { x: diffusionThumbX } ); + Overlays.editOverlay(absorptionThumb, { x: absorptionThumbX } ); +} + +function updateRatioValues() { + AudioReflector.setReflectiveRatio(reflectiveRatio); + AudioReflector.setDiffusionRatio(diffusionRatio); + AudioReflector.setAbsorptionRatio(absorptionRatio); +} var topY = 250; var sliderHeight = 35; -// This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to -// move the slider var delayY = topY; topY += sliderHeight; var delayLabel = Overlays.addOverlay("text", { @@ -28,7 +78,7 @@ var delayLabel = Overlays.addOverlay("text", { width: 60, height: sliderHeight, color: { red: 0, green: 0, blue: 0}, - textColor: { red: 255, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, topMargin: 12, leftMargin: 5, text: "Delay:" @@ -43,7 +93,7 @@ var delaySlider = Overlays.addOverlay("image", { alpha: 1 }); -// This is the thumb of our slider + var 
delayMinThumbX = 110; var delayMaxThumbX = delayMinThumbX + 110; var delayThumbX = delayMinThumbX + ((delayMaxThumbX - delayMinThumbX) * (AudioReflector.getPreDelay() / delayScale)); @@ -57,8 +107,6 @@ var delayThumb = Overlays.addOverlay("image", { alpha: 1 }); -// This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to -// move the slider var fanoutY = topY; topY += sliderHeight; @@ -68,7 +116,7 @@ var fanoutLabel = Overlays.addOverlay("text", { width: 60, height: sliderHeight, color: { red: 0, green: 0, blue: 0}, - textColor: { red: 255, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, topMargin: 12, leftMargin: 5, text: "Fanout:" @@ -83,7 +131,7 @@ var fanoutSlider = Overlays.addOverlay("image", { alpha: 1 }); -// This is the thumb of our slider + var fanoutMinThumbX = 110; var fanoutMaxThumbX = fanoutMinThumbX + 110; var fanoutThumbX = fanoutMinThumbX + ((fanoutMaxThumbX - fanoutMinThumbX) * (AudioReflector.getDiffusionFanout() / fanoutScale)); @@ -98,8 +146,6 @@ var fanoutThumb = Overlays.addOverlay("image", { }); -// This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to -// move the slider var speedY = topY; topY += sliderHeight; @@ -109,7 +155,7 @@ var speedLabel = Overlays.addOverlay("text", { width: 60, height: sliderHeight, color: { red: 0, green: 0, blue: 0}, - textColor: { red: 255, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, topMargin: 6, leftMargin: 5, text: "Speed\nin ms/m:" @@ -124,7 +170,7 @@ var speedSlider = Overlays.addOverlay("image", { alpha: 1 }); -// This is the thumb of our slider + var speedMinThumbX = 110; var speedMaxThumbX = speedMinThumbX + 110; var speedThumbX = speedMinThumbX + ((speedMaxThumbX - speedMinThumbX) * (AudioReflector.getSoundMsPerMeter() / speedScale)); @@ -138,8 +184,6 @@ var speedThumb = Overlays.addOverlay("image", { alpha: 1 }); -// This will create a couple of image overlays that make a "slider", we will demonstrate how to trap mouse messages to -// move the slider var factorY = topY; topY += sliderHeight; @@ -149,7 +193,7 @@ var factorLabel = Overlays.addOverlay("text", { width: 60, height: sliderHeight, color: { red: 0, green: 0, blue: 0}, - textColor: { red: 255, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, topMargin: 6, leftMargin: 5, text: "Attenuation\nFactor:" @@ -165,7 +209,7 @@ var factorSlider = Overlays.addOverlay("image", { alpha: 1 }); -// This is the thumb of our slider + var factorMinThumbX = 110; var factorMaxThumbX = factorMinThumbX + 110; var factorThumbX = factorMinThumbX + ((factorMaxThumbX - factorMinThumbX) * (AudioReflector.getDistanceAttenuationScalingFactor() / factorScale)); @@ -179,6 +223,123 @@ var factorThumb = Overlays.addOverlay("image", { alpha: 1 }); +var reflectiveY = topY; +topY += sliderHeight; + +var reflectiveLabel = Overlays.addOverlay("text", { + x: 40, + y: reflectiveY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Reflective\nRatio:" + }); + + +var reflectiveSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: reflectiveY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + 
alpha: 1
+    });
+
+
+var reflectiveMinThumbX = 110;
+var reflectiveMaxThumbX = reflectiveMinThumbX + 110;
+var reflectiveThumbX = reflectiveMinThumbX + ((reflectiveMaxThumbX - reflectiveMinThumbX) * AudioReflector.getReflectiveRatio());
+var reflectiveThumb = Overlays.addOverlay("image", {
+    x: reflectiveThumbX,
+    y: reflectiveY+9,
+    width: 18,
+    height: 17,
+    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
+    color: { red: 255, green: 255, blue: 255},
+    alpha: 1
+    });
+
+var diffusionY = topY;
+topY += sliderHeight;
+
+var diffusionLabel = Overlays.addOverlay("text", {
+    x: 40,
+    y: diffusionY,
+    width: 60,
+    height: sliderHeight,
+    color: { red: 0, green: 0, blue: 0},
+    textColor: { red: 255, green: 255, blue: 255},
+    topMargin: 6,
+    leftMargin: 5,
+    text: "Diffusion\nRatio:"
+    });
+
+
+var diffusionSlider = Overlays.addOverlay("image", {
+    // alternate form of expressing bounds
+    bounds: { x: 100, y: diffusionY, width: 150, height: sliderHeight},
+    subImage: { x: 46, y: 0, width: 200, height: 71 },
+    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
+    color: { red: 255, green: 255, blue: 255},
+    alpha: 1
+    });
+
+
+var diffusionMinThumbX = 110;
+var diffusionMaxThumbX = diffusionMinThumbX + 110;
+var diffusionThumbX = diffusionMinThumbX + ((diffusionMaxThumbX - diffusionMinThumbX) * AudioReflector.getDiffusionRatio());
+var diffusionThumb = Overlays.addOverlay("image", {
+    x: diffusionThumbX,
+    y: diffusionY+9,
+    width: 18,
+    height: 17,
+    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
+    color: { red: 0, green: 255, blue: 255},
+    alpha: 1
+    });
+
+var absorptionY = topY;
+topY += sliderHeight;
+
+var absorptionLabel = Overlays.addOverlay("text", {
+    x: 40,
+    y: absorptionY,
+    width: 60,
+    height: sliderHeight,
+    color: { red: 0, green: 0, blue: 0},
+    textColor: { red: 255, green: 255, blue: 255},
+    topMargin: 6,
+    leftMargin: 5,
+    text: "Absorption\nRatio:"
+    });
+
+
+var absorptionSlider = Overlays.addOverlay("image", {
+    // alternate form of expressing bounds
+    bounds: { x: 100, y: absorptionY, width: 150, height: sliderHeight},
+    subImage: { x: 46, y: 0, width: 200, height: 71 },
+    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
+    color: { red: 255, green: 255, blue: 255},
+    alpha: 1
+    });
+
+
+var absorptionMinThumbX = 110;
+var absorptionMaxThumbX = absorptionMinThumbX + 110;
+var absorptionThumbX = absorptionMinThumbX + ((absorptionMaxThumbX - absorptionMinThumbX) * AudioReflector.getAbsorptionRatio());
+var absorptionThumb = Overlays.addOverlay("image", {
+    x: absorptionThumbX,
+    y: absorptionY+9,
+    width: 18,
+    height: 17,
+    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
+    color: { red: 255, green: 0, blue: 255},
+    alpha: 1
+    });
+
 
 // When our script shuts down, we should clean up all of our overlays
 function scriptEnding() {
@@ -197,6 +358,19 @@ function scriptEnding() {
     Overlays.deleteOverlay(fanoutLabel);
     Overlays.deleteOverlay(fanoutThumb);
     Overlays.deleteOverlay(fanoutSlider);
+
+    Overlays.deleteOverlay(reflectiveLabel);
+    Overlays.deleteOverlay(reflectiveThumb);
+    Overlays.deleteOverlay(reflectiveSlider);
+
+    Overlays.deleteOverlay(diffusionLabel);
+    Overlays.deleteOverlay(diffusionThumb);
+    Overlays.deleteOverlay(diffusionSlider);
+
+    Overlays.deleteOverlay(absorptionLabel);
+    Overlays.deleteOverlay(absorptionThumb);
+    Overlays.deleteOverlay(absorptionSlider);
+
 }
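The reflective, diffusion, and absorption rows above repeat the same label/track/thumb construction, differing only in row position, caption, thumb color, and initial ratio. The following is a minimal sketch of how the pattern could be folded into a single helper; it reuses only the Overlays calls, the sliderHeight global, and the image URLs already present in this script, while makeSlider itself, its parameter names, and its return shape are hypothetical and not part of audioReflectorTools.js:

// Hypothetical helper (not part of the script): builds one label/track/thumb
// row using the same Overlays calls as the hand-written blocks above.
function makeSlider(labelText, y, initialRatio) {
    var label = Overlays.addOverlay("text", {
        x: 40, y: y, width: 60, height: sliderHeight,
        color: { red: 0, green: 0, blue: 0},
        textColor: { red: 255, green: 255, blue: 255},
        topMargin: 6, leftMargin: 5,
        text: labelText
    });
    var slider = Overlays.addOverlay("image", {
        bounds: { x: 100, y: y, width: 150, height: sliderHeight},
        subImage: { x: 46, y: 0, width: 200, height: 71 },
        imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
        color: { red: 255, green: 255, blue: 255},
        alpha: 1
    });
    var minThumbX = 110;
    var maxThumbX = minThumbX + 110;
    var thumb = Overlays.addOverlay("image", {
        x: minThumbX + ((maxThumbX - minThumbX) * initialRatio),
        y: y + 9, width: 18, height: 17,
        imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
        color: { red: 255, green: 255, blue: 255},
        alpha: 1
    });
    // returning the overlay ids and thumb range lets one mouse handler and one
    // cleanup loop serve every slider
    return { label: label, slider: slider, thumb: thumb, minThumbX: minThumbX, maxThumbX: maxThumbX };
}

With records like these, a single clamp in mouseMoveEvent() and a single loop in scriptEnding() could replace the per-parameter blocks, rather than hand-writing each one as above.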
Script.scriptEnding.connect(scriptEnding);
@@ -215,6 +389,10 @@ var movingSliderDelay = false;
 var movingSliderFanout = false;
 var movingSliderSpeed = false;
 var movingSliderFactor = false;
+var movingSliderReflective = false;
+var movingSliderDiffusion = false;
+var movingSliderAbsorption = false;
+
 var thumbClickOffsetX = 0;
 function mouseMoveEvent(event) {
     if (movingSliderDelay) {
@@ -227,7 +405,6 @@ function mouseMoveEvent(event) {
         }
         Overlays.editOverlay(delayThumb, { x: newThumbX } );
         var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
-        print("delay="+delay);
         AudioReflector.setPreDelay(delay);
     }
     if (movingSliderFanout) {
@@ -240,7 +417,6 @@ function mouseMoveEvent(event) {
         }
         Overlays.editOverlay(fanoutThumb, { x: newThumbX } );
         var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale);
-        print("fanout="+fanout);
         AudioReflector.setDiffusionFanout(fanout);
     }
     if (movingSliderSpeed) {
@@ -267,47 +443,90 @@ function mouseMoveEvent(event) {
         var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
         AudioReflector.setDistanceAttenuationScalingFactor(factor);
     }
+
+    if (movingSliderAbsorption) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < absorptionMinThumbX) {
+            newThumbX = absorptionMinThumbX;
+        }
+        if (newThumbX > absorptionMaxThumbX) {
+            newThumbX = absorptionMaxThumbX;
+        }
+        Overlays.editOverlay(absorptionThumb, { x: newThumbX } );
+        var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale;
+        setAbsorptionRatio(absorption);
+    }
+
+    if (movingSliderReflective) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < reflectiveMinThumbX) {
+            newThumbX = reflectiveMinThumbX;
+        }
+        if (newThumbX > reflectiveMaxThumbX) {
+            newThumbX = reflectiveMaxThumbX;
+        }
+        Overlays.editOverlay(reflectiveThumb, { x: newThumbX } );
+        var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale;
+        setReflectiveRatio(reflective);
+    }
+
+    if (movingSliderDiffusion) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < diffusionMinThumbX) {
+            newThumbX = diffusionMinThumbX;
+        }
+        if (newThumbX > diffusionMaxThumbX) {
+            newThumbX = diffusionMaxThumbX;
+        }
+        Overlays.editOverlay(diffusionThumb, { x: newThumbX } );
+        var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
+        setDiffusionRatio(diffusion);
+    }
+
 }
 
 // we also handle click detection in our mousePressEvent()
 function mousePressEvent(event) {
     var clickedOverlay = Overlays.getOverlayAtPoint({x: event.x, y: event.y});
-
-    // If the user clicked on the thumb, handle the slider logic
     if (clickedOverlay == delayThumb) {
         movingSliderDelay = true;
         thumbClickOffsetX = event.x - delayThumbX;
     }
-    // If the user clicked on the thumb, handle the slider logic
     if (clickedOverlay == fanoutThumb) {
         movingSliderFanout = true;
         thumbClickOffsetX = event.x - fanoutThumbX;
     }
-    // If the user clicked on the thumb, handle the slider logic
     if (clickedOverlay == speedThumb) {
         movingSliderSpeed = true;
         thumbClickOffsetX = event.x - speedThumbX;
     }
-
-    // If the user clicked on the thumb, handle the slider logic
     if (clickedOverlay == factorThumb) {
-print("movingSliderFactor...");
         movingSliderFactor = true;
         thumbClickOffsetX = event.x - factorThumbX;
     }
+    if (clickedOverlay == diffusionThumb) {
+        movingSliderDiffusion = true;
+        thumbClickOffsetX = 
event.x - diffusionThumbX; + } + if (clickedOverlay == absorptionThumb) { + movingSliderAbsorption = true; + thumbClickOffsetX = event.x - absorptionThumbX; + } + if (clickedOverlay == reflectiveThumb) { + movingSliderReflective = true; + thumbClickOffsetX = event.x - reflectiveThumbX; + } } function mouseReleaseEvent(event) { if (movingSliderDelay) { movingSliderDelay = false; var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale; - print("delay="+delay); AudioReflector.setPreDelay(delay); delayThumbX = newThumbX; } if (movingSliderFanout) { movingSliderFanout = false; var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale); - print("fanout="+fanout); AudioReflector.setDiffusionFanout(fanout); fanoutThumbX = newThumbX; } @@ -323,6 +542,30 @@ function mouseReleaseEvent(event) { AudioReflector.setDistanceAttenuationScalingFactor(factor); factorThumbX = newThumbX; } + + if (movingSliderReflective) { + movingSliderReflective = false; + var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale; + setReflectiveRatio(reflective); + reflectiveThumbX = newThumbX; + updateRatioSliders(); + } + + if (movingSliderDiffusion) { + movingSliderDiffusion = false; + var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale; + setDiffusionRatio(diffusion); + diffusionThumbX = newThumbX; + updateRatioSliders(); + } + + if (movingSliderAbsorption) { + movingSliderAbsorption = false; + var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale; + setAbsorptionRatio(absorption); + absorptionThumbX = newThumbX; + updateRatioSliders(); + } } Controller.mouseMoveEvent.connect(mouseMoveEvent); diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index d9dc678886..1a1c1e3b10 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -11,7 +11,6 @@ #include "AudioReflector.h" #include "Menu.h" - const float DEFAULT_PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections const float DEFAULT_MS_DELAY_PER_METER = 3.0f; const float MINIMUM_ATTENUATION_TO_REFLECT = 1.0f / 256.0f; @@ -45,7 +44,6 @@ AudioReflector::AudioReflector(QObject* parent) : _minDelay = 0; } - void AudioReflector::render() { // if we're not set up yet, or we're not processing spatial audio, then exit early @@ -159,8 +157,11 @@ void AudioReflector::injectAudiblePoint(const AudiblePoint& audiblePoint, int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND; int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND; - float rightEarAttenuation = audiblePoint.attenuation * getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance); - float leftEarAttenuation = audiblePoint.attenuation * getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance); + float rightEarAttenuation = audiblePoint.attenuation * + getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance); + + float leftEarAttenuation = audiblePoint.attenuation * + getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance); _totalAttenuation += rightEarAttenuation + leftEarAttenuation; _attenuationCount += 2; @@ -423,7 +424,12 @@ int AudioReflector::analyzePathsSingleStep() { if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) { path->finalized = true; - } else if 
(_voxels->findRayIntersection(start, direction, elementHit, distance, face, Octree::Lock)) { + } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) { + // TODO: we need to decide how we want to handle locking on the ray intersection, if we force lock, + // we get an accurate picture, but it could prevent rendering of the voxels. If we trylock (default), + // we might not get ray intersections where they may exist, but we can't really detect that case... + // add last parameter of Octree::Lock to force locking + glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); pathDistance += glm::distance(start, end); @@ -525,9 +531,15 @@ int AudioReflector::analyzePathsSingleStep() { } SurfaceCharacteristics AudioReflector::getSurfaceCharacteristics(OctreeElement* elementHit) { - float reflectiveRatio = (1.0f - (_absorptionRatio + _diffusionRatio)); - SurfaceCharacteristics result = { reflectiveRatio, _absorptionRatio, _diffusionRatio }; + SurfaceCharacteristics result = { getReflectiveRatio(), _absorptionRatio, _diffusionRatio }; return result; } +void AudioReflector::setReflectiveRatio(float ratio) { + float currentReflectiveRatio = (1.0f - (_absorptionRatio + _diffusionRatio)); + float halfDifference = (ratio - currentReflectiveRatio) / 2.0f; + // evenly distribute the difference between the two other ratios + _absorptionRatio -= halfDifference; + _diffusionRatio -= halfDifference; +} diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 54df80e938..3a806e2267 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -84,10 +84,16 @@ public slots: void setSoundMsPerMeter(float soundMsPerMeter) { _soundMsPerMeter = soundMsPerMeter; } float getDistanceAttenuationScalingFactor() const { return _distanceAttenuationScalingFactor; } /// ms per meter, larger means slower void setDistanceAttenuationScalingFactor(float factor) { _distanceAttenuationScalingFactor = factor; } - int getDiffusionFanout() const { return _diffusionFanout; } /// number of points of diffusion from each reflection point void setDiffusionFanout(int fanout) { _diffusionFanout = fanout; } /// number of points of diffusion from each reflection point - + + float getAbsorptionRatio() const { return _absorptionRatio; } + void setAbsorptionRatio(float ratio) { _absorptionRatio = ratio; } + float getDiffusionRatio() const { return _diffusionRatio; } + void setDiffusionRatio(float ratio) { _diffusionRatio = ratio; } + float getReflectiveRatio() const { return (1.0f - (_absorptionRatio + _diffusionRatio)); } + void setReflectiveRatio(float ratio); + signals: private: diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp index df47c9ad88..2b70b3538c 100644 --- a/interface/src/ui/Stats.cpp +++ b/interface/src/ui/Stats.cpp @@ -343,7 +343,7 @@ void Stats::display( lines = _expanded ? 
12 : 3; if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) { - lines += 5; // spatial audio processing adds 1 spacing line and 4 extra lines of info + lines += 6; // spatial audio processing adds 1 spacing line and 5 extra lines of info } drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10); @@ -541,7 +541,7 @@ void Stats::display( verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); - sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, distance scale: %5.3f", + sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, Factor: %5.3f", audioReflector->getAverageAttenuation(), audioReflector->getMaxAttenuation(), audioReflector->getMinAttenuation(), @@ -559,6 +559,16 @@ void Stats::display( verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + const float AS_PERCENT = 100.0f; + float reflectiveRatio = audioReflector->getReflectiveRatio() * AS_PERCENT; + float diffusionRatio = audioReflector->getDiffusionRatio() * AS_PERCENT; + float absorptionRatio = audioReflector->getAbsorptionRatio() * AS_PERCENT; + sprintf(reflectionsStatus, "Ratios: Reflective: %5.3f, Diffusion: %5.3f, Absorption: %5.3f", + reflectiveRatio, diffusionRatio, absorptionRatio); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + } } From 60c6b27ab2785ca7d37d912722cefdda2702cceb Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 11:38:11 -0700 Subject: [PATCH 38/64] cleanup code a bit, add support for diffusions even with no ray intersection --- interface/src/AudioReflector.cpp | 277 ++++++++++++++++----------- interface/src/AudioReflector.h | 9 +- libraries/octree/src/AABox.h | 3 +- libraries/shared/src/SharedUtil.cpp | 4 + libraries/shared/src/SharedUtil.h | 1 + libraries/voxels/src/VoxelDetail.cpp | 3 + 6 files changed, 185 insertions(+), 112 deletions(-) diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 1a1c1e3b10..7801c26924 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -91,13 +91,14 @@ float AudioReflector::getDistanceAttenuationCoefficient(float distance) { glm::vec3 AudioReflector::getFaceNormal(BoxFace face) { bool wantSlightRandomness = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces); - glm::vec3 faceNormal; - - float normalLength = wantSlightRandomness ? randFloatInRange(0.99f,1.0f) : 1.0f; + const float MIN_RANDOM_LENGTH = 0.99f; + const float MAX_RANDOM_LENGTH = 1.0f; + const float NON_RANDOM_LENGTH = 1.0f; + float normalLength = wantSlightRandomness ? randFloatInRange(MIN_RANDOM_LENGTH, MAX_RANDOM_LENGTH) : NON_RANDOM_LENGTH; float remainder = (1.0f - normalLength)/2.0f; - float remainderSignA = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; - float remainderSignB = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? 
-1.0 : 1.0; + float remainderSignA = randomSign(); + float remainderSignB = randomSign(); if (face == MIN_X_FACE) { faceNormal = glm::vec3(-normalLength, remainder * remainderSignA, remainder * remainderSignB); @@ -244,8 +245,9 @@ void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, co AudioPath::AudioPath(const glm::vec3& origin, const glm::vec3& direction, - float attenuation, float delay, float distance, int bounceCount) : + float attenuation, float delay, float distance,bool isDiffusion, int bounceCount) : + isDiffusion(isDiffusion), startPoint(origin), startDirection(direction), startDelay(delay), @@ -264,9 +266,10 @@ AudioPath::AudioPath(const glm::vec3& origin, const glm::vec3& direction, } void AudioReflector::addSoundSource(const glm::vec3& origin, const glm::vec3& initialDirection, - float initialAttenuation, float initialDelay, float initialDistance) { + float initialAttenuation, float initialDelay, float initialDistance, bool isDiffusion) { - AudioPath* path = new AudioPath(origin, initialDirection, initialAttenuation, initialDelay, initialDistance, 0); + AudioPath* path = new AudioPath(origin, initialDirection, initialAttenuation, initialDelay, + initialDistance, isDiffusion, 0); _audioPaths.push_back(path); } @@ -312,11 +315,11 @@ void AudioReflector::drawRays() { foreach(AudioPath* const& path, _audioPaths) { // if this is an original reflection, draw it in RED - if (path->startPoint == _origin) { - drawPath(path, RED); - } else { + if (path->isDiffusion) { diffusionNumber++; drawPath(path, GREEN); + } else { + drawPath(path, RED); } } } @@ -415,10 +418,6 @@ int AudioReflector::analyzePathsSingleStep() { float distance; // output from findRayIntersection BoxFace face; // output from findRayIntersection - float currentReflectiveAttenuation = path->lastAttenuation; // only the reflective components - float currentDelay = path->lastDelay; // start with our delay so far - float pathDistance = path->lastDistance; - if (!path->finalized) { activePaths++; @@ -429,107 +428,169 @@ int AudioReflector::analyzePathsSingleStep() { // we get an accurate picture, but it could prevent rendering of the voxels. If we trylock (default), // we might not get ray intersections where they may exist, but we can't really detect that case... // add last parameter of Octree::Lock to force locking - - glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); + handlePathPoint(path, distance, elementHit, face); - pathDistance += glm::distance(start, end); - - // We aren't using this... should we be???? - float toListenerDistance = glm::distance(end, _listenerPosition); - - // adjust our current delay by just the delay from the most recent ray - currentDelay += getDelayFromDistance(distance); - - // now we know the current attenuation for the "perfect" reflection case, but we now incorporate - // our surface materials to determine how much of this ray is absorbed, reflected, and diffused - SurfaceCharacteristics material = getSurfaceCharacteristics(elementHit); - - float reflectiveAttenuation = currentReflectiveAttenuation * material.reflectiveRatio; - float totalDiffusionAttenuation = currentReflectiveAttenuation * material.diffusionRatio; - - bool wantDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); - int fanout = wantDiffusions ? _diffusionFanout : 0; - - float partialDiffusionAttenuation = fanout < 1 ? 
0.0f : totalDiffusionAttenuation / fanout; - - // total delay includes the bounce back to listener - float totalDelay = currentDelay + getDelayFromDistance(toListenerDistance); - float toListenerAttenuation = getDistanceAttenuationCoefficient(toListenerDistance + pathDistance); - - // if our resulting partial diffusion attenuation, is still above our minimum attenuation - // then we add new paths for each diffusion point - if ((partialDiffusionAttenuation * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT - && totalDelay < MAXIMUM_DELAY_MS) { - - // diffusions fan out from random places on the semisphere of the collision point - for(int i = 0; i < fanout; i++) { - glm::vec3 diffusion; - - float randomness = randFloatInRange(0.5f,1.0f); - float remainder = (1.0f - randomness)/2.0f; - float remainderSignA = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; - float remainderSignB = (randFloatInRange(-1.0f,1.0f) < 0.0f) ? -1.0 : 1.0; - - if (face == MIN_X_FACE) { - diffusion = glm::vec3(-randomness, remainder * remainderSignA, remainder * remainderSignB); - } else if (face == MAX_X_FACE) { - diffusion = glm::vec3(randomness, remainder * remainderSignA, remainder * remainderSignB); - } else if (face == MIN_Y_FACE) { - diffusion = glm::vec3(remainder * remainderSignA, -randomness, remainder * remainderSignB); - } else if (face == MAX_Y_FACE) { - diffusion = glm::vec3(remainder * remainderSignA, randomness, remainder * remainderSignB); - } else if (face == MIN_Z_FACE) { - diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -randomness); - } else if (face == MAX_Z_FACE) { - diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, randomness); - } - - diffusion = glm::normalize(diffusion); - - // add sound sources for these diffusions - addSoundSource(end, diffusion, partialDiffusionAttenuation, currentDelay, pathDistance); - } - } - - // if our reflective attenuation is above our minimum, then add our reflection point and - // allow our path to continue - if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT - && totalDelay < MAXIMUM_DELAY_MS) { - - // add this location, as the reflective attenuation as well as the total diffusion attenuation - // NOTE: we add the delay to the audible point, not back to the listener. The additional delay - // and attenuation to the listener is recalculated at the point where we actually inject the - // audio so that it can be adjusted to ear position - AudiblePoint point = {end, currentDelay, (reflectiveAttenuation + totalDiffusionAttenuation), pathDistance}; - - _audiblePoints.push_back(point); - - // add this location to the path points, so we can visualize it - path->reflections.push_back(end); - - // now, if our reflective attenuation is over our minimum then keep going... 
- if (reflectiveAttenuation * toListenerAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) { - glm::vec3 faceNormal = getFaceNormal(face); - path->lastDirection = glm::normalize(glm::reflect(direction,faceNormal)); - path->lastPoint = end; - path->lastAttenuation = reflectiveAttenuation; - path->lastDelay = currentDelay; - path->lastDistance = pathDistance; - path->bounceCount++; - } else { - path->finalized = true; // if we're too quiet, then we're done - } - } else { - path->finalized = true; // if we're too quiet, then we're done - } } else { - path->finalized = true; // if it doesn't intersect, then it is finished + // If we didn't intersect, but this was a diffusion ray, then we will go ahead and cast a short ray out + // from our last known point, in the last known direction, and leave that sound source hanging there + if (path->isDiffusion) { + const float MINIMUM_RANDOM_DISTANCE = 0.25f; + const float MAXIMUM_RANDOM_DISTANCE = 0.5f; + float distance = randFloatInRange(MINIMUM_RANDOM_DISTANCE, MAXIMUM_RANDOM_DISTANCE); + handlePathPoint(path, distance, NULL, UNKNOWN_FACE); + } else { + path->finalized = true; // if it doesn't intersect, then it is finished + } } } } return activePaths; } +void AudioReflector::handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face) { + glm::vec3 start = path->lastPoint; + glm::vec3 direction = path->lastDirection; + glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT)); + + float currentReflectiveAttenuation = path->lastAttenuation; // only the reflective components + float currentDelay = path->lastDelay; // start with our delay so far + float pathDistance = path->lastDistance; + + pathDistance += glm::distance(start, end); + + // We aren't using this... should we be???? + float toListenerDistance = glm::distance(end, _listenerPosition); + + // adjust our current delay by just the delay from the most recent ray + currentDelay += getDelayFromDistance(distance); + + // now we know the current attenuation for the "perfect" reflection case, but we now incorporate + // our surface materials to determine how much of this ray is absorbed, reflected, and diffused + SurfaceCharacteristics material = getSurfaceCharacteristics(elementHit); + + float reflectiveAttenuation = currentReflectiveAttenuation * material.reflectiveRatio; + float totalDiffusionAttenuation = currentReflectiveAttenuation * material.diffusionRatio; + + bool wantDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); + int fanout = wantDiffusions ? _diffusionFanout : 0; + + float partialDiffusionAttenuation = fanout < 1 ? 0.0f : totalDiffusionAttenuation / fanout; + + // total delay includes the bounce back to listener + float totalDelay = currentDelay + getDelayFromDistance(toListenerDistance); + float toListenerAttenuation = getDistanceAttenuationCoefficient(toListenerDistance + pathDistance); + + // if our resulting partial diffusion attenuation, is still above our minimum attenuation + // then we add new paths for each diffusion point + if ((partialDiffusionAttenuation * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT + && totalDelay < MAXIMUM_DELAY_MS) { + + // diffusions fan out from random places on the semisphere of the collision point + for(int i = 0; i < fanout; i++) { + glm::vec3 diffusion; + + // We're creating a random normal here. But we want it to be relatively dramatic compared to how we handle + // our slightly random surface normals. 
+ const float MINIMUM_RANDOM_LENGTH = 0.5f; + const float MAXIMUM_RANDOM_LENGTH = 1.0f; + float randomness = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH); + float remainder = (1.0f - randomness)/2.0f; + float remainderSignA = randomSign(); + float remainderSignB = randomSign(); + + if (face == MIN_X_FACE) { + diffusion = glm::vec3(-randomness, remainder * remainderSignA, remainder * remainderSignB); + } else if (face == MAX_X_FACE) { + diffusion = glm::vec3(randomness, remainder * remainderSignA, remainder * remainderSignB); + } else if (face == MIN_Y_FACE) { + diffusion = glm::vec3(remainder * remainderSignA, -randomness, remainder * remainderSignB); + } else if (face == MAX_Y_FACE) { + diffusion = glm::vec3(remainder * remainderSignA, randomness, remainder * remainderSignB); + } else if (face == MIN_Z_FACE) { + diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -randomness); + } else if (face == MAX_Z_FACE) { + diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, randomness); + } else if (face == UNKNOWN_FACE) { + float randomnessX = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH); + float randomnessY = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH); + float randomnessZ = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH); + diffusion = glm::vec3(direction.x * randomnessX, direction.y * randomnessY, direction.z * randomnessZ); + } + + diffusion = glm::normalize(diffusion); + + // add sound sources for these diffusions + addSoundSource(end, diffusion, partialDiffusionAttenuation, currentDelay, pathDistance, true); + } + } else { + const bool wantDebugging = false; + if (wantDebugging) { + if ((partialDiffusionAttenuation * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) { + qDebug() << "too quiet to diffuse"; + qDebug() << " partialDiffusionAttenuation=" << partialDiffusionAttenuation; + qDebug() << " toListenerAttenuation=" << toListenerAttenuation; + qDebug() << " result=" << (partialDiffusionAttenuation * toListenerAttenuation); + qDebug() << " MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT; + } + if (totalDelay > MAXIMUM_DELAY_MS) { + qDebug() << "too delayed to diffuse"; + qDebug() << " totalDelay=" << totalDelay; + qDebug() << " MAXIMUM_DELAY_MS=" << MAXIMUM_DELAY_MS; + } + } + } + + // if our reflective attenuation is above our minimum, then add our reflection point and + // allow our path to continue + if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT + && totalDelay < MAXIMUM_DELAY_MS) { + + // add this location, as the reflective attenuation as well as the total diffusion attenuation + // NOTE: we add the delay to the audible point, not back to the listener. The additional delay + // and attenuation to the listener is recalculated at the point where we actually inject the + // audio so that it can be adjusted to ear position + AudiblePoint point = {end, currentDelay, (reflectiveAttenuation + totalDiffusionAttenuation), pathDistance}; + + _audiblePoints.push_back(point); + + // add this location to the path points, so we can visualize it + path->reflections.push_back(end); + + // now, if our reflective attenuation is over our minimum then keep going... 
+ if (reflectiveAttenuation * toListenerAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) { + glm::vec3 faceNormal = getFaceNormal(face); + path->lastDirection = glm::normalize(glm::reflect(direction,faceNormal)); + path->lastPoint = end; + path->lastAttenuation = reflectiveAttenuation; + path->lastDelay = currentDelay; + path->lastDistance = pathDistance; + path->bounceCount++; + } else { + path->finalized = true; // if we're too quiet, then we're done + } + } else { + const bool wantDebugging = false; + if (wantDebugging) { + if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) { + qDebug() << "too quiet to add audible point"; + qDebug() << " reflectiveAttenuation + totalDiffusionAttenuation=" << (reflectiveAttenuation + totalDiffusionAttenuation); + qDebug() << " toListenerAttenuation=" << toListenerAttenuation; + qDebug() << " result=" << ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation); + qDebug() << " MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT; + } + if (totalDelay > MAXIMUM_DELAY_MS) { + qDebug() << "too delayed to add audible point"; + qDebug() << " totalDelay=" << totalDelay; + qDebug() << " MAXIMUM_DELAY_MS=" << MAXIMUM_DELAY_MS; + } + } + path->finalized = true; // if we're too quiet, then we're done + } +} + +// TODO: eventually we will add support for different surface characteristics based on the element +// that is hit, which is why we pass in the elementHit to this helper function. But for now, all +// surfaces have the same characteristics SurfaceCharacteristics AudioReflector::getSurfaceCharacteristics(OctreeElement* elementHit) { SurfaceCharacteristics result = { getReflectiveRatio(), _absorptionRatio, _diffusionRatio }; return result; diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 3a806e2267..72eca9aac2 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -19,7 +19,9 @@ class AudioPath { public: AudioPath(const glm::vec3& origin = glm::vec3(0), const glm::vec3& direction = glm::vec3(0), float attenuation = 1.0f, - float delay = 0.0f, float distance = 0.0f, int bounceCount = 0); + float delay = 0.0f, float distance = 0.0f, bool isDiffusion = false, int bounceCount = 0); + + bool isDiffusion; glm::vec3 startPoint; glm::vec3 startDirection; float startDelay; @@ -137,11 +139,12 @@ private: // adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties, // as well as diffusion sound sources - void addSoundSource(const glm::vec3& origin, const glm::vec3& initialDirection, - float initialAttenuation, float initialDelay, float initialDistance = 0.0f); + void addSoundSource(const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation, + float initialDelay, float initialDistance = 0.0f, bool isDiffusion = false); // helper that handles audioPath analysis int analyzePathsSingleStep(); + void handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face); void analyzePaths(); void drawRays(); void drawPath(AudioPath* path, const glm::vec3& originalColor); diff --git a/libraries/octree/src/AABox.h b/libraries/octree/src/AABox.h index 093a111a69..1aa0849b70 100644 --- a/libraries/octree/src/AABox.h +++ b/libraries/octree/src/AABox.h @@ -23,7 +23,8 @@ enum BoxFace { MIN_Y_FACE, MAX_Y_FACE, MIN_Z_FACE, - MAX_Z_FACE + MAX_Z_FACE, + UNKNOWN_FACE }; enum BoxVertex { diff --git 
a/libraries/shared/src/SharedUtil.cpp b/libraries/shared/src/SharedUtil.cpp
index d91e01d17f..db62f33f06 100644
--- a/libraries/shared/src/SharedUtil.cpp
+++ b/libraries/shared/src/SharedUtil.cpp
@@ -56,6 +56,10 @@ float randFloatInRange (float min,float max) {
     return min + ((rand() % 10000)/10000.f * (max-min));
 }
 
+float randomSign() {
+    return randomBoolean() ? -1.0f : 1.0f;
+}
+
 unsigned char randomColorValue(int miniumum) {
     return miniumum + (rand() % (256 - miniumum));
 }
diff --git a/libraries/shared/src/SharedUtil.h b/libraries/shared/src/SharedUtil.h
index 236c1f74a1..c909b80b13 100644
--- a/libraries/shared/src/SharedUtil.h
+++ b/libraries/shared/src/SharedUtil.h
@@ -78,6 +78,7 @@ void usecTimestampNowForceClockSkew(int clockSkew);
 float randFloat();
 int randIntInRange (int min, int max);
 float randFloatInRange (float min,float max);
+float randomSign(); /// \return -1.0 or 1.0
 unsigned char randomColorValue(int minimum);
 bool randomBoolean();
diff --git a/libraries/voxels/src/VoxelDetail.cpp b/libraries/voxels/src/VoxelDetail.cpp
index 6c385c9387..f1855f5f81 100644
--- a/libraries/voxels/src/VoxelDetail.cpp
+++ b/libraries/voxels/src/VoxelDetail.cpp
@@ -75,6 +75,9 @@ QScriptValue rayToVoxelIntersectionResultToScriptValue(QScriptEngine* engine, co
         case MAX_Z_FACE:
             faceName = "MAX_Z_FACE";
             break;
+        case UNKNOWN_FACE:
+            faceName = "UNKNOWN_FACE";
+            break;
     }
     obj.setProperty("face", faceName);
 

From 57513610fdb45111d501b385c9cb9fe48fdacb2c Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Tue, 15 Apr 2014 12:43:02 -0700
Subject: [PATCH 39/64] recalc on any attribute changes

---
 interface/src/AudioReflector.cpp | 39 +++++++++++++++++++++++++++-----
 interface/src/AudioReflector.h   | 11 ++++++++-
 2 files changed, 43 insertions(+), 7 deletions(-)

diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index 7801c26924..6c3e1e0f0b 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -32,7 +32,13 @@ AudioReflector::AudioReflector(QObject* parent) :
     _diffusionFanout(DEFAULT_DIFFUSION_FANOUT),
     _absorptionRatio(DEFAULT_ABSORPTION_RATIO),
     _diffusionRatio(DEFAULT_DIFFUSION_RATIO),
-    _withDiffusion(false)
+    _withDiffusion(false),
+    _lastPreDelay(DEFAULT_PRE_DELAY),
+    _lastSoundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
+    _lastDistanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
+    _lastDiffusionFanout(DEFAULT_DIFFUSION_FANOUT),
+    _lastAbsorptionRatio(DEFAULT_ABSORPTION_RATIO),
+    _lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO)
 {
     _reflections = 0;
     _diffusionPathCount = 0;
@@ -44,6 +50,30 @@ AudioReflector::AudioReflector(QObject* parent) :
     _minDelay = 0;
 }
 
+bool AudioReflector::haveAttributesChanged() {
+    bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
+
+    bool attributesChange = (_withDiffusion != withDiffusion
+        || _lastPreDelay != _preDelay
+        || _lastSoundMsPerMeter != _soundMsPerMeter
+        || _lastDistanceAttenuationScalingFactor != _distanceAttenuationScalingFactor
+        || _lastDiffusionFanout != _diffusionFanout
+        || _lastAbsorptionRatio != _absorptionRatio
+        || _lastDiffusionRatio != _diffusionRatio);
+
+    if (attributesChange) {
+        _withDiffusion = withDiffusion;
+        _lastPreDelay = _preDelay;
+        _lastSoundMsPerMeter = _soundMsPerMeter;
+        _lastDistanceAttenuationScalingFactor = _distanceAttenuationScalingFactor;
+        _lastDiffusionFanout = _diffusionFanout;
+        _lastAbsorptionRatio = _absorptionRatio;
+        _lastDiffusionRatio = _diffusionRatio;
+    }
+
+    return attributesChange;
+}
+
 void 
AudioReflector::render() { // if we're not set up yet, or we're not processing spatial audio, then exit early @@ -274,20 +304,18 @@ void AudioReflector::addSoundSource(const glm::vec3& origin, const glm::vec3& in } void AudioReflector::calculateAllReflections() { - // only recalculate when we've moved... + // only recalculate when we've moved, or if the attributes have changed // TODO: what about case where new voxels are added in front of us??? bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented); glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation(); glm::vec3 origin = _myAvatar->getHead()->getPosition(); glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition(); - bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); - bool shouldRecalc = _reflections == 0 || !isSimilarPosition(origin, _origin) || !isSimilarOrientation(orientation, _orientation) || !isSimilarPosition(listenerPosition, _listenerPosition) - || (withDiffusion != _withDiffusion); + || haveAttributesChanged(); if (shouldRecalc) { QMutexLocker locker(&_mutex); @@ -295,7 +323,6 @@ void AudioReflector::calculateAllReflections() { _origin = origin; _orientation = orientation; _listenerPosition = listenerPosition; - _withDiffusion = withDiffusion; analyzePaths(); // actually does the work quint64 end = usecTimestampNow(); const bool wantDebugging = false; diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 72eca9aac2..6a4d10524f 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -171,8 +171,17 @@ private: float _absorptionRatio; float _diffusionRatio; float _reflectiveRatio; - + + // remember the last known values at calculation + bool haveAttributesChanged(); + bool _withDiffusion; + float _lastPreDelay; + float _lastSoundMsPerMeter; + float _lastDistanceAttenuationScalingFactor; + int _lastDiffusionFanout; + float _lastAbsorptionRatio; + float _lastDiffusionRatio; }; From f3f9325a42d4f819c22f5b58caf40c77d08f19ff Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 12:45:28 -0700 Subject: [PATCH 40/64] fixed comment --- examples/audioReflectorTools.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/audioReflectorTools.js b/examples/audioReflectorTools.js index 0467a4f937..acab563d4c 100644 --- a/examples/audioReflectorTools.js +++ b/examples/audioReflectorTools.js @@ -1,11 +1,11 @@ // -// overlaysExample.js +// audioReflectorTools.js // hifi // // Created by Brad Hefta-Gaub on 2/14/14. // Copyright (c) 2014 HighFidelity, Inc. All rights reserved. 
// -// This is an example script that demonstrates use of the Overlays class +// Tools for manipulating the attributes of the AudioReflector behavior // // From 9d49a5343417c50f097dcd1f253752789ac20dd0 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 12:50:05 -0700 Subject: [PATCH 41/64] coding standard cleanup --- interface/src/Audio.cpp | 1 - interface/src/AudioReflector.h | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 2cc02b1368..daaffe85a0 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -518,7 +518,6 @@ void Audio::handleAudioInput() { // Add tone injection if enabled const float TONE_FREQ = 220.f / SAMPLE_RATE * TWO_PI; - //const float TONE_FREQ = 5000.f / SAMPLE_RATE * TWO_PI; const float QUARTER_VOLUME = 8192.f; if (_toneInjectionEnabled) { loudness = 0.f; diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 6a4d10524f..b3faf4ff03 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -6,8 +6,8 @@ // Copyright (c) 2014 High Fidelity, Inc. All rights reserved. // -#ifndef __interface__AudioReflector__ -#define __interface__AudioReflector__ +#ifndef interface_AudioReflector_h +#define interface_AudioReflector_h #include @@ -185,4 +185,4 @@ private: }; -#endif /* defined(__interface__AudioReflector__) */ +#endif // interface_AudioReflector_h From 74828a321503ef9305d24af43eac9a9f00a5b1b6 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 12:54:59 -0700 Subject: [PATCH 42/64] removed non-functional low pass filter --- interface/src/Audio.cpp | 39 --------------------------------------- interface/src/Audio.h | 2 -- interface/src/Menu.cpp | 4 ---- interface/src/Menu.h | 1 - 4 files changed, 46 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index daaffe85a0..9ef687710a 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -819,14 +819,6 @@ void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) { } - // add the next numNetworkOutputSamples from each QByteArray - // in our _localInjectionByteArrays QVector to the localInjectedSamples - if (Menu::getInstance()->isOptionChecked(MenuOption::LowPassFilter)) { - int channels = _desiredOutputFormat.channelCount(); - int filterSamples = numNetworkOutputSamples / channels; - lowPassFilter(ringBufferSamples, filterSamples, channels); - } - // copy the packet from the RB to the output linearResampling(ringBufferSamples, (int16_t*) outputBuffer.data(), @@ -956,37 +948,6 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) { } } - -// simple 3 pole low pass filter -void Audio::lowPassFilter(int16_t* inputBuffer, int samples, int channels) { - - //qDebug() << "lowPassFilter() samples=" << samples << " channels=" << channels; - //const int POLE_COUNT = 3; - - for (int c = 0; c < channels; c++) { - const float C1 = 0.25f; // 0.0f; // - const float C2 = 0.5f; // 1.0f; // - const float C3 = 0.25f; // 0.0f; // - int16_t S1,S2,S3; - S1 = inputBuffer[c]; // start with the Nth sample, based on the current channel, this is the fist sample for the channel - for (int i = 0; i < samples; i++) { - int sampleAt = (i * channels) + c; - int nextSampleAt = sampleAt + channels; - S2 = inputBuffer[sampleAt]; - if (i == samples - 1) { - S3 = inputBuffer[sampleAt]; - } else { - S3 = inputBuffer[nextSampleAt]; - } - // save our S1 for next time before we mod this - S1 = inputBuffer[sampleAt]; - inputBuffer[sampleAt] = (C1 
* S1) + (C2 * S2) + (C3 * S3); - //qDebug() << "channel=" << c << " sampleAt=" << sampleAt; - } - } -} - - // Starts a collision sound. magnitude is 0-1, with 1 the loudest possible sound. void Audio::startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen) { _collisionSoundMagnitude = magnitude; diff --git a/interface/src/Audio.h b/interface/src/Audio.h index bac8d48ccf..8ed93efbe0 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -55,8 +55,6 @@ public: void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; } int getJitterBufferSamples() { return _jitterBufferSamples; } - void lowPassFilter(int16_t* inputBuffer, int samples, int channels); - virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen); virtual void startDrumSound(float volume, float frequency, float duration, float decay); diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index e7c5b84a42..a02c8342a7 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -383,10 +383,6 @@ Menu::Menu() : appInstance->getAudio(), SLOT(toggleToneInjection())); - addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter, - Qt::CTRL | Qt::SHIFT | Qt::Key_F, - false); - QMenu* spatialAudioMenu = audioDebugMenu->addMenu("Spatial Audio"); addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessing, diff --git a/interface/src/Menu.h b/interface/src/Menu.h index 578d4e8dc6..5fcbf11724 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -293,7 +293,6 @@ namespace MenuOption { const QString Login = "Login"; const QString Logout = "Logout"; const QString LookAtVectors = "Look-at Vectors"; - const QString LowPassFilter = "Low Pass Filter"; const QString MetavoxelEditor = "Metavoxel Editor..."; const QString Metavoxels = "Metavoxels"; const QString Mirror = "Mirror"; From cd23b95b42535354fc1f75a3ababd53a0517410b Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 13:16:52 -0700 Subject: [PATCH 43/64] revert back to using QByteArray for processReceivedAudio() --- interface/src/Audio.cpp | 24 ++++++++++++------------ interface/src/Audio.h | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 9ef687710a..bb7ee1a704 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -652,8 +652,7 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) { if (_audioOutput) { // Audio output must exist and be correctly set up if we're going to process received audio - _ringBuffer.parseData(audioByteArray); - processReceivedAudio(_ringBuffer); + processReceivedAudio(audioByteArray); } Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size()); @@ -753,23 +752,24 @@ void Audio::toggleAudioNoiseReduction() { _noiseGateEnabled = !_noiseGateEnabled; } -void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) { - +void Audio::processReceivedAudio(const QByteArray& audioByteArray) { + _ringBuffer.parseData(audioByteArray); + float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate()) * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount()); - if (!ringBuffer.isStarved() && _audioOutput && _audioOutput->bytesFree() == _audioOutput->bufferSize()) { + if (!_ringBuffer.isStarved() && _audioOutput && 
_audioOutput->bytesFree() == _audioOutput->bufferSize()) { // we don't have any audio data left in the output buffer // we just starved //qDebug() << "Audio output just starved."; - ringBuffer.setIsStarved(true); + _ringBuffer.setIsStarved(true); _numFramesDisplayStarve = 10; } // if there is anything in the ring buffer, decide what to do - if (ringBuffer.samplesAvailable() > 0) { + if (_ringBuffer.samplesAvailable() > 0) { - int numNetworkOutputSamples = ringBuffer.samplesAvailable(); + int numNetworkOutputSamples = _ringBuffer.samplesAvailable(); int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio; QByteArray outputBuffer; @@ -777,13 +777,13 @@ void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) { int numSamplesNeededToStartPlayback = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2); - if (!ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) { + if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) { // We are still waiting for enough samples to begin playback // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback; } else { // We are either already playing back, or we have enough audio to start playing back. //qDebug() << "pushing " << numNetworkOutputSamples; - ringBuffer.setIsStarved(false); + _ringBuffer.setIsStarved(false); int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples]; if (_processSpatialAudio) { @@ -791,7 +791,7 @@ void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) { QByteArray buffer; buffer.resize(numNetworkOutputSamples * sizeof(int16_t)); - ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); + _ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); // Accumulate direct transmission of audio from sender to receiver if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal)) { addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples); @@ -814,7 +814,7 @@ void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) { // copy the samples we'll resample from the ring buffer - this also // pushes the read pointer of the ring buffer forwards - ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); + _ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); } diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 8ed93efbe0..196058047d 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -187,7 +187,7 @@ private: void addProceduralSounds(int16_t* monoInput, int numSamples); // Process received audio - void processReceivedAudio(AudioRingBuffer& ringBuffer); + void processReceivedAudio(const QByteArray& audioByteArray); bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo); bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo); From 579710e4bd16c5d753a2c5c5eeb57cfc233c5ef3 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 13:36:20 -0700 Subject: [PATCH 44/64] some cleanup --- interface/src/Audio.cpp | 2 +- libraries/audio/src/AudioRingBuffer.cpp | 44 +++++++++++++++---------- libraries/audio/src/AudioRingBuffer.h | 3 +- 3 files changed, 30 insertions(+), 19 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index bb7ee1a704..da10f4cfe1 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -92,7 +92,7 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) : 
_processSpatialAudio(false), _spatialAudioStart(0), _spatialAudioFinish(0), - _spatialAudioRingBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL) + _spatialAudioRingBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL, true) // random access mode { // clear the array of locally injected samples memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL); diff --git a/libraries/audio/src/AudioRingBuffer.cpp b/libraries/audio/src/AudioRingBuffer.cpp index 45dafdca58..9b50ed0bcb 100644 --- a/libraries/audio/src/AudioRingBuffer.cpp +++ b/libraries/audio/src/AudioRingBuffer.cpp @@ -18,16 +18,19 @@ #include "AudioRingBuffer.h" -AudioRingBuffer::AudioRingBuffer(int numFrameSamples) : +AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode) : NodeData(), _sampleCapacity(numFrameSamples * RING_BUFFER_LENGTH_FRAMES), _numFrameSamples(numFrameSamples), _isStarved(true), - _hasStarted(false) + _hasStarted(false), + _randomAccessMode(randomAccessMode) { if (numFrameSamples) { _buffer = new int16_t[_sampleCapacity]; - memset(_buffer, 0, _sampleCapacity * sizeof(int16_t)); + if (_randomAccessMode) { + memset(_buffer, 0, _sampleCapacity * sizeof(int16_t)); + } _nextOutput = _buffer; _endOfLastWrite = _buffer; } else { @@ -51,7 +54,9 @@ void AudioRingBuffer::resizeForFrameSize(qint64 numFrameSamples) { delete[] _buffer; _sampleCapacity = numFrameSamples * RING_BUFFER_LENGTH_FRAMES; _buffer = new int16_t[_sampleCapacity]; - memset(_buffer, 0, _sampleCapacity * sizeof(int16_t)); + if (_randomAccessMode) { + memset(_buffer, 0, _sampleCapacity * sizeof(int16_t)); + } _nextOutput = _buffer; _endOfLastWrite = _buffer; } @@ -68,8 +73,14 @@ qint64 AudioRingBuffer::readSamples(int16_t* destination, qint64 maxSamples) { qint64 AudioRingBuffer::readData(char *data, qint64 maxSize) { // only copy up to the number of samples we have available - //int numReadSamples = std::min((unsigned) (maxSize / sizeof(int16_t)), samplesAvailable()); - int numReadSamples = _endOfLastWrite ? (maxSize / sizeof(int16_t)) : samplesAvailable(); + int numReadSamples = std::min((unsigned) (maxSize / sizeof(int16_t)), samplesAvailable()); + + // If we're in random access mode, then we consider our number of available read samples slightly + // differently. Namely, if anything has been written, we say we have as many samples as they ask for + // otherwise we say we have nothing available + if (_randomAccessMode) { + numReadSamples = _endOfLastWrite ? 
(maxSize / sizeof(int16_t)) : 0; + } if (_nextOutput + numReadSamples > _buffer + _sampleCapacity) { // we're going to need to do two reads to get this data, it wraps around the edge @@ -77,15 +88,21 @@ qint64 AudioRingBuffer::readData(char *data, qint64 maxSize) { // read to the end of the buffer int numSamplesToEnd = (_buffer + _sampleCapacity) - _nextOutput; memcpy(data, _nextOutput, numSamplesToEnd * sizeof(int16_t)); - memset(_nextOutput, 0, numSamplesToEnd * sizeof(int16_t)); // clear it + if (_randomAccessMode) { + memset(_nextOutput, 0, numSamplesToEnd * sizeof(int16_t)); // clear it + } // read the rest from the beginning of the buffer memcpy(data + (numSamplesToEnd * sizeof(int16_t)), _buffer, (numReadSamples - numSamplesToEnd) * sizeof(int16_t)); - memset(_buffer, 0, (numReadSamples - numSamplesToEnd) * sizeof(int16_t)); // clear it + if (_randomAccessMode) { + memset(_buffer, 0, (numReadSamples - numSamplesToEnd) * sizeof(int16_t)); // clear it + } } else { // read the data memcpy(data, _nextOutput, numReadSamples * sizeof(int16_t)); - memset(_nextOutput, 0, numReadSamples * sizeof(int16_t)); // clear it + if (_randomAccessMode) { + memset(_nextOutput, 0, numReadSamples * sizeof(int16_t)); // clear it + } } // push the position of _nextOutput by the number of samples read @@ -111,7 +128,7 @@ qint64 AudioRingBuffer::writeData(const char* data, qint64 maxSize) { && (less(_endOfLastWrite, _nextOutput) && lessEqual(_nextOutput, shiftedPositionAccomodatingWrap(_endOfLastWrite, samplesToCopy)))) { // this read will cross the next output, so call us starved and reset the buffer - qDebug() << "Filled the ring buffer. Resetting. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"; + qDebug() << "Filled the ring buffer. Resetting."; _endOfLastWrite = _buffer; _nextOutput = _buffer; _isStarved = true; @@ -151,13 +168,6 @@ unsigned int AudioRingBuffer::samplesAvailable() const { if (sampleDifference < 0) { sampleDifference += _sampleCapacity; } - - if (sampleDifference == 0) { - qDebug() << "ran dry!!! _endOfLastWrite=" << _endOfLastWrite - << "_nextOutput=" << _nextOutput - << "_buffer + _sampleCapacity=" << (_buffer + _sampleCapacity) - << " samplesAvailable() == 0!!!!!!!!!!!!!!!!!!!!"; - } return sampleDifference; } diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index 7ec686f1ac..04cc67c8ac 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -39,7 +39,7 @@ const int MIN_SAMPLE_VALUE = std::numeric_limits::min(); class AudioRingBuffer : public NodeData { Q_OBJECT public: - AudioRingBuffer(int numFrameSamples); + AudioRingBuffer(int numFrameSamples, bool randomAccessMode = false); ~AudioRingBuffer(); void reset(); @@ -88,6 +88,7 @@ protected: int16_t* _buffer; bool _isStarved; bool _hasStarted; + bool _randomAccessMode; /// will this ringbuffer be used for random access? 
if so, do some special processing }; #endif // hifi_AudioRingBuffer_h From 5a0963a73183fce52e77d32760630bf872681234 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 13:58:31 -0700 Subject: [PATCH 45/64] cleaned up comments and range protected some settings --- interface/src/AudioReflector.cpp | 22 +++++++++++++- interface/src/AudioReflector.h | 50 ++++++++++++++++++++------------ 2 files changed, 53 insertions(+), 19 deletions(-) diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 6c3e1e0f0b..6d74228233 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -624,10 +624,30 @@ SurfaceCharacteristics AudioReflector::getSurfaceCharacteristics(OctreeElement* } void AudioReflector::setReflectiveRatio(float ratio) { + float safeRatio = std::max(0.0f, std::min(ratio, 1.0f)); float currentReflectiveRatio = (1.0f - (_absorptionRatio + _diffusionRatio)); - float halfDifference = (ratio - currentReflectiveRatio) / 2.0f; + float halfDifference = (safeRatio - currentReflectiveRatio) / 2.0f; + // evenly distribute the difference between the two other ratios _absorptionRatio -= halfDifference; _diffusionRatio -= halfDifference; } +void AudioReflector::setAbsorptionRatio(float ratio) { + float safeRatio = std::max(0.0f, std::min(ratio, 1.0f)); + _absorptionRatio = safeRatio; + const float MAX_COMBINED_RATIO = 1.0f; + if (_absorptionRatio + _diffusionRatio > MAX_COMBINED_RATIO) { + _diffusionRatio = MAX_COMBINED_RATIO - _absorptionRatio; + } +} + +void AudioReflector::setDiffusionRatio(float ratio) { + float safeRatio = std::max(0.0f, std::min(ratio, 1.0f)); + _diffusionRatio = safeRatio; + const float MAX_COMBINED_RATIO = 1.0f; + if (_absorptionRatio + _diffusionRatio > MAX_COMBINED_RATIO) { + _absorptionRatio = MAX_COMBINED_RATIO - _diffusionRatio; + } +} + diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index b3faf4ff03..0dea3b1d61 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -40,10 +40,10 @@ public: class AudiblePoint { public: - glm::vec3 location; - float delay; // includes total delay including pre delay to the point of the audible location, not to the listener's ears - float attenuation; // only the reflective & diffusive portion of attenuation, doesn't include distance attenuation - float distance; // includes total distance to the point of the audible location, not to the listener's ears + glm::vec3 location; /// location of the audible point + float delay; /// includes total delay including pre delay to the point of the audible location, not to the listener's ears + float attenuation; /// only the reflective & diffusive portion of attenuation, doesn't include distance attenuation + float distance; /// includes total distance to the point of the audible location, not to the listener's ears }; class SurfaceCharacteristics { @@ -59,12 +59,18 @@ class AudioReflector : public QObject { public: AudioReflector(QObject* parent = NULL); + // setup functions to configure the resources used by the AudioReflector void setVoxels(VoxelTree* voxels) { _voxels = voxels; } void setMyAvatar(MyAvatar* myAvatar) { _myAvatar = myAvatar; } void setAudio(Audio* audio) { _audio = audio; } - void render(); + void render(); /// must be called in the application render loop + void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); + void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& 
format); + +public slots: + // statistics int getReflections() const { return _reflections; } float getAverageDelayMsecs() const { return _averageDelay; } float getAverageAttenuation() const { return _averageAttenuation; } @@ -75,24 +81,32 @@ public: float getDelayFromDistance(float distance); int getDiffusionPathCount() const { return _diffusionPathCount; } - void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); - void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); - -public slots: - + /// ms of delay added to all echos float getPreDelay() const { return _preDelay; } void setPreDelay(float preDelay) { _preDelay = preDelay; } - float getSoundMsPerMeter() const { return _soundMsPerMeter; } /// ms per meter, larger means slower - void setSoundMsPerMeter(float soundMsPerMeter) { _soundMsPerMeter = soundMsPerMeter; } - float getDistanceAttenuationScalingFactor() const { return _distanceAttenuationScalingFactor; } /// ms per meter, larger means slower - void setDistanceAttenuationScalingFactor(float factor) { _distanceAttenuationScalingFactor = factor; } - int getDiffusionFanout() const { return _diffusionFanout; } /// number of points of diffusion from each reflection point - void setDiffusionFanout(int fanout) { _diffusionFanout = fanout; } /// number of points of diffusion from each reflection point + /// ms per meter that sound travels, larger means slower, which sounds bigger + float getSoundMsPerMeter() const { return _soundMsPerMeter; } + void setSoundMsPerMeter(float soundMsPerMeter) { _soundMsPerMeter = soundMsPerMeter; } + + /// scales attenuation to be louder or softer than the default distance attenuation + float getDistanceAttenuationScalingFactor() const { return _distanceAttenuationScalingFactor; } + void setDistanceAttenuationScalingFactor(float factor) { _distanceAttenuationScalingFactor = factor; } + + /// number of points of diffusion from each reflection point, as fanout increases there are more chances for secondary + /// echoes, but each diffusion ray is quieter and therefore more likely to be below the sound floor + int getDiffusionFanout() const { return _diffusionFanout; } + void setDiffusionFanout(int fanout) { _diffusionFanout = fanout; } + + /// ratio 0.0 - 1.0 of amount of each ray that is absorbed upon hitting a surface float getAbsorptionRatio() const { return _absorptionRatio; } - void setAbsorptionRatio(float ratio) { _absorptionRatio = ratio; } + void setAbsorptionRatio(float ratio); + + // ratio 0.0 - 1.0 of amount of each ray that is diffused upon hitting a surface float getDiffusionRatio() const { return _diffusionRatio; } - void setDiffusionRatio(float ratio) { _diffusionRatio = ratio; } + void setDiffusionRatio(float ratio); + + // remaining ratio 0.0 - 1.0 of amount of each ray that is cleanly reflected upon hitting a surface float getReflectiveRatio() const { return (1.0f - (_absorptionRatio + _diffusionRatio)); } void setReflectiveRatio(float ratio); From f12ac3b03608f9b88f8da5a2c2f709667e68d1ee Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 14:14:51 -0700 Subject: [PATCH 46/64] some cleanup --- interface/src/AudioReflector.cpp | 21 ++++++++++++--------- interface/src/AudioReflector.h | 11 ++--------- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 6d74228233..fa4573b1b4 100644 --- a/interface/src/AudioReflector.cpp +++ 
b/interface/src/AudioReflector.cpp @@ -249,8 +249,6 @@ void AudioReflector::echoAudio(unsigned int sampleTime, const QByteArray& sample _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount; _averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount; - _reflections = _audiblePoints.size(); - _diffusionPathCount = countDiffusionPaths(); if (_reflections == 0) { _minDelay = 0.0f; @@ -364,6 +362,13 @@ void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) { } +// Here's how this works: we have an array of AudioPaths, we loop on all of our currently calculating audio +// paths, and calculate one ray per path. If that ray doesn't reflect, or reaches a max distance/attenuation, then it +// is considered finalized. +// If the ray hits a surface, then, based on the characteristics of that surface, it will calculate the new +// attenuation, path length, and delay for the primary path. For surfaces that have diffusion, it will also create +// fanout number of new paths, those new paths will have an origin of the reflection point, and an initial attenuation +// of their diffusion ratio. Those new paths will be added to the active audio paths, and be analyzed for the next loop. void AudioReflector::analyzePaths() { // clear our _audioPaths foreach(AudioPath* const& path, _audioPaths) { @@ -391,7 +396,10 @@ void AudioReflector::analyzePaths() { float initialAttenuation = 1.0f; float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? _preDelay : 0.0f; - + + // NOTE: we're still calculating our initial paths based on the listeners position. But the analysis code has been + // updated to support individual sound sources (which is how we support diffusion), we can use this new paradigm to + // add support for individual sound sources, and more directional sound sources addSoundSource(_origin, right, initialAttenuation, preDelay); addSoundSource(_origin, front, initialAttenuation, preDelay); addSoundSource(_origin, up, initialAttenuation, preDelay); @@ -422,8 +430,7 @@ int AudioReflector::countDiffusionPaths() { int diffusionCount = 0; foreach(AudioPath* const& path, _audioPaths) { - // if this is NOT an original reflection then it's a diffusion path - if (path->startPoint != _origin) { + if (path->isDiffusion) { diffusionCount++; } } @@ -432,13 +439,9 @@ int AudioReflector::countDiffusionPaths() { int AudioReflector::analyzePathsSingleStep() { // iterate all the active sound paths, calculate one step per active path - int activePaths = 0; foreach(AudioPath* const& path, _audioPaths) { - //bool wantExtraDebuggging = false; - //bool isDiffusion = (path->startPoint != _origin); - glm::vec3 start = path->lastPoint; glm::vec3 direction = path->lastDirection; OctreeElement* elementHit; // output from findRayIntersection diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 0dea3b1d61..7867df3d9b 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -141,15 +141,8 @@ private: glm::vec3 _origin; glm::quat _orientation; - // NOTE: Here's the new way, we will have an array of AudioPaths, we will loop on all of our currently calculating audio - // paths, and calculate one ray per path. If that ray doesn't reflect, or reaches a max distance/attenuation, then it - // is considered finalized. 
- // If the ray hits a surface, then, based on the characteristics of that surface, it will create calculate the new - // attenuation, path length, and delay for the primary path. For surfaces that have diffusion, it will also create - // fanout number of new paths, those new paths will have an origin of the reflection point, and an initial attenuation - // of their diffusion ratio. Those new paths will be added to the active audio paths, and be analyzed for the next loop. - QVector<AudioPath*> _audioPaths; - QVector<AudiblePoint> _audiblePoints; + QVector<AudioPath*> _audioPaths; /// the various audio paths we're processing + QVector<AudiblePoint> _audiblePoints; /// the audible points that have been calculated from the paths // adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties, // as well as diffusion sound sources From 24f0f37eb9a020244a261a80badc06945549b986 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 14:21:28 -0700 Subject: [PATCH 47/64] cleanup --- interface/src/Audio.cpp | 7 ------- 1 file changed, 7 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index da10f4cfe1..50f50a3347 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -806,17 +806,10 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { // Advance the start point for the next packet of audio to arrive _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount(); - - // Advance the read position by the same amount - //ringBuffer.shiftReadPosition(numNetworkOutputSamples); - } else { - // copy the samples we'll resample from the ring buffer - this also // pushes the read pointer of the ring buffer forwards _ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); - - } // copy the packet from the RB to the output From e514c9564211f345e27b0a1bdb094dc5cc39caef Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 14:47:38 -0700 Subject: [PATCH 48/64] variable names cleanup to match coding standard --- interface/src/Audio.cpp | 55 ++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 31 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 50f50a3347..1d8dadea81 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -674,49 +674,42 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& s // Locate where in the accumulation buffer the new samples need to go if (sampleTime >= _spatialAudioFinish) { if (_spatialAudioStart == _spatialAudioFinish) { - - // Nothing in the spatial audio ring buffer yet - // Just do a straight copy, clipping if necessary - unsigned int sampleCt = (remaining < numSamples) ? remaining : numSamples; - if (sampleCt) { - _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCt); + // Nothing in the spatial audio ring buffer yet, Just do a straight copy, clipping if necessary + unsigned int sampleCount = (remaining < numSamples) ? remaining : numSamples; + if (sampleCount) { + _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCount); } - _spatialAudioFinish = _spatialAudioStart + sampleCt / _desiredOutputFormat.channelCount(); - + _spatialAudioFinish = _spatialAudioStart + sampleCount / _desiredOutputFormat.channelCount(); } else { - // Spatial audio ring buffer already has data, but there is no overlap with the new sample. // Compute the appropriate time delay and pad with silence until the new start time.
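The gap-padding arithmetic that follows is easier to see in isolation. A minimal sketch of the same math (hypothetical helper name; it assumes, as the surrounding code suggests, that sampleTime and the finish marker count per-channel sample ticks while the ring buffer stores interleaved samples):

    // How many interleaved samples of silence to write before the new audio
    // can start, clipped to the free space left in the accumulation buffer.
    unsigned int silentSamplesToPad(unsigned int sampleTime, unsigned int finish,
                                    int channelCount, unsigned int remaining) {
        unsigned int delay = sampleTime - finish;        // gap in per-channel ticks
        unsigned int delayCount = delay * channelCount;  // gap in interleaved samples
        return (remaining < delayCount) ? remaining : delayCount;
    }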
unsigned int delay = sampleTime - _spatialAudioFinish; - unsigned int ct = delay * _desiredOutputFormat.channelCount(); - unsigned int silentCt = (remaining < ct) ? remaining : ct; - if (silentCt) { - _spatialAudioRingBuffer.addSilentFrame(silentCt); + unsigned int delayCount = delay * _desiredOutputFormat.channelCount(); + unsigned int silentCount = (remaining < delayCount) ? remaining : delayCount; + if (silentCount) { + _spatialAudioRingBuffer.addSilentFrame(silentCount); } // Recalculate the number of remaining samples - remaining -= silentCt; - unsigned int sampleCt = (remaining < numSamples) ? remaining : numSamples; + remaining -= silentCount; + unsigned int sampleCount = (remaining < numSamples) ? remaining : numSamples; // Copy the new spatial audio to the accumulation ring buffer - if (sampleCt) { - _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCt); + if (sampleCount) { + _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCount); } - _spatialAudioFinish += (sampleCt + silentCt) / _desiredOutputFormat.channelCount(); + _spatialAudioFinish += (sampleCount + silentCount) / _desiredOutputFormat.channelCount(); } } else { - - // There is overlap between the spatial audio buffer and the new sample, - // acumulate the overlap - + // There is overlap between the spatial audio buffer and the new sample, mix the overlap // Calculate the offset from the buffer's current read position, which should be located at _spatialAudioStart unsigned int offset = (sampleTime - _spatialAudioStart) * _desiredOutputFormat.channelCount(); - unsigned int accumulationCt = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount(); - accumulationCt = (accumulationCt < numSamples) ? accumulationCt : numSamples; + unsigned int mixedSamplesCount = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount(); + mixedSamplesCount = (mixedSamplesCount < numSamples) ? mixedSamplesCount : numSamples; const int16_t* spatial = reinterpret_cast<const int16_t*>(spatialAudio.data()); int j = 0; for (int i = accumulationCt; --i >= 0; j++) { int t1 = _spatialAudioRingBuffer[j + offset]; int t2 = spatial[j]; int tmp = t1 + t2; @@ -724,13 +717,13 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& s static_cast<int16_t>(glm::clamp(tmp, std::numeric_limits<int16_t>::min(), std::numeric_limits<int16_t>::max())); } - // Copy the remaining unoverlapped spatial audio to the accumulation buffer, if any - unsigned int sampleCt = numSamples - accumulationCt; - sampleCt = (remaining < sampleCt) ? remaining : sampleCt; - if (sampleCt) { - _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data() + accumulationCt, sampleCt); + // Copy the remaining unoverlapped spatial audio to the spatial audio buffer, if any + unsigned int nonMixedSampleCount = numSamples - mixedSamplesCount; + nonMixedSampleCount = (remaining < nonMixedSampleCount) ? remaining : nonMixedSampleCount; + if (nonMixedSampleCount) { + _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data() + mixedSamplesCount, nonMixedSampleCount); // Extend the finish time by the amount of unoverlapped samples - _spatialAudioFinish += sampleCt / _desiredOutputFormat.channelCount(); + _spatialAudioFinish += nonMixedSampleCount / _desiredOutputFormat.channelCount(); } } } From 5bc8c83706f2f247baf4c283aec9ef8dcc754581 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 15:16:55 -0700 Subject: [PATCH 49/64] loop and variable name cleanup --- interface/src/Audio.cpp | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 1d8dadea81..d212f41098 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -708,13 +708,12 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& s mixedSamplesCount = (mixedSamplesCount < numSamples) ? mixedSamplesCount : numSamples; const int16_t* spatial = reinterpret_cast<const int16_t*>(spatialAudio.data()); - int j = 0; - for (int i = mixedSamplesCount; --i >= 0; j++) { - int t1 = _spatialAudioRingBuffer[j + offset]; - int t2 = spatial[j]; - int tmp = t1 + t2; - _spatialAudioRingBuffer[j + offset] = - static_cast<int16_t>(glm::clamp(tmp, std::numeric_limits<int16_t>::min(), std::numeric_limits<int16_t>::max())); + for (int i = 0; i < mixedSamplesCount; i++) { + int existingSample = _spatialAudioRingBuffer[i + offset]; + int newSample = spatial[i]; + int sumOfSamples = existingSample + newSample; + _spatialAudioRingBuffer[i + offset] = static_cast<int16_t>(glm::clamp(sumOfSamples, + std::numeric_limits<int16_t>::min(), std::numeric_limits<int16_t>::max())); } // Copy the remaining unoverlapped spatial audio to the spatial audio buffer, if any From f9b658543535261cd29eebee21a7ead6a335605e Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 15:20:03 -0700 Subject: [PATCH 50/64] some cleanup --- interface/src/Audio.cpp | 1 - interface/src/Audio.h | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index d212f41098..0328327249 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -666,7 +666,6 @@ unsigned int Audio::timeValToSampleTick(const quint64 time, int sampleRate) { } void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) { - // Calculate the number of remaining samples available. The source spatial audio buffer will get // clipped if there are insufficient samples available in the accumulation buffer.
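The mixing loop rewritten in the cleanup above is a saturating overlap-add: the buffered and incoming int16 samples are summed in a wider integer type, then clamped back into the int16 range so that loud overlaps clip rather than wrap around. A distilled, standalone sketch of the same idea (hypothetical helper; std::min/std::max stand in for glm::clamp):

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Mix 'incoming' into 'existing' in place, saturating at the int16_t
    // limits instead of letting the sum wrap around.
    void mixSaturating(std::int16_t* existing, const std::int16_t* incoming, int count) {
        for (int i = 0; i < count; i++) {
            int sum = int(existing[i]) + int(incoming[i]);
            sum = std::max<int>(std::numeric_limits<std::int16_t>::min(),
                                std::min<int>(std::numeric_limits<std::int16_t>::max(), sum));
            existing[i] = static_cast<std::int16_t>(sum);
        }
    }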
unsigned int remaining = _spatialAudioRingBuffer.getSampleCapacity() - _spatialAudioRingBuffer.samplesAvailable(); diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 196058047d..99d9bd4884 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -172,9 +172,9 @@ private: // Process received audio by spatial attenuation geometric response bool _processSpatialAudio; - unsigned int _spatialAudioStart; ///< Start of spatial audio interval (in sample rate time base) - unsigned int _spatialAudioFinish; ///< End of spatial audio interval (in sample rate time base) - AudioRingBuffer _spatialAudioRingBuffer; ///< Spatially processed audio + unsigned int _spatialAudioStart; /// Start of spatial audio interval (in sample rate time base) + unsigned int _spatialAudioFinish; /// End of spatial audio interval (in sample rate time base) + AudioRingBuffer _spatialAudioRingBuffer; /// Spatially processed audio unsigned int timeValToSampleTick(const quint64 time, int sampleRate); From 585024d70ec0a502a94c095d7bb996ecccb38e4f Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 16:04:33 -0700 Subject: [PATCH 51/64] resample the local audio before sending it to any registered listeners --- interface/src/Audio.cpp | 53 ++++++++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 19 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 0328327249..96af5f5f3b 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -403,13 +403,39 @@ void Audio::handleAudioInput() { unsigned int inputSamplesRequired = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio; QByteArray inputByteArray = _inputDevice->readAll(); + + // we may need to check for resampling our input audio in a couple cases: + // 1) the local loopback is enabled + // 2) spatial audio is enabled + bool spatialAudioEnabled = (_processSpatialAudio && !_muted && _audioOutput); + bool localLoopbackEnabled = (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput); + bool possiblyResampleInputAudio = spatialAudioEnabled || localLoopbackEnabled; + bool resampleNeeded = (_inputFormat != _outputFormat); + QByteArray resampledInputByteArray; + + if (possiblyResampleInputAudio && resampleNeeded) { + float loopbackOutputToInputRatio = (_outputFormat.sampleRate() / (float) _inputFormat.sampleRate()) + * (_outputFormat.channelCount() / _inputFormat.channelCount()); - // send our local loopback to any interested parties - if (_processSpatialAudio && !_muted && _audioOutput) { - emit processLocalAudio(_spatialAudioStart, inputByteArray, _inputFormat); + resampledInputByteArray.fill(0, inputByteArray.size() * loopbackOutputToInputRatio); + + linearResampling((int16_t*) inputByteArray.data(), (int16_t*) resampledInputByteArray.data(), + inputByteArray.size() / sizeof(int16_t), + resampledInputByteArray.size() / sizeof(int16_t), _inputFormat, _outputFormat); } - if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput) { + // send our local loopback to any interested parties + if (spatialAudioEnabled) { + if (resampleNeeded) { + // local audio is sent already resampled to match the output format, so processors + // can easily handle the audio in a format ready to post back to the audio device + emit processLocalAudio(_spatialAudioStart, resampledInputByteArray, _outputFormat); + } else { + emit processLocalAudio(_spatialAudioStart, inputByteArray, _outputFormat); + } + } + + if 
(localLoopbackEnabled) { // if this person wants local loopback add that to the locally injected audio if (!_loopbackOutputDevice && _loopbackAudioOutput) { @@ -417,23 +443,12 @@ void Audio::handleAudioInput() { _loopbackOutputDevice = _loopbackAudioOutput->start(); } - if (_inputFormat == _outputFormat) { - if (_loopbackOutputDevice) { + if (_loopbackOutputDevice) { + if (resampleNeeded) { + _loopbackOutputDevice->write(resampledInputByteArray); + } else { _loopbackOutputDevice->write(inputByteArray); } - } else { - float loopbackOutputToInputRatio = (_outputFormat.sampleRate() / (float) _inputFormat.sampleRate()) - * (_outputFormat.channelCount() / _inputFormat.channelCount()); - - QByteArray loopBackByteArray(inputByteArray.size() * loopbackOutputToInputRatio, 0); - - linearResampling((int16_t*) inputByteArray.data(), (int16_t*) loopBackByteArray.data(), - inputByteArray.size() / sizeof(int16_t), - loopBackByteArray.size() / sizeof(int16_t), _inputFormat, _outputFormat); - - if (_loopbackOutputDevice) { - _loopbackOutputDevice->write(loopBackByteArray); - } } } From a7cbf7f667909c3519fa49cd0de6d04d2bce699d Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 16:26:46 -0700 Subject: [PATCH 52/64] revert some code --- interface/src/Audio.cpp | 39 ++++++++++++++------------------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 96af5f5f3b..0c910ab927 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -404,38 +404,18 @@ void Audio::handleAudioInput() { QByteArray inputByteArray = _inputDevice->readAll(); - // we may need to check for resampling our input audio in a couple cases: - // 1) the local loopback is enabled - // 2) spatial audio is enabled - bool spatialAudioEnabled = (_processSpatialAudio && !_muted && _audioOutput); - bool localLoopbackEnabled = (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput); - bool possiblyResampleInputAudio = spatialAudioEnabled || localLoopbackEnabled; - bool resampleNeeded = (_inputFormat != _outputFormat); - QByteArray resampledInputByteArray; - - if (possiblyResampleInputAudio && resampleNeeded) { - float loopbackOutputToInputRatio = (_outputFormat.sampleRate() / (float) _inputFormat.sampleRate()) - * (_outputFormat.channelCount() / _inputFormat.channelCount()); - - resampledInputByteArray.fill(0, inputByteArray.size() * loopbackOutputToInputRatio); - - linearResampling((int16_t*) inputByteArray.data(), (int16_t*) resampledInputByteArray.data(), - inputByteArray.size() / sizeof(int16_t), - resampledInputByteArray.size() / sizeof(int16_t), _inputFormat, _outputFormat); - } - // send our local loopback to any interested parties - if (spatialAudioEnabled) { - if (resampleNeeded) { + if (_processSpatialAudio && !_muted && _audioOutput) { + if (false) { // local audio is sent already resampled to match the output format, so processors // can easily handle the audio in a format ready to post back to the audio device - emit processLocalAudio(_spatialAudioStart, resampledInputByteArray, _outputFormat); + //emit processLocalAudio(_spatialAudioStart, resampledInputByteArray, _outputFormat); } else { emit processLocalAudio(_spatialAudioStart, inputByteArray, _outputFormat); } } - if (localLoopbackEnabled) { + if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput) { // if this person wants local loopback add that to the locally injected audio if (!_loopbackOutputDevice && 
_loopbackAudioOutput) { @@ -444,7 +424,16 @@ void Audio::handleAudioInput() { } if (_loopbackOutputDevice) { - if (resampleNeeded) { + if (_inputFormat != _outputFormat) { + float loopbackOutputToInputRatio = (_outputFormat.sampleRate() / (float) _inputFormat.sampleRate()) + * (_outputFormat.channelCount() / _inputFormat.channelCount()); + + QByteArray resampledInputByteArray(inputByteArray.size() * loopbackOutputToInputRatio, 0); + + linearResampling((int16_t*) inputByteArray.data(), (int16_t*) resampledInputByteArray.data(), + inputByteArray.size() / sizeof(int16_t), + resampledInputByteArray.size() / sizeof(int16_t), _inputFormat, _outputFormat); + _loopbackOutputDevice->write(resampledInputByteArray); } else { _loopbackOutputDevice->write(inputByteArray); From 523cc4bdfe0bc28cf4e9c2aaa59b56595c0d683c Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 16:30:47 -0700 Subject: [PATCH 53/64] revert some code --- interface/src/Audio.cpp | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 0c910ab927..daf6a4a7ee 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -423,21 +423,23 @@ void Audio::handleAudioInput() { _loopbackOutputDevice = _loopbackAudioOutput->start(); } - if (_loopbackOutputDevice) { - if (_inputFormat != _outputFormat) { - float loopbackOutputToInputRatio = (_outputFormat.sampleRate() / (float) _inputFormat.sampleRate()) - * (_outputFormat.channelCount() / _inputFormat.channelCount()); - - QByteArray resampledInputByteArray(inputByteArray.size() * loopbackOutputToInputRatio, 0); - - linearResampling((int16_t*) inputByteArray.data(), (int16_t*) resampledInputByteArray.data(), - inputByteArray.size() / sizeof(int16_t), - resampledInputByteArray.size() / sizeof(int16_t), _inputFormat, _outputFormat); - - _loopbackOutputDevice->write(resampledInputByteArray); - } else { + if (_inputFormat == _outputFormat) { + if (_loopbackOutputDevice) { _loopbackOutputDevice->write(inputByteArray); } + } else { + float loopbackOutputToInputRatio = (_outputFormat.sampleRate() / (float) _inputFormat.sampleRate()) + * (_outputFormat.channelCount() / _inputFormat.channelCount()); + + QByteArray loopBackByteArray(inputByteArray.size() * loopbackOutputToInputRatio, 0); + + linearResampling((int16_t*) inputByteArray.data(), (int16_t*) loopBackByteArray.data(), + inputByteArray.size() / sizeof(int16_t), + loopBackByteArray.size() / sizeof(int16_t), _inputFormat, _outputFormat); + + if (_loopbackOutputDevice) { + _loopbackOutputDevice->write(loopBackByteArray); + } } } From a46d3fd254e30f0095a555ccd1fd09b701002ed7 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 19:57:37 -0700 Subject: [PATCH 54/64] first cut at local audio spatialization working --- interface/src/Audio.cpp | 31 ++++++++++++++++++++----------- interface/src/AudioReflector.cpp | 2 +- interface/src/Menu.cpp | 4 ++++ interface/src/Menu.h | 19 +++++++++++-------- 4 files changed, 36 insertions(+), 20 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index daf6a4a7ee..94e1cd336a 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -404,17 +404,6 @@ void Audio::handleAudioInput() { QByteArray inputByteArray = _inputDevice->readAll(); - // send our local loopback to any interested parties - if (_processSpatialAudio && !_muted && _audioOutput) { - if (false) { - // local audio is sent already resampled to match the output format, so processors - // can easily 
handle the audio in a format ready to post back to the audio device - //emit processLocalAudio(_spatialAudioStart, resampledInputByteArray, _outputFormat); - } else { - emit processLocalAudio(_spatialAudioStart, inputByteArray, _outputFormat); - } - } - if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput) { // if this person wants local loopback add that to the locally injected audio @@ -575,6 +564,26 @@ void Audio::handleAudioInput() { _lastInputLoudness = 0; } + // at this point we have clean monoAudioSamples, which match our target output... this is what we should send + // to our interested listeners + // send our local loopback to any interested parties + if (_processSpatialAudio && !_muted && _audioOutput && + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio))) { + // local audio is sent already resampled to match the network input format, so processors + // can easily handle the audio in a format ready to post back to the audio device + const int NUM_CHANNELS = 2; + QByteArray stereoInputData; + stereoInputData.resize(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * NUM_CHANNELS * sizeof(int16_t)); + int16_t* stereoSamples = (int16_t*)stereoInputData.data(); + const float LOCAL_SIGNAL_ATTENUATION = 0.125f; + for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) { + stereoSamples[i* NUM_CHANNELS] = monoAudioSamples[i] * LOCAL_SIGNAL_ATTENUATION; + stereoSamples[(i * NUM_CHANNELS) + 1] = monoAudioSamples[i] * LOCAL_SIGNAL_ATTENUATION; + } + + emit processLocalAudio(_spatialAudioStart, stereoInputData, _desiredOutputFormat); + } + if (_proceduralAudioOutput) { processProceduralAudio(monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL); } diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index fa4573b1b4..0bb2e7224e 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -224,7 +224,7 @@ void AudioReflector::injectAudiblePoint(const AudiblePoint& audiblePoint, _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples); } void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { - // nothing yet, but will do local reflections too... 
+ echoAudio(sampleTime, samples, format); } void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index a02c8342a7..af4825fac8 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -423,6 +423,10 @@ Menu::Menu() : Qt::CTRL | Qt::SHIFT | Qt::Key_X, true); + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingProcessLocalAudio, + Qt::CTRL | Qt::SHIFT | Qt::Key_A, + true); + addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel, Qt::CTRL | Qt::SHIFT | Qt::Key_V, this, diff --git a/interface/src/Menu.h b/interface/src/Menu.h index 5fcbf11724..bc05e8921b 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -237,15 +237,18 @@ namespace MenuOption { const QString Atmosphere = "Atmosphere"; const QString AudioNoiseReduction = "Audio Noise Reduction"; const QString AudioToneInjection = "Inject Test Tone"; + const QString AudioSpatialProcessing = "Audio Spatial Processing"; - const QString AudioSpatialProcessingIncudeOriginal = "Audio Spatial Processing includes Original"; - const QString AudioSpatialProcessingSeparateEars = "Audio Spatial Processing separates ears"; - const QString AudioSpatialProcessingPreDelay = "Audio Spatial Processing add Pre-Delay"; - const QString AudioSpatialProcessingStereoSource = "Audio Spatial Processing Stereo Source"; - const QString AudioSpatialProcessingHeadOriented = "Audio Spatial Processing Head Oriented"; - const QString AudioSpatialProcessingWithDiffusions = "Audio Spatial Processing With Diffusions"; - const QString AudioSpatialProcessingRenderPaths = "Audio Spatial Processing Render Paths"; - const QString AudioSpatialProcessingSlightlyRandomSurfaces = "Audio Spatial Processing Slightly Random Surfaces"; + const QString AudioSpatialProcessingHeadOriented = "Head Oriented"; + const QString AudioSpatialProcessingIncudeOriginal = "Includes Network Original"; + const QString AudioSpatialProcessingPreDelay = "Add Pre-Delay"; + const QString AudioSpatialProcessingProcessLocalAudio = "Process Local Audio"; + const QString AudioSpatialProcessingRenderPaths = "Render Paths"; + const QString AudioSpatialProcessingSeparateEars = "Separate Ears"; + const QString AudioSpatialProcessingSlightlyRandomSurfaces = "Slightly Random Surfaces"; + const QString AudioSpatialProcessingStereoSource = "Stereo Source"; + const QString AudioSpatialProcessingWithDiffusions = "With Diffusions"; + const QString Avatars = "Avatars"; const QString Bandwidth = "Bandwidth Display"; const QString BandwidthDetails = "Bandwidth Details"; From 82d9c50dc5250016b14997e6ccf2459db028d28e Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 20:11:53 -0700 Subject: [PATCH 55/64] moved all local processing to AudioReflector --- interface/src/Audio.cpp | 16 +++------------- interface/src/AudioReflector.cpp | 22 +++++++++++++++++++++- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 94e1cd336a..551acaad84 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -567,21 +567,11 @@ void Audio::handleAudioInput() { // at this point we have clean monoAudioSamples, which match our target output... 
this is what we should send // to our interested listeners // send our local loopback to any interested parties - if (_processSpatialAudio && !_muted && _audioOutput && - (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio))) { + if (_processSpatialAudio && !_muted && _audioOutput) { // local audio is sent already resampled to match the network input format, so processors // can easily handle the audio in a format ready to post back to the audio device - const int NUM_CHANNELS = 2; - QByteArray stereoInputData; - stereoInputData.resize(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * NUM_CHANNELS * sizeof(int16_t)); - int16_t* stereoSamples = (int16_t*)stereoInputData.data(); - const float LOCAL_SIGNAL_ATTENUATION = 0.125f; - for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) { - stereoSamples[i* NUM_CHANNELS] = monoAudioSamples[i] * LOCAL_SIGNAL_ATTENUATION; - stereoSamples[(i * NUM_CHANNELS) + 1] = monoAudioSamples[i] * LOCAL_SIGNAL_ATTENUATION; - } - - emit processLocalAudio(_spatialAudioStart, stereoInputData, _desiredOutputFormat); + QByteArray monoInputData((char*)monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t)); + emit processLocalAudio(_spatialAudioStart, monoInputData, _desiredInputFormat); } if (_proceduralAudioOutput) { diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 0bb2e7224e..0450a66e7b 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -223,8 +223,28 @@ void AudioReflector::injectAudiblePoint(const AudiblePoint& audiblePoint, _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples); _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples); } + void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { - echoAudio(sampleTime, samples, format); + if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) { + const int NUM_CHANNELS_INPUT = 1; + const int NUM_CHANNELS_OUTPUT = 2; + const int EXPECTED_SAMPLE_RATE = 24000; + if (format.channelCount() == NUM_CHANNELS_INPUT && format.sampleRate() == EXPECTED_SAMPLE_RATE) { + QAudioFormat outputFormat = format; + outputFormat.setChannelCount(NUM_CHANNELS_OUTPUT); + QByteArray stereoInputData; + stereoInputData.resize(samples.size() * NUM_CHANNELS_OUTPUT); + int numberOfSamples = samples.size() / sizeof(int16_t); + int16_t* monoSamples = (int16_t*)samples.data(); + int16_t* stereoSamples = (int16_t*)stereoInputData.data(); + const float LOCAL_SIGNAL_ATTENUATION = 0.125f; + for (int i = 0; i < numberOfSamples; i++) { + stereoSamples[i* NUM_CHANNELS_OUTPUT] = monoSamples[i] * LOCAL_SIGNAL_ATTENUATION; + stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * LOCAL_SIGNAL_ATTENUATION; + } + echoAudio(sampleTime, stereoInputData, outputFormat); + } + } } void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { From 040df97be27cf5b0b2635ae2f83d500294cfd1b2 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 20:23:47 -0700 Subject: [PATCH 56/64] more cleanup --- interface/src/Audio.cpp | 5 ----- interface/src/Audio.h | 14 ++++++-------- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 551acaad84..d6deb66e2a 100644 --- a/interface/src/Audio.cpp +++ 
b/interface/src/Audio.cpp @@ -665,11 +665,6 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) { _lastReceiveTime = currentReceiveTime; } -unsigned int Audio::timeValToSampleTick(const quint64 time, int sampleRate) { - unsigned int sample = (unsigned int)(time / 1000000 * sampleRate); - return sample; -} - void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) { // Calculate the number of remaining samples available. The source spatial audio buffer will get // clipped if there are insufficient samples available in the accumulation buffer. diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 99d9bd4884..f2fed15f65 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -167,16 +167,14 @@ private: GLuint _boxTextureId; QRect _iconBounds; - // Audio callback in class context. + /// Audio callback in class context. inline void performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight); - // Process received audio by spatial attenuation geometric response - bool _processSpatialAudio; - unsigned int _spatialAudioStart; /// Start of spatial audio interval (in sample rate time base) - unsigned int _spatialAudioFinish; /// End of spatial audio interval (in sample rate time base) - AudioRingBuffer _spatialAudioRingBuffer; /// Spatially processed audio - - unsigned int timeValToSampleTick(const quint64 time, int sampleRate); + + bool _processSpatialAudio; /// Process received audio by spatial audio hooks + unsigned int _spatialAudioStart; /// Start of spatial audio interval (in sample rate time base) + unsigned int _spatialAudioFinish; /// End of spatial audio interval (in sample rate time base) + AudioRingBuffer _spatialAudioRingBuffer; /// Spatially processed audio // Process procedural audio by // 1. 
Echo to the local procedural output device From 67de87d1141c204a679dd246c13164832323b1a6 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 21:31:35 -0700 Subject: [PATCH 57/64] added slider for local audio attenuation --- examples/audioReflectorTools.js | 69 ++++++++++++++++++++++++++++++++ interface/src/AudioReflector.cpp | 8 ++-- interface/src/AudioReflector.h | 6 +++ 3 files changed, 80 insertions(+), 3 deletions(-) diff --git a/examples/audioReflectorTools.js b/examples/audioReflectorTools.js index acab563d4c..532cae84bc 100644 --- a/examples/audioReflectorTools.js +++ b/examples/audioReflectorTools.js @@ -14,6 +14,7 @@ var delayScale = 100.0; var fanoutScale = 10.0; var speedScale = 20; var factorScale = 5.0; +var localFactorScale = 1.0; var reflectiveScale = 100.0; var diffusionScale = 100.0; var absorptionScale = 100.0; @@ -223,6 +224,45 @@ var factorThumb = Overlays.addOverlay("image", { alpha: 1 }); +var localFactorY = topY; +topY += sliderHeight; + +var localFactorLabel = Overlays.addOverlay("text", { + x: 40, + y: localFactorY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Local\nFactor:" + }); + + +var localFactorSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: localFactorY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var localFactorMinThumbX = 110; +var localFactorMaxThumbX = localFactorMinThumbX + 110; +var localFactorThumbX = localFactorMinThumbX + ((localFactorMaxThumbX - localFactorMinThumbX) * (AudioReflector.getLocalAudioAttenuationFactor() / localFactorScale)); +var localFactorThumb = Overlays.addOverlay("image", { + x: localFactorThumbX, + y: localFactorY+9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 0, green: 128, blue: 128}, + alpha: 1 + }); + var reflectiveY = topY; topY += sliderHeight; @@ -347,6 +387,10 @@ function scriptEnding() { Overlays.deleteOverlay(factorThumb); Overlays.deleteOverlay(factorSlider); + Overlays.deleteOverlay(localFactorLabel); + Overlays.deleteOverlay(localFactorThumb); + Overlays.deleteOverlay(localFactorSlider); + Overlays.deleteOverlay(speedLabel); Overlays.deleteOverlay(speedThumb); Overlays.deleteOverlay(speedSlider); @@ -389,6 +433,7 @@ var movingSliderDelay = false; var movingSliderFanout = false; var movingSliderSpeed = false; var movingSliderFactor = false; +var movingSliderLocalFactor = false; var movingSliderReflective = false; var movingSliderDiffusion = false; var movingSliderAbsorption = false; @@ -444,6 +489,19 @@ function mouseMoveEvent(event) { AudioReflector.setDistanceAttenuationScalingFactor(factor); } + if (movingSliderLocalFactor) { + newThumbX = event.x - thumbClickOffsetX; + if (newThumbX < localFactorMinThumbX) { + newThumbX = localFactorMinThumbX; + } + if (newThumbX > localFactorMaxThumbX) { + newThumbX = localFactorMaxThumbX; + } + Overlays.editOverlay(localFactorThumb, { x: newThumbX } ); + var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale; + AudioReflector.setLocalAudioAttenuationFactor(localFactor); + } + if (movingSliderAbsorption) { newThumbX = event.x - thumbClickOffsetX;
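Every slider in this script uses the same mapping between thumb position and parameter value: clamp the thumb into its track, normalize to [0, 1], and multiply by the parameter's scale, with the inverse used to place a thumb from a stored value. A minimal C++ restatement (hypothetical helper names; the script inlines this arithmetic per slider):

    #include <algorithm>

    // Map a clamped thumb position inside [minX, maxX] to a value in [0, scale].
    float thumbToValue(float thumbX, float minX, float maxX, float scale) {
        float clamped = std::max(minX, std::min(thumbX, maxX));
        return ((clamped - minX) / (maxX - minX)) * scale;
    }

    // Inverse: position the thumb for a given parameter value.
    float valueToThumb(float value, float minX, float maxX, float scale) {
        return minX + (maxX - minX) * (value / scale);
    }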
if (newThumbX < absorptionMinThumbX) { @@ -504,6 +562,10 @@ function mousePressEvent(event) { movingSliderFactor = true; thumbClickOffsetX = event.x - factorThumbX; } + if (clickedOverlay == localFactorThumb) { + movingSliderLocalFactor = true; + thumbClickOffsetX = event.x - localFactorThumbX; + } if (clickedOverlay == diffusionThumb) { movingSliderDiffusion = true; thumbClickOffsetX = event.x - diffusionThumbX; @@ -543,6 +605,13 @@ function mouseReleaseEvent(event) { factorThumbX = newThumbX; } + if (movingSliderLocalFactor) { + movingSliderLocalFactor = false; + var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale; + AudioReflector.setLocalAudioAttenuationFactor(localFactor); + localFactorThumbX = newThumbX; + } + if (movingSliderReflective) { movingSliderReflective = false; var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale; diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 0450a66e7b..0640ed038e 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -18,6 +18,7 @@ const float DEFAULT_DISTANCE_SCALING_FACTOR = 2.0f; const float MAXIMUM_DELAY_MS = 1000.0 * 20.0f; // stop reflecting after path is this long const int DEFAULT_DIFFUSION_FANOUT = 5; const int ABSOLUTE_MAXIMUM_BOUNCE_COUNT = 10; +const float DEFAULT_LOCAL_ATTENUATION_FACTOR = 0.125; const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point @@ -29,6 +30,7 @@ AudioReflector::AudioReflector(QObject* parent) : _preDelay(DEFAULT_PRE_DELAY), _soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER), _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR), + _localAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR), _diffusionFanout(DEFAULT_DIFFUSION_FANOUT), _absorptionRatio(DEFAULT_ABSORPTION_RATIO), _diffusionRatio(DEFAULT_DIFFUSION_RATIO), @@ -36,6 +38,7 @@ AudioReflector::AudioReflector(QObject* parent) : _lastPreDelay(DEFAULT_PRE_DELAY), _lastSoundMsPerMeter(DEFAULT_MS_DELAY_PER_METER), _lastDistanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR), + _lastLocalAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR), _lastDiffusionFanout(DEFAULT_DIFFUSION_FANOUT), _lastAbsorptionRatio(DEFAULT_ABSORPTION_RATIO), _lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO) @@ -237,10 +240,9 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray int numberOfSamples = samples.size() / sizeof(int16_t); int16_t* monoSamples = (int16_t*)samples.data(); int16_t* stereoSamples = (int16_t*)stereoInputData.data(); - const float LOCAL_SIGNAL_ATTENUATION = 0.125f; for (int i = 0; i < numberOfSamples; i++) { - stereoSamples[i* NUM_CHANNELS_OUTPUT] = monoSamples[i] * LOCAL_SIGNAL_ATTENUATION; - stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * LOCAL_SIGNAL_ATTENUATION; + stereoSamples[i* NUM_CHANNELS_OUTPUT] = monoSamples[i] * _localAudioAttenuationFactor; + stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * _localAudioAttenuationFactor; } echoAudio(sampleTime, stereoInputData, outputFormat); } diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 7867df3d9b..79fcdda68d 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -93,6 +93,10 @@ public slots: float getDistanceAttenuationScalingFactor() const { return _distanceAttenuationScalingFactor; } void 
setDistanceAttenuationScalingFactor(float factor) { _distanceAttenuationScalingFactor = factor; } + /// scales attenuation of local audio to be louder or softer than the default attenuation + float getLocalAudioAttenuationFactor() const { return _localAudioAttenuationFactor; } + void setLocalAudioAttenuationFactor(float factor) { _localAudioAttenuationFactor = factor; } + /// number of points of diffusion from each reflection point, as fanout increases there are more chances for secondary /// echoes, but each diffusion ray is quieter and therefore more likely to be below the sound floor int getDiffusionFanout() const { return _diffusionFanout; } @@ -171,6 +175,7 @@ private: float _preDelay; float _soundMsPerMeter; float _distanceAttenuationScalingFactor; + float _localAudioAttenuationFactor; int _diffusionFanout; // number of points of diffusion from each reflection point @@ -186,6 +191,7 @@ private: float _lastPreDelay; float _lastSoundMsPerMeter; float _lastDistanceAttenuationScalingFactor; + float _lastLocalAudioAttenuationFactor; int _lastDiffusionFanout; float _lastAbsorptionRatio; float _lastDiffusionRatio; From ba361b0f62bb0834638efe7fdb88d32a8fb903fa Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Tue, 15 Apr 2014 21:50:44 -0700 Subject: [PATCH 58/64] add local audio stats --- interface/src/ui/Stats.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp index 2b70b3538c..98955b34f5 100644 --- a/interface/src/ui/Stats.cpp +++ b/interface/src/ui/Stats.cpp @@ -343,7 +343,7 @@ void Stats::display( lines = _expanded ? 12 : 3; if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) { - lines += 6; // spatial audio processing adds 1 spacing line and 5 extra lines of info + lines += 7; // spatial audio processing adds 1 spacing line and 6 extra lines of info } drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10); @@ -550,6 +550,14 @@ void Stats::display( verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + sprintf(reflectionsStatus, "Local Audio: %s Attenuation: %5.3f", + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio) + ? "yes" : "no"), + audioReflector->getLocalAudioAttenuationFactor()); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + bool diffusionEnabled = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); int fanout = diffusionEnabled ? audioReflector->getDiffusionFanout() : 0; int diffusionPaths = diffusionEnabled ? 
audioReflector->getDiffusionPathCount() : 0; From b00a67cb5705e00cd60c45ad0a0542a7c8703b79 Mon Sep 17 00:00:00 2001 From: ZappoMan Date: Wed, 16 Apr 2014 13:55:39 -0700 Subject: [PATCH 59/64] added support for local audio spatialization independent of inbound audio --- interface/src/Audio.cpp | 2 + interface/src/AudioReflector.cpp | 186 ++++++++++++++++++++----------- interface/src/AudioReflector.h | 22 +++- 3 files changed, 139 insertions(+), 71 deletions(-) diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index d6deb66e2a..06cb1fe461 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -665,6 +665,8 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) { _lastReceiveTime = currentReceiveTime; } +// NOTE: numSamples is the total number of single channel samples, since callers will always call this with stereo +// data we know that we will have 2x samples for each stereo time sample at the format's sample rate void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) { // Calculate the number of remaining samples available. The source spatial audio buffer will get // clipped if there are insufficient samples available in the accumulation buffer. diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 0640ed038e..847f1dc0bf 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -203,7 +203,7 @@ void AudioReflector::injectAudiblePoint(const AudiblePoint& audiblePoint, _maxAttenuation = std::max(_maxAttenuation,leftEarAttenuation); _minAttenuation = std::min(_minAttenuation,rightEarAttenuation); _minAttenuation = std::min(_minAttenuation,leftEarAttenuation); - + // run through the samples, and attenuate them for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) { int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS]; @@ -235,25 +235,25 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray if (format.channelCount() == NUM_CHANNELS_INPUT && format.sampleRate() == EXPECTED_SAMPLE_RATE) { QAudioFormat outputFormat = format; outputFormat.setChannelCount(NUM_CHANNELS_OUTPUT); - QByteArray stereoInputData; - stereoInputData.resize(samples.size() * NUM_CHANNELS_OUTPUT); - int numberOfSamples = samples.size() / sizeof(int16_t); + QByteArray stereoInputData(samples.size() * NUM_CHANNELS_OUTPUT, 0); + int numberOfSamples = (samples.size() / sizeof(int16_t)); int16_t* monoSamples = (int16_t*)samples.data(); int16_t* stereoSamples = (int16_t*)stereoInputData.data(); + for (int i = 0; i < numberOfSamples; i++) { stereoSamples[i* NUM_CHANNELS_OUTPUT] = monoSamples[i] * _localAudioAttenuationFactor; stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * _localAudioAttenuationFactor; } - echoAudio(sampleTime, stereoInputData, outputFormat); + echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat); } } } void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { - echoAudio(sampleTime, samples, format); + echoAudio(INBOUND_AUDIO, sampleTime, samples, format); } -void AudioReflector::echoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { +void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { _maxDelay = 0; _maxAttenuation = 0.0f; _minDelay = std::numeric_limits<float>::max(); @@ -265,7 +265,10 @@ void AudioReflector::echoAudio(unsigned int sampleTime, const QByteArray& sample QMutexLocker locker(&_mutex); - foreach(const AudiblePoint& audiblePoint, _audiblePoints) { + // depending on if we're processing local or external audio, pick the correct points vector + QVector<AudiblePoint>& audiblePoints = source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints; + + foreach(const AudiblePoint& audiblePoint, audiblePoints) { injectAudiblePoint(audiblePoint, samples, sampleTime, format.sampleRate()); } @@ -294,9 +297,10 @@ void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, co -AudioPath::AudioPath(const glm::vec3& origin, const glm::vec3& direction, +AudioPath::AudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& direction, float attenuation, float delay, float distance,bool isDiffusion, int bounceCount) : + source(source), isDiffusion(isDiffusion), startPoint(origin), startDirection(direction), @@ -315,12 +319,15 @@ AudioPath::AudioPath(const glm::vec3& origin, const glm::vec3& direction, { } -void AudioReflector::addSoundSource(const glm::vec3& origin, const glm::vec3& initialDirection, +void AudioReflector::addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation, float initialDelay, float initialDistance, bool isDiffusion) { - AudioPath* path = new AudioPath(origin, initialDirection, initialAttenuation, initialDelay, + AudioPath* path = new AudioPath(source, origin, initialDirection, initialAttenuation, initialDelay, initialDistance, isDiffusion, 0); - _audioPaths.push_back(path); + + QVector<AudioPath*>& audioPaths = source == INBOUND_AUDIO ? _inboundAudioPaths : _localAudioPaths; + + audioPaths.push_back(path); } void AudioReflector::calculateAllReflections() { @@ -355,12 +362,15 @@ void AudioReflector::calculateAllReflections() { void AudioReflector::drawRays() { const glm::vec3 RED(1,0,0); const glm::vec3 GREEN(0,1,0); + const glm::vec3 BLUE(0,0,1); + const glm::vec3 CYAN(0,1,1); int diffusionNumber = 0; QMutexLocker locker(&_mutex); - foreach(AudioPath* const& path, _audioPaths) { - + + // draw the paths for inbound audio + foreach(AudioPath* const& path, _inboundAudioPaths) { // if this is an original reflection, draw it in RED if (path->isDiffusion) { diffusionNumber++; @@ -369,6 +379,19 @@ void AudioReflector::drawRays() { drawPath(path, RED); } } + + if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) { + // draw the paths for local audio + foreach(AudioPath* const& path, _localAudioPaths) { + // if this is an original reflection, draw it in RED + if (path->isDiffusion) { + diffusionNumber++; + drawPath(path, CYAN); + } else { + drawPath(path, BLUE); + } + } + } } void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) { @@ -383,6 +406,21 @@ void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) { } } +void AudioReflector::clearPaths() { + // clear our inbound audio paths + foreach(AudioPath* const& path, _inboundAudioPaths) { + delete path; + } + _inboundAudioPaths.clear(); + _inboundAudiblePoints.clear(); // clear our inbound audible points + + // clear our local audio paths + foreach(AudioPath* const& path, _localAudioPaths) { + delete path; + } + _localAudioPaths.clear(); + _localAudiblePoints.clear(); // clear our local audible points +} // Here's how this works: we have an array of AudioPaths, we loop on all of our currently calculating audio // paths, and calculate one ray per
path. If that ray doesn't reflect, or reaches a max distance/attenuation, then it @@ -392,12 +430,7 @@ void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) { // fanout number of new paths, those new paths will have an origin of the reflection point, and an initial attenuation // of their diffusion ratio. Those new paths will be added to the active audio paths, and be analyzed for the next loop. void AudioReflector::analyzePaths() { - // clear our _audioPaths - foreach(AudioPath* const& path, _audioPaths) { - delete path; - } - _audioPaths.clear(); - _audiblePoints.clear(); // clear our audible points + clearPaths(); // add our initial paths glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT); @@ -422,36 +455,49 @@ void AudioReflector::analyzePaths() { // NOTE: we're still calculating our initial paths based on the listeners position. But the analysis code has been // updated to support individual sound sources (which is how we support diffusion), we can use this new paradigm to // add support for individual sound sources, and more directional sound sources - addSoundSource(_origin, right, initialAttenuation, preDelay); - addSoundSource(_origin, front, initialAttenuation, preDelay); - addSoundSource(_origin, up, initialAttenuation, preDelay); - addSoundSource(_origin, down, initialAttenuation, preDelay); - addSoundSource(_origin, back, initialAttenuation, preDelay); - addSoundSource(_origin, left, initialAttenuation, preDelay); - addSoundSource(_origin, frontRightUp, initialAttenuation, preDelay); - addSoundSource(_origin, frontLeftUp, initialAttenuation, preDelay); - addSoundSource(_origin, backRightUp, initialAttenuation, preDelay); - addSoundSource(_origin, backLeftUp, initialAttenuation, preDelay); - addSoundSource(_origin, frontRightDown, initialAttenuation, preDelay); - addSoundSource(_origin, frontLeftDown, initialAttenuation, preDelay); - addSoundSource(_origin, backRightDown, initialAttenuation, preDelay); - addSoundSource(_origin, backLeftDown, initialAttenuation, preDelay); + + addAudioPath(INBOUND_AUDIO, _origin, front, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, right, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, up, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, down, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, back, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, left, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, frontRightUp, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, frontLeftUp, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, backRightUp, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, backLeftUp, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, frontRightDown, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, frontLeftDown, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, backRightDown, initialAttenuation, preDelay); + addAudioPath(INBOUND_AUDIO, _origin, backLeftDown, initialAttenuation, preDelay); + + // the original paths for the local audio are directional to the front of the origin + addAudioPath(LOCAL_AUDIO, _origin, front, initialAttenuation, preDelay); + addAudioPath(LOCAL_AUDIO, _origin, frontRightUp, initialAttenuation, preDelay); + addAudioPath(LOCAL_AUDIO, _origin, frontLeftUp, initialAttenuation, preDelay); + addAudioPath(LOCAL_AUDIO, _origin, 
+    addAudioPath(LOCAL_AUDIO, _origin, frontRightDown, initialAttenuation, preDelay);
+    addAudioPath(LOCAL_AUDIO, _origin, frontLeftDown, initialAttenuation, preDelay);
 
     // loop through all our audio paths and keep analyzing them until they complete
     int steps = 0;
-    int acitvePaths = _audioPaths.size(); // when we start, all paths are active
+    int acitvePaths = _inboundAudioPaths.size() + _localAudioPaths.size(); // when we start, all paths are active
     while(acitvePaths > 0) {
         acitvePaths = analyzePathsSingleStep();
         steps++;
     }
-    _reflections = _audiblePoints.size();
+    _reflections = _inboundAudiblePoints.size() + _localAudiblePoints.size();
     _diffusionPathCount = countDiffusionPaths();
 }
 
 int AudioReflector::countDiffusionPaths() {
     int diffusionCount = 0;
-    foreach(AudioPath* const& path, _audioPaths) {
+    foreach(AudioPath* const& path, _inboundAudioPaths) {
+        if (path->isDiffusion) {
+            diffusionCount++;
+        }
+    }
+    foreach(AudioPath* const& path, _localAudioPaths) {
         if (path->isDiffusion) {
             diffusionCount++;
         }
@@ -462,36 +508,44 @@ int AudioReflector::countDiffusionPaths() {
 int AudioReflector::analyzePathsSingleStep() {
     // iterate all the active sound paths, calculate one step per active path
     int activePaths = 0;
-    foreach(AudioPath* const& path, _audioPaths) {
+
+    QVector<AudioPath*>* pathsLists[] = { &_inboundAudioPaths, &_localAudioPaths };
+
+    for(int i = 0; i < sizeof(pathsLists) / sizeof(pathsLists[0]); i++) {
+
+        QVector<AudioPath*>& pathList = *pathsLists[i];
+
+        foreach(AudioPath* const& path, pathList) {
 
-        glm::vec3 start = path->lastPoint;
-        glm::vec3 direction = path->lastDirection;
-        OctreeElement* elementHit; // output from findRayIntersection
-        float distance; // output from findRayIntersection
-        BoxFace face; // output from findRayIntersection
+            glm::vec3 start = path->lastPoint;
+            glm::vec3 direction = path->lastDirection;
+            OctreeElement* elementHit; // output from findRayIntersection
+            float distance; // output from findRayIntersection
+            BoxFace face; // output from findRayIntersection
 
-        if (!path->finalized) {
-            activePaths++;
+            if (!path->finalized) {
+                activePaths++;
 
-            if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) {
-                path->finalized = true;
-            } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
-                // TODO: we need to decide how we want to handle locking on the ray intersection, if we force lock,
-                // we get an accurate picture, but it could prevent rendering of the voxels. If we trylock (default),
-                // we might not get ray intersections where they may exist, but we can't really detect that case...
-                // add last parameter of Octree::Lock to force locking
-                handlePathPoint(path, distance, elementHit, face);
+                if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) {
+                    path->finalized = true;
+                } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
+                    // TODO: we need to decide how we want to handle locking on the ray intersection, if we force lock,
+                    // we get an accurate picture, but it could prevent rendering of the voxels. If we trylock (default),
+                    // we might not get ray intersections where they may exist, but we can't really detect that case...
+                    // add last parameter of Octree::Lock to force locking
+                    handlePathPoint(path, distance, elementHit, face);
-            } else {
-                // If we didn't intersect, but this was a diffusion ray, then we will go ahead and cast a short ray out
-                // from our last known point, in the last known direction, and leave that sound source hanging there
-                if (path->isDiffusion) {
-                    const float MINIMUM_RANDOM_DISTANCE = 0.25f;
-                    const float MAXIMUM_RANDOM_DISTANCE = 0.5f;
-                    float distance = randFloatInRange(MINIMUM_RANDOM_DISTANCE, MAXIMUM_RANDOM_DISTANCE);
-                    handlePathPoint(path, distance, NULL, UNKNOWN_FACE);
-                } else {
-                    path->finalized = true; // if it doesn't intersect, then it is finished
+                } else {
+                    // If we didn't intersect, but this was a diffusion ray, then we will go ahead and cast a short ray out
+                    // from our last known point, in the last known direction, and leave that sound source hanging there
+                    if (path->isDiffusion) {
+                        const float MINIMUM_RANDOM_DISTANCE = 0.25f;
+                        const float MAXIMUM_RANDOM_DISTANCE = 0.5f;
+                        float distance = randFloatInRange(MINIMUM_RANDOM_DISTANCE, MAXIMUM_RANDOM_DISTANCE);
+                        handlePathPoint(path, distance, NULL, UNKNOWN_FACE);
+                    } else {
+                        path->finalized = true; // if it doesn't intersect, then it is finished
+                    }
                 }
             }
         }
     }
@@ -571,8 +625,8 @@ void AudioReflector::handlePathPoint(AudioPath* path, float distance, OctreeElem
 
             diffusion = glm::normalize(diffusion);
 
-            // add sound sources for these diffusions
-            addSoundSource(end, diffusion, partialDiffusionAttenuation, currentDelay, pathDistance, true);
+            // add a new audio path for each of these diffusions; the new path's source matches the original source
+            addAudioPath(path->source, end, diffusion, partialDiffusionAttenuation, currentDelay, pathDistance, true);
         }
     } else {
         const bool wantDebugging = false;
@@ -603,7 +657,9 @@ void AudioReflector::handlePathPoint(AudioPath* path, float distance, OctreeElem
 
         // audio so that it can be adjusted to ear position
         AudiblePoint point = {end, currentDelay, (reflectiveAttenuation + totalDiffusionAttenuation), pathDistance};
 
-        _audiblePoints.push_back(point);
+        QVector<AudiblePoint>& audiblePoints = path->source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;
+
+        audiblePoints.push_back(point);
 
         // add this location to the path points, so we can visualize it
         path->reflections.push_back(end);
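
Patch 01 threads an AudioSource value through every stage and selects the inbound or local container with the same ternary in several places. As an editorial aside, a small hypothetical helper could centralize that routing; this is a sketch of the pattern, not code from the patch:

    #include <QVector>

    // Hypothetical helper: resolve the per-source container in one place.
    // AudioSource is the enum this patch introduces in AudioReflector.h.
    template <typename T>
    T& forSource(AudioSource source, T& inbound, T& local) {
        return (source == INBOUND_AUDIO) ? inbound : local;
    }

    // usage sketch:
    //   QVector<AudiblePoint>& points = forSource(source, _inboundAudiblePoints, _localAudiblePoints);
    //   QVector<AudioPath*>& paths = forSource(path->source, _inboundAudioPaths, _localAudioPaths);
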
diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h
index 79fcdda68d..900f567920 100644
--- a/interface/src/AudioReflector.h
+++ b/interface/src/AudioReflector.h
@@ -16,11 +16,18 @@
 #include "Audio.h"
 #include "avatar/MyAvatar.h"
 
+enum AudioSource {
+    LOCAL_AUDIO,
+    INBOUND_AUDIO
+};
+
 class AudioPath {
 public:
-    AudioPath(const glm::vec3& origin = glm::vec3(0), const glm::vec3& direction = glm::vec3(0), float attenuation = 1.0f,
+    AudioPath(AudioSource source = INBOUND_AUDIO, const glm::vec3& origin = glm::vec3(0),
+            const glm::vec3& direction = glm::vec3(0), float attenuation = 1.0f,
             float delay = 0.0f, float distance = 0.0f, bool isDiffusion = false, int bounceCount = 0);
 
+    AudioSource source;
     bool isDiffusion;
     glm::vec3 startPoint;
     glm::vec3 startDirection;
@@ -53,7 +60,6 @@ public:
     float diffusionRatio;
 };
 
-
 class AudioReflector : public QObject {
     Q_OBJECT
 public:
@@ -145,17 +151,21 @@ private:
     glm::vec3 _origin;
     glm::quat _orientation;
 
-    QVector<AudioPath*> _audioPaths; /// the various audio paths we're processing
-    QVector<AudiblePoint> _audiblePoints; /// the audible points that have been calculated from the paths
+    QVector<AudioPath*> _inboundAudioPaths; /// audio paths we're processing for inbound audio
+    QVector<AudiblePoint> _inboundAudiblePoints; /// the audible points that have been calculated from the inbound audio paths
+
+    QVector<AudioPath*> _localAudioPaths; /// audio paths we're processing for local audio
+    QVector<AudiblePoint> _localAudiblePoints; /// the audible points that have been calculated from the local audio paths
 
     // adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties,
     // as well as diffusion sound sources
-    void addSoundSource(const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation,
+    void addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation,
             float initialDelay, float initialDistance = 0.0f, bool isDiffusion = false);
 
     // helper that handles audioPath analysis
     int analyzePathsSingleStep();
     void handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face);
+    void clearPaths();
     void analyzePaths();
     void drawRays();
     void drawPath(AudioPath* path, const glm::vec3& originalColor);
@@ -164,7 +174,7 @@ private:
     glm::vec3 getFaceNormal(BoxFace face);
 
     void injectAudiblePoint(const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
-    void echoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
+    void echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
 
     // return the surface characteristics of the element we hit
     SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit = NULL);
 
From 2e0c5fc81be6ef6326d227c42fc7cfa1f64c1de5 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Wed, 16 Apr 2014 15:13:20 -0700
Subject: [PATCH 60/64] implement support for comb filter suppression window

---
 examples/audioReflectorTools.js  |  68 ++++++++++++++++++
 interface/src/AudioReflector.cpp | 120 ++++++++++++++++++++-----------
 interface/src/AudioReflector.h   |  15 +++-
 interface/src/ui/Stats.cpp       |  10 ++-
 4 files changed, 167 insertions(+), 46 deletions(-)
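
Comb filtering is what the new window guards against: when two copies of the same signal are mixed a fraction of a millisecond apart, alternating frequencies cancel and reinforce, which audibly colors the sound. Patch 60 therefore refuses to inject an echo whose average ear delay lands within a configurable window of an echo already injected this pass. A minimal sketch of that test, using the same QMap::lowerBound approach the patch uses (names simplified):

    #include <QMap>

    // Returns true if newDelayMs is at least windowMs away from every delay
    // already injected; knownDelays maps delay -> delay, as in the patch.
    bool safeToInject(const QMap<float, float>& knownDelays, float newDelayMs, float windowMs) {
        // lowerBound() finds the first stored delay >= (newDelayMs - windowMs);
        // since keys are sorted, only that one neighbor can fall inside the window
        QMap<float, float>::const_iterator nearest = knownDelays.lowerBound(newDelayMs - windowMs);
        if (nearest != knownDelays.end()) {
            float delta = newDelayMs - nearest.value();
            if (delta > -windowMs && delta < windowMs) {
                return false; // too close: mixing these would comb filter
            }
        }
        return true;
    }

With the default 0.05 ms window, an echo at 12.03 ms is suppressed if one at 12.00 ms has already been injected, while an echo at 12.06 ms is allowed through.
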
diff --git a/examples/audioReflectorTools.js b/examples/audioReflectorTools.js
index 532cae84bc..3cc6a1a21e 100644
--- a/examples/audioReflectorTools.js
+++ b/examples/audioReflectorTools.js
@@ -18,6 +18,7 @@ var localFactorScale = 1.0;
 var reflectiveScale = 100.0;
 var diffusionScale = 100.0;
 var absorptionScale = 100.0;
+var combFilterScale = 50.0;
 
 // these three properties are bound together, if you change one, the others will also change
 var reflectiveRatio = AudioReflector.getReflectiveRatio();
@@ -263,6 +264,46 @@ var localFactorThumb = Overlays.addOverlay("image", {
                     alpha: 1
                 });
 
+var combFilterY = topY;
+topY += sliderHeight;
+
+var combFilterLabel = Overlays.addOverlay("text", {
+                    x: 40,
+                    y: combFilterY,
+                    width: 60,
+                    height: sliderHeight,
+                    color: { red: 0, green: 0, blue: 0},
+                    textColor: { red: 255, green: 255, blue: 255},
+                    topMargin: 6,
+                    leftMargin: 5,
+                    text: "Comb Filter\nWindow:"
+                });
+
+var combFilterSlider = Overlays.addOverlay("image", {
+                    // alternate form of expressing bounds
+                    bounds: { x: 100, y: combFilterY, width: 150, height: sliderHeight},
+                    subImage: { x: 46, y: 0, width: 200, height: 71 },
+                    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
+                    color: { red: 255, green: 255, blue: 255},
+                    alpha: 1
+                });
+
+var combFilterMinThumbX = 110;
+var combFilterMaxThumbX = combFilterMinThumbX + 110;
+var combFilterThumbX = combFilterMinThumbX + ((combFilterMaxThumbX - combFilterMinThumbX) * (AudioReflector.getCombFilterWindow() / combFilterScale));
+var combFilterThumb = Overlays.addOverlay("image", {
+                    x: combFilterThumbX,
+                    y: combFilterY+9,
+                    width: 18,
+                    height: 17,
+                    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
+                    color: { red: 128, green: 128, blue: 0},
+                    alpha: 1
+                });
+
 var reflectiveY = topY;
 topY += sliderHeight;
 
@@ -387,6 +428,10 @@ function scriptEnding() {
     Overlays.deleteOverlay(factorThumb);
     Overlays.deleteOverlay(factorSlider);
 
+    Overlays.deleteOverlay(combFilterLabel);
+    Overlays.deleteOverlay(combFilterThumb);
+    Overlays.deleteOverlay(combFilterSlider);
+
     Overlays.deleteOverlay(localFactorLabel);
     Overlays.deleteOverlay(localFactorThumb);
     Overlays.deleteOverlay(localFactorSlider);
@@ -433,6 +478,7 @@ var movingSliderDelay = false;
 var movingSliderFanout = false;
 var movingSliderSpeed = false;
 var movingSliderFactor = false;
+var movingSliderCombFilter = false;
 var movingSliderLocalFactor = false;
 var movingSliderReflective = false;
 var movingSliderDiffusion = false;
@@ -488,6 +534,18 @@ function mouseMoveEvent(event) {
         var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
         AudioReflector.setDistanceAttenuationScalingFactor(factor);
     }
+    if (movingSliderCombFilter) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < combFilterMinThumbX) {
+            newThumbX = combFilterMinThumbX;
+        }
+        if (newThumbX > combFilterMaxThumbX) {
+            newThumbX = combFilterMaxThumbX;
+        }
+        Overlays.editOverlay(combFilterThumb, { x: newThumbX } );
+        var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
+        AudioReflector.setCombFilterWindow(combFilter);
+    }
 
     if (movingSliderLocalFactor) {
         newThumbX = event.x - thumbClickOffsetX;
@@ -566,6 +624,10 @@ function mousePressEvent(event) {
         movingSliderLocalFactor = true;
         thumbClickOffsetX = event.x - localFactorThumbX;
     }
+    if (clickedOverlay == combFilterThumb) {
+        movingSliderCombFilter = true;
+        thumbClickOffsetX = event.x - combFilterThumbX;
+    }
     if (clickedOverlay == diffusionThumb) {
        movingSliderDiffusion = true;
        thumbClickOffsetX = event.x - diffusionThumbX;
@@ -604,6 +666,12 @@ function mouseReleaseEvent(event) {
         AudioReflector.setDistanceAttenuationScalingFactor(factor);
         factorThumbX = newThumbX;
     }
+    if (movingSliderCombFilter) {
+        movingSliderCombFilter = false;
+        var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
+        AudioReflector.setCombFilterWindow(combFilter);
+        combFilterThumbX = newThumbX;
+    }
 
     if (movingSliderLocalFactor) {
         movingSliderLocalFactor = false;
diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index 847f1dc0bf..6cfc23da97 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -19,6 +19,7 @@ const float MAXIMUM_DELAY_MS = 1000.0 * 20.0f; // stop reflecting after path is
 const int DEFAULT_DIFFUSION_FANOUT = 5;
 const int ABSOLUTE_MAXIMUM_BOUNCE_COUNT = 10;
 const float DEFAULT_LOCAL_ATTENUATION_FACTOR = 0.125;
+const float DEFAULT_COMB_FILTER_WINDOW = 0.05f; // ms delay differential to avoid, to reduce comb filter effects
 
 const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
 
@@ -31,6 +32,7 @@ AudioReflector::AudioReflector(QObject* parent) :
     _soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
     _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
     _localAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
+    _combFilterWindow(DEFAULT_COMB_FILTER_WINDOW),
     _diffusionFanout(DEFAULT_DIFFUSION_FANOUT),
     _absorptionRatio(DEFAULT_ABSORPTION_RATIO),
     _diffusionRatio(DEFAULT_DIFFUSION_RATIO),
@@ -152,7 +154,7 @@ glm::vec3 AudioReflector::getFaceNormal(BoxFace face) {
 
 // set up our buffers for our attenuated and delayed samples
 const int NUMBER_OF_CHANNELS = 2;
 
-void AudioReflector::injectAudiblePoint(const AudiblePoint& audiblePoint,
+void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint,
                         const QByteArray& samples, unsigned int sampleTime, int sampleRate) {
 
     bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
@@ -180,51 +182,77 @@ void AudioReflector::injectAudiblePoint(const AudiblePoint& audiblePoint,
 
     float rightEarDelayMsecs = getDelayFromDistance(rightEarDistance) + audiblePoint.delay;
     float leftEarDelayMsecs = getDelayFromDistance(leftEarDistance) + audiblePoint.delay;
-
-    _totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
-    _delayCount += 2;
-    _maxDelay = std::max(_maxDelay,rightEarDelayMsecs);
-    _maxDelay = std::max(_maxDelay,leftEarDelayMsecs);
-    _minDelay = std::min(_minDelay,rightEarDelayMsecs);
-    _minDelay = std::min(_minDelay,leftEarDelayMsecs);
-
-    int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
-    int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
-
-    float rightEarAttenuation = audiblePoint.attenuation *
-                        getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance);
-
-    float leftEarAttenuation = audiblePoint.attenuation *
-                        getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance);
-
-    _totalAttenuation += rightEarAttenuation + leftEarAttenuation;
-    _attenuationCount += 2;
-    _maxAttenuation = std::max(_maxAttenuation,rightEarAttenuation);
-    _maxAttenuation = std::max(_maxAttenuation,leftEarAttenuation);
-    _minAttenuation = std::min(_minAttenuation,rightEarAttenuation);
-    _minAttenuation = std::min(_minAttenuation,leftEarAttenuation);
+    float averageEarDelayMsecs = (leftEarDelayMsecs + rightEarDelayMsecs) / 2.0f;
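
The average of the two per-ear delays, computed on the line above, becomes the key for the suppression bookkeeping that follows: one decision covers both ears, so an echo is either injected into both channels or suppressed from both, rather than being split between them. A toy sketch of deriving such a key from ear distances (the delay-per-meter constant here is illustrative, not the reflector's actual delay rule):

    // Sketch: derive the comb-filter dedup key from per-ear distances.
    // delayPerMeterMs is an illustrative stand-in for the real delay rule.
    float dedupKeyMs(float leftEarDistanceMeters, float rightEarDistanceMeters,
                     float pathDelayMs, float delayPerMeterMs = 3.0f) {
        float leftMs = leftEarDistanceMeters * delayPerMeterMs + pathDelayMs;
        float rightMs = rightEarDistanceMeters * delayPerMeterMs + pathDelayMs;
        // one key for both ears keeps the inject/suppress decision symmetric
        return (leftMs + rightMs) / 2.0f;
    }
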
-    // run through the samples, and attenuate them
-    for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
-        int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
-        int16_t rightSample = leftSample;
-        if (wantStereo) {
-            rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
+    bool safeToInject = true; // assume the best
+
+    QMap<float, float>& knownDelays = (source == INBOUND_AUDIO) ? _inboundAudioDelays : _localAudioDelays;
+
+    // check to see if any of the known delays are too close
+    QMap<float, float>::const_iterator lowerBound = knownDelays.lowerBound(averageEarDelayMsecs - _combFilterWindow);
+    if (lowerBound != knownDelays.end()) {
+        float closestFound = lowerBound.value();
+        float deltaToClosest = (averageEarDelayMsecs - closestFound);
+        //qDebug() << "knownDelays=" << knownDelays;
+        //qDebug() << "averageEarDelayMsecs=" << averageEarDelayMsecs << " closestFound=" << closestFound;
+        //qDebug() << "deltaToClosest=" << deltaToClosest;
+        if (deltaToClosest > -_combFilterWindow && deltaToClosest < _combFilterWindow) {
+            //qDebug() << "**** WE THINK WE'RE TOO CLOSE!! ****";
+            safeToInject = false;
         }
-
-        attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
-        attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
-
-        attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
-        attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
     }
-
-    // now inject the attenuated array with the appropriate delay
-    unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
-    unsigned int sampleTimeRight = sampleTime + rightEarDelay;
-    _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
-    _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
+
+    if (!safeToInject) {
+        QVector<float>& suppressedEchoes = (source == INBOUND_AUDIO) ? _inboundEchoesSuppressed : _localEchoesSuppressed;
+        suppressedEchoes << averageEarDelayMsecs;
+    } else {
+        knownDelays[averageEarDelayMsecs] = averageEarDelayMsecs;
+
+        _totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
+        _delayCount += 2;
+        _maxDelay = std::max(_maxDelay,rightEarDelayMsecs);
+        _maxDelay = std::max(_maxDelay,leftEarDelayMsecs);
+        _minDelay = std::min(_minDelay,rightEarDelayMsecs);
+        _minDelay = std::min(_minDelay,leftEarDelayMsecs);
+
+        int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
+        int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
+
+        float rightEarAttenuation = audiblePoint.attenuation *
+                            getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance);
+
+        float leftEarAttenuation = audiblePoint.attenuation *
+                            getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance);
+
+        _totalAttenuation += rightEarAttenuation + leftEarAttenuation;
+        _attenuationCount += 2;
+        _maxAttenuation = std::max(_maxAttenuation,rightEarAttenuation);
+        _maxAttenuation = std::max(_maxAttenuation,leftEarAttenuation);
+        _minAttenuation = std::min(_minAttenuation,rightEarAttenuation);
+        _minAttenuation = std::min(_minAttenuation,leftEarAttenuation);
+
+        // run through the samples, and attenuate them
+        for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
+            int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
+            int16_t rightSample = leftSample;
+            if (wantStereo) {
+                rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
+            }
+
+            attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
+            attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
+
+            attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
+            attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
+        }
+
+        // now inject the attenuated array with the appropriate delay
+        unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
+        unsigned int sampleTimeRight = sampleTime + rightEarDelay;
+
+        _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
+        _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
+    }
 }
 
 void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
@@ -244,13 +272,19 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray
             stereoSamples[i * NUM_CHANNELS_OUTPUT] = monoSamples[i] * _localAudioAttenuationFactor;
             stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * _localAudioAttenuationFactor;
         }
+        _localAudioDelays.clear();
+        _localEchoesSuppressed.clear();
         echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat);
+        //qDebug() << _localAudioDelays;
         }
     }
 }
 
 void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
+    _inboundAudioDelays.clear();
+    _inboundEchoesSuppressed.clear();
     echoAudio(INBOUND_AUDIO, sampleTime, samples, format);
+    //qDebug() << _inboundAudioDelays;
 }
 
 void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
@@ -269,7 +303,7 @@ void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, cons
 
     QVector<AudiblePoint>& audiblePoints = source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;
 
     foreach(const AudiblePoint& audiblePoint, audiblePoints) {
-        injectAudiblePoint(audiblePoint, samples, sampleTime, format.sampleRate());
+        injectAudiblePoint(source, audiblePoint, samples, sampleTime, format.sampleRate());
     }
 
     _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount;
diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h
index 900f567920..9e697b4918 100644
--- a/interface/src/AudioReflector.h
+++ b/interface/src/AudioReflector.h
@@ -86,6 +86,8 @@ public slots:
     float getMinAttenuation() const { return _minAttenuation; }
     float getDelayFromDistance(float distance);
     int getDiffusionPathCount() const { return _diffusionPathCount; }
+    int getEchoesInjected() const { return _inboundAudioDelays.size() + _localAudioDelays.size(); }
+    int getEchoesSuppressed() const { return _inboundEchoesSuppressed.size() + _localEchoesSuppressed.size(); }
 
     /// ms of delay added to all echos
     float getPreDelay() const { return _preDelay; }
@@ -103,6 +105,10 @@ public slots:
     float getLocalAudioAttenuationFactor() const { return _localAudioAttenuationFactor; }
     void setLocalAudioAttenuationFactor(float factor) { _localAudioAttenuationFactor = factor; }
 
+    /// ms window in which we will suppress echoes to reduce comb filter effects
+    float getCombFilterWindow() const { return _combFilterWindow; }
+    void setCombFilterWindow(float value) { _combFilterWindow = value; }
+
     /// number of points of diffusion from each reflection point, as fanout increases there are more chances for secondary
     /// echoes, but each diffusion ray is quieter and therefore more likely to be below the sound floor
     int getDiffusionFanout() const { return _diffusionFanout; }
@@ -153,10 +159,14 @@ private:
 
     QVector<AudioPath*> _inboundAudioPaths; /// audio paths we're processing for inbound audio
     QVector<AudiblePoint> _inboundAudiblePoints; /// the audible points that have been calculated from the inbound audio paths
+    QMap<float, float> _inboundAudioDelays; /// delay times for currently injected audio points
+    QVector<float> _inboundEchoesSuppressed; /// delay times of echoes suppressed to avoid comb filter effects
 
     QVector<AudioPath*> _localAudioPaths; /// audio paths we're processing for local audio
     QVector<AudiblePoint> _localAudiblePoints; /// the audible points that have been calculated from the local audio paths
-
+    QMap<float, float> _localAudioDelays; /// delay times for currently injected audio points
+    QVector<float> _localEchoesSuppressed; /// delay times of echoes suppressed to avoid comb filter effects
+
     // adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties,
     // as well as diffusion sound sources
     void addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation,
@@ -173,7 +183,7 @@ private:
     int countDiffusionPaths();
     glm::vec3 getFaceNormal(BoxFace face);
 
-    void injectAudiblePoint(const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
+    void injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
     void echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
 
     // return the surface characteristics of the element we hit
@@ -186,6 +196,7 @@ private:
     float _soundMsPerMeter;
     float _distanceAttenuationScalingFactor;
     float _localAudioAttenuationFactor;
+    float _combFilterWindow;
 
     int _diffusionFanout; // number of points of diffusion from each reflection point
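
Note that the delay maps and suppressed-echo vectors are cleared at the top of each processing pass (see processLocalAudio() and processInboundAudio() above), so getEchoesInjected() and getEchoesSuppressed() report counts for the most recent pass rather than running totals. A sketch of that per-pass lifecycle, with hypothetical simplified names:

    #include <QMap>
    #include <QVector>

    // Hypothetical condensed view of the per-pass echo bookkeeping.
    struct EchoStatsSketch {
        QMap<float, float> delays;   // echoes injected this pass, keyed by delay
        QVector<float> suppressed;   // delays rejected by the comb filter window

        void beginPass() {           // mirrors the clear() calls in the process*Audio() methods
            delays.clear();
            suppressed.clear();
        }
        int injected() const { return delays.size(); }
        int suppressedCount() const { return suppressed.size(); }
    };
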
diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp
index 98955b34f5..153e39452c 100644
--- a/interface/src/ui/Stats.cpp
+++ b/interface/src/ui/Stats.cpp
@@ -343,7 +343,7 @@ void Stats::display(
         lines = _expanded ? 12 : 3;
         if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
-            lines += 7; // spatial audio processing adds 1 spacing line and 6 extra lines of info
+            lines += 8; // spatial audio processing adds 1 spacing line and 7 extra lines of info
         }
         drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10);
@@ -577,6 +577,14 @@ void Stats::display(
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
 
+        sprintf(reflectionsStatus, "Comb Filter Window: %5.3f ms, Allowed: %d, Suppressed: %d",
+                        audioReflector->getCombFilterWindow(),
+                        audioReflector->getEchoesInjected(),
+                        audioReflector->getEchoesSuppressed());
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
+
     }
 }
 
From 3cb109ec89a240ce8ff79f2b64b32b7e9e2475fd Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Wed, 16 Apr 2014 15:21:04 -0700
Subject: [PATCH 61/64] clean up

---
 interface/src/Audio.cpp          |  7 ++-----
 interface/src/AudioReflector.cpp | 11 +++--------
 2 files changed, 5 insertions(+), 13 deletions(-)

diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index 06cb1fe461..cdd549752b 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -564,12 +564,9 @@ void Audio::handleAudioInput() {
             _lastInputLoudness = 0;
         }
 
-        // at this point we have clean monoAudioSamples, which match our target output... this is what we should send
-        // to our interested listeners
-        // send our local loopback to any interested parties
+        // at this point we have clean monoAudioSamples, which match our target output...
+        // this is what we should send to our interested listeners
        if (_processSpatialAudio && !_muted && _audioOutput) {
-            // local audio is sent already resampled to match the network input format, so processors
-            // can easily handle the audio in a format ready to post back to the audio device
            QByteArray monoInputData((char*)monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
            emit processLocalAudio(_spatialAudioStart, monoInputData, _desiredInputFormat);
        }
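
The emit at the end of that Audio.cpp hunk hands the microphone's mono frame to interested listeners as a raw QByteArray. QByteArray's (const char*, int) constructor deep-copies the bytes, so the input buffer can be reused immediately afterwards. A small sketch of that packing step (the function name and parameters are illustrative):

    #include <QByteArray>
    #include <cstdint>

    // Sketch: wrap a frame of mono int16 samples for a signal emission, as the
    // emit above does with monoAudioSamples.
    QByteArray packMonoFrame(const int16_t* samples, int frameSamples) {
        // copies frameSamples * 2 bytes; the caller's buffer stays reusable
        return QByteArray(reinterpret_cast<const char*>(samples),
                          frameSamples * static_cast<int>(sizeof(int16_t)));
    }
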
diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index 6cfc23da97..e71c53857f 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -186,22 +186,19 @@ void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint&
 
     bool safeToInject = true; // assume the best
 
+    // check to see if this new injection point would be within the comb filter
+    // suppression window for any of the existing known delays
     QMap<float, float>& knownDelays = (source == INBOUND_AUDIO) ? _inboundAudioDelays : _localAudioDelays;
-
-    // check to see if any of the known delays are too close
     QMap<float, float>::const_iterator lowerBound = knownDelays.lowerBound(averageEarDelayMsecs - _combFilterWindow);
     if (lowerBound != knownDelays.end()) {
         float closestFound = lowerBound.value();
         float deltaToClosest = (averageEarDelayMsecs - closestFound);
-        //qDebug() << "knownDelays=" << knownDelays;
-        //qDebug() << "averageEarDelayMsecs=" << averageEarDelayMsecs << " closestFound=" << closestFound;
-        //qDebug() << "deltaToClosest=" << deltaToClosest;
         if (deltaToClosest > -_combFilterWindow && deltaToClosest < _combFilterWindow) {
-            //qDebug() << "**** WE THINK WE'RE TOO CLOSE!! ****";
             safeToInject = false;
         }
     }
 
+    // keep track of any of our suppressed echoes so we can report them in our statistics
     if (!safeToInject) {
         QVector<float>& suppressedEchoes = (source == INBOUND_AUDIO) ? _inboundEchoesSuppressed : _localEchoesSuppressed;
         suppressedEchoes << averageEarDelayMsecs;
@@ -275,7 +272,6 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray
         _localAudioDelays.clear();
         _localEchoesSuppressed.clear();
         echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat);
-        //qDebug() << _localAudioDelays;
     }
 }
 
@@ -284,7 +280,6 @@ void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArr
     _inboundAudioDelays.clear();
     _inboundEchoesSuppressed.clear();
     echoAudio(INBOUND_AUDIO, sampleTime, samples, format);
-    //qDebug() << _inboundAudioDelays;
 }
 
 void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
 
From 4c6471caa4b67dd106e2b01dd5ad226242a20c51 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Wed, 16 Apr 2014 16:43:01 -0700
Subject: [PATCH 62/64] fix typo

---
 interface/src/Audio.cpp    | 2 +-
 interface/src/Menu.cpp     | 2 +-
 interface/src/Menu.h       | 2 +-
 interface/src/ui/Stats.cpp | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index cdd549752b..674eaa0d70 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -783,7 +783,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
         _ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
 
         // Accumulate direct transmission of audio from sender to receiver
-        if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal)) {
+        if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
             addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
         }
 
diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp
index 4e620d38e8..85c3d5b0c3 100644
--- a/interface/src/Menu.cpp
+++ b/interface/src/Menu.cpp
@@ -393,7 +393,7 @@ Menu::Menu() :
                                   appInstance->getAudio(),
                                   SLOT(toggleAudioSpatialProcessing()));
 
-        addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingIncudeOriginal,
+        addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingIncludeOriginal,
                                                Qt::CTRL | Qt::SHIFT | Qt::Key_O,
                                                true);
 
diff --git a/interface/src/Menu.h b/interface/src/Menu.h
index 057e6165e3..a62f54b0c6 100644
--- a/interface/src/Menu.h
+++ b/interface/src/Menu.h
@@ -260,7 +260,7 @@ namespace MenuOption {
     const QString AudioSpatialProcessing = "Audio Spatial Processing";
     const QString AudioSpatialProcessingHeadOriented = "Head Oriented";
-    const QString AudioSpatialProcessingIncudeOriginal = "Includes Network Original";
+    const QString AudioSpatialProcessingIncludeOriginal = "Includes Network Original";
     const QString AudioSpatialProcessingPreDelay = "Add Pre-Delay";
     const QString AudioSpatialProcessingProcessLocalAudio = "Process Local Audio";
     const QString AudioSpatialProcessingRenderPaths = "Render Paths";
 
diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp
index 153e39452c..64616cbdf8 100644
--- a/interface/src/ui/Stats.cpp
+++ b/interface/src/ui/Stats.cpp
@@ -515,7 +515,7 @@ void Stats::display(
 
         sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s, Normals: %s",
                 audioReflector->getReflections(),
-                (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal)
+                (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)
                     ? "included" : "silent"),
                 (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars)
                     ? "two" : "one"),
 
From b269a26441b54e6e8fc7211aa793d3447302da36 Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Wed, 16 Apr 2014 17:08:06 -0700
Subject: [PATCH 63/64] CR feedback

---
 interface/src/AudioReflector.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp
index e71c53857f..9e4b97bf46 100644
--- a/interface/src/AudioReflector.cpp
+++ b/interface/src/AudioReflector.cpp
@@ -593,7 +593,6 @@ void AudioReflector::handlePathPoint(AudioPath* path, float distance, OctreeElem
 
     pathDistance += glm::distance(start, end);
 
-    // We aren't using this... should we be????
     float toListenerDistance = glm::distance(end, _listenerPosition);
 
     // adjust our current delay by just the delay from the most recent ray
@@ -609,7 +608,7 @@ void AudioReflector::handlePathPoint(AudioPath* path, float distance, OctreeElem
 
     bool wantDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
     int fanout = wantDiffusions ? _diffusionFanout : 0;
 
-    float partialDiffusionAttenuation = fanout < 1 ? 0.0f : totalDiffusionAttenuation / fanout;
+    float partialDiffusionAttenuation = fanout < 1 ? 0.0f : totalDiffusionAttenuation / (float)fanout;
 
     // total delay includes the bounce back to listener
     float totalDelay = currentDelay + getDelayFromDistance(toListenerDistance);
 
From 9fc650dd6e7af6432f0ed8e581bcc168411418ce Mon Sep 17 00:00:00 2001
From: ZappoMan
Date: Wed, 16 Apr 2014 17:10:26 -0700
Subject: [PATCH 64/64] CR feedback

---
 interface/src/AudioReflector.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h
index 9e697b4918..2408b70a96 100644
--- a/interface/src/AudioReflector.h
+++ b/interface/src/AudioReflector.h
@@ -23,8 +23,8 @@ enum AudioSource {
 
 class AudioPath {
 public:
-    AudioPath(AudioSource source = INBOUND_AUDIO, const glm::vec3& origin = glm::vec3(0),
-            const glm::vec3& direction = glm::vec3(0), float attenuation = 1.0f,
+    AudioPath(AudioSource source = INBOUND_AUDIO, const glm::vec3& origin = glm::vec3(0.0f),
+            const glm::vec3& direction = glm::vec3(0.0f), float attenuation = 1.0f,
             float delay = 0.0f, float distance = 0.0f, bool isDiffusion = false, int bounceCount = 0);
 
     AudioSource source;
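
The final CR tweak spells the default arguments as glm::vec3(0.0f) rather than glm::vec3(0). Both forms broadcast the scalar to every component and produce identical vectors; the float literal simply matches vec3's component type, so no int-to-float conversion is implied in the default. A quick illustration of the equivalence:

    #include <glm/glm.hpp>

    int main() {
        glm::vec3 a(0);    // int scalar, converted and broadcast to all components
        glm::vec3 b(0.0f); // float scalar, broadcast directly
        return (a == b) ? 0 : 1; // always returns 0: the values are identical
    }
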