added wet/dry mix, and alternate distance attenuation features

ZappoMan 2014-04-18 08:55:32 -07:00
parent 4fe2ec3950
commit fa787adce2
9 changed files with 327 additions and 39 deletions


@ -19,6 +19,8 @@ var reflectiveScale = 100.0;
var diffusionScale = 100.0;
var absorptionScale = 100.0;
var combFilterScale = 50.0;
var originalScale = 2.0;
var echoesScale = 2.0;
// these three properties are bound together, if you change one, the others will also change
var reflectiveRatio = AudioReflector.getReflectiveRatio();
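For reference, the coupling described by the comment above is a single constraint, which also appears later in this commit as AudioReflector::getReflectiveRatio():

reflectiveRatio = 1.0 - (absorptionRatio + diffusionRatio)

so moving any one of the reflective, diffusion, or absorption sliders implies an adjustment of the other two (updateRatioSliders() later in this file handles the UI side of that).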
@ -421,6 +423,84 @@ var absorptionThumb = Overlays.addOverlay("image", {
alpha: 1
});
var originalY = topY;
topY += sliderHeight;
var originalLabel = Overlays.addOverlay("text", {
x: 40,
y: originalY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Original\nMix:"
});
var originalSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: originalY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var originalMinThumbX = 110;
var originalMaxThumbX = originalMinThumbX + 110;
var originalThumbX = originalMinThumbX + ((originalMaxThumbX - originalMinThumbX) * (AudioReflector.getOriginalSourceAttenuation() / originalScale));
var originalThumb = Overlays.addOverlay("image", {
x: originalThumbX,
y: originalY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 128, green: 128, blue: 0},
alpha: 1
});
var echoesY = topY;
topY += sliderHeight;
var echoesLabel = Overlays.addOverlay("text", {
x: 40,
y: echoesY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Echoes\nMix:"
});
var echoesSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: echoesY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var echoesMinThumbX = 110;
var echoesMaxThumbX = echoesMinThumbX + 110;
var echoesThumbX = echoesMinThumbX + ((echoesMaxThumbX - echoesMinThumbX) * (AudioReflector.getEchoesAttenuation() / echoesScale));
var echoesThumb = Overlays.addOverlay("image", {
x: echoesThumbX,
y: echoesY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 128, green: 128, blue: 0},
alpha: 1
});
// When our script shuts down, we should clean up all of our overlays
function scriptEnding() {
@ -460,6 +540,14 @@ function scriptEnding() {
Overlays.deleteOverlay(absorptionThumb);
Overlays.deleteOverlay(absorptionSlider);
Overlays.deleteOverlay(echoesLabel);
Overlays.deleteOverlay(echoesThumb);
Overlays.deleteOverlay(echoesSlider);
Overlays.deleteOverlay(originalLabel);
Overlays.deleteOverlay(originalThumb);
Overlays.deleteOverlay(originalSlider);
}
Script.scriptEnding.connect(scriptEnding);
@ -483,6 +571,8 @@ var movingSliderLocalFactor = false;
var movingSliderReflective = false;
var movingSliderDiffusion = false;
var movingSliderAbsorption = false;
var movingSliderOriginal = false;
var movingSliderEchoes = false;
var thumbClickOffsetX = 0;
function mouseMoveEvent(event) {
@ -546,7 +636,6 @@ function mouseMoveEvent(event) {
var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
AudioReflector.setCombFilterWindow(combFilter);
}
if (movingSliderLocalFactor) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < localFactorMinThumbX) {
@ -598,6 +687,30 @@ function mouseMoveEvent(event) {
var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
setDiffusionRatio(diffusion);
} }
if (movingSliderEchoes) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < echoesMinThumbX) {
newThumbX = echoesMinThumbX;
}
if (newThumbX > echoesMaxThumbX) {
newThumbX = echoesMaxThumbX;
}
Overlays.editOverlay(echoesThumb, { x: newThumbX } );
var echoes = ((newThumbX - echoesMinThumbX) / (echoesMaxThumbX - echoesMinThumbX)) * echoesScale;
AudioReflector.setEchoesAttenuation(echoes);
}
if (movingSliderOriginal) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < originalMinThumbX) {
newThumbX = originalMinThumbX;
}
if (newThumbX > originalMaxThumbX) {
newThumbX = originalMaxThumbX;
}
Overlays.editOverlay(originalThumb, { x: newThumbX } );
var original = ((newThumbX - originalMinThumbX) / (originalMaxThumbX - originalMinThumbX)) * originalScale;
AudioReflector.setOriginalSourceAttenuation(original);
}
}
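A quick worked example of the thumb-to-value mapping used by the handlers above, with numbers taken from the slider definitions earlier in this file. Each slider maps a 110-pixel thumb range onto [0, scale]:

value = ((thumbX - minThumbX) / (maxThumbX - minThumbX)) * scale
thumbX = minThumbX + (value / scale) * (maxThumbX - minThumbX)

With originalScale and echoesScale both set to 2.0, the new wet/dry sliders cover attenuation values from 0.0 at the far left to 2.0 at the far right, so the default attenuation of 1.0 sits at the midpoint.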
@ -640,7 +753,16 @@ function mousePressEvent(event) {
movingSliderReflective = true;
thumbClickOffsetX = event.x - reflectiveThumbX;
}
if (clickedOverlay == originalThumb) {
movingSliderOriginal = true;
thumbClickOffsetX = event.x - originalThumbX;
}
if (clickedOverlay == echoesThumb) {
movingSliderEchoes = true;
thumbClickOffsetX = event.x - echoesThumbX;
}
}
function mouseReleaseEvent(event) {
if (movingSliderDelay) {
movingSliderDelay = false;
@ -672,14 +794,12 @@ function mouseReleaseEvent(event) {
AudioReflector.setCombFilterWindow(combFilter);
combFilterThumbX = newThumbX;
}
if (movingSliderLocalFactor) {
movingSliderLocalFactor = false;
var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale;
AudioReflector.setLocalAudioAttenuationFactor(localFactor);
localFactorThumbX = newThumbX;
}
if (movingSliderReflective) {
movingSliderReflective = false;
var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale;
@ -687,7 +807,6 @@ function mouseReleaseEvent(event) {
reflectiveThumbX = newThumbX;
updateRatioSliders();
}
if (movingSliderDiffusion) {
movingSliderDiffusion = false;
var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
@ -695,7 +814,6 @@ function mouseReleaseEvent(event) {
diffusionThumbX = newThumbX;
updateRatioSliders();
}
if (movingSliderAbsorption) {
movingSliderAbsorption = false;
var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale;
@ -703,6 +821,18 @@ function mouseReleaseEvent(event) {
absorptionThumbX = newThumbX;
updateRatioSliders();
}
if (movingSliderEchoes) {
movingSliderEchoes = false;
var echoes = ((newThumbX - echoesMinThumbX) / (echoesMaxThumbX - echoesMinThumbX)) * echoesScale;
AudioReflector.setEchoesAttenuation(echoes);
echoesThumbX = newThumbX;
}
if (movingSliderOriginal) {
movingSliderOriginal = false;
var original = ((newThumbX - originalMinThumbX) / (originalMaxThumbX - originalMinThumbX)) * originalScale;
AudioReflector.setOriginalSourceAttenuation(original);
originalThumbX = newThumbX;
}
}
Controller.mouseMoveEvent.connect(mouseMoveEvent);


@ -1679,8 +1679,12 @@ void Application::init() {
_audioReflector.setMyAvatar(getAvatar());
_audioReflector.setVoxels(_voxels.getTree());
_audioReflector.setAudio(getAudio());
_audioReflector.setAvatarManager(&_avatarManager);
connect(getAudio(), &Audio::processInboundAudio, &_audioReflector, &AudioReflector::processInboundAudio,Qt::DirectConnection);
connect(getAudio(), &Audio::processLocalAudio, &_audioReflector, &AudioReflector::processLocalAudio,Qt::DirectConnection);
connect(getAudio(), &Audio::preProcessOriginalInboundAudio, &_audioReflector,
&AudioReflector::preProcessOriginalInboundAudio,Qt::DirectConnection);
// save settings when avatar changes
connect(_myAvatar, &MyAvatar::transformChanged, this, &Application::bumpSettings);


@ -784,6 +784,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
_ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
// Accumulate direct transmission of audio from sender to receiver
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
}


@ -99,6 +99,7 @@ public slots:
signals:
bool muteToggled();
void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format);
void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);


@ -25,6 +25,8 @@ const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on
const float DEFAULT_ABSORPTION_RATIO = 0.125; // 12.5% is absorbed
const float DEFAULT_DIFFUSION_RATIO = 0.125; // 12.5% is diffused
const float DEFAULT_ORIGINAL_ATTENUATION = 1.0f;
const float DEFAULT_ECHO_ATTENUATION = 1.0f;
AudioReflector::AudioReflector(QObject* parent) :
QObject(parent),
@ -36,6 +38,8 @@ AudioReflector::AudioReflector(QObject* parent) :
_diffusionFanout(DEFAULT_DIFFUSION_FANOUT),
_absorptionRatio(DEFAULT_ABSORPTION_RATIO),
_diffusionRatio(DEFAULT_DIFFUSION_RATIO),
_originalSourceAttenuation(DEFAULT_ORIGINAL_ATTENUATION),
_allEchoesAttenuation(DEFAULT_ECHO_ATTENUATION),
_withDiffusion(false),
_lastPreDelay(DEFAULT_PRE_DELAY),
_lastSoundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
@ -43,20 +47,29 @@ AudioReflector::AudioReflector(QObject* parent) :
_lastLocalAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
_lastDiffusionFanout(DEFAULT_DIFFUSION_FANOUT),
_lastAbsorptionRatio(DEFAULT_ABSORPTION_RATIO),
_lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO),
_lastDontDistanceAttenuate(false),
_lastAlternateDistanceAttenuate(false)
{
_reflections = 0;
_diffusionPathCount = 0;
_averageAttenuationOfficial = _averageAttenuation = 0.0f;
_maxAttenuationOfficial = _maxAttenuation = 0.0f;
_minAttenuationOfficial = _minAttenuation = 0.0f;
_averageDelayOfficial = _averageDelay = 0;
_maxDelayOfficial = _maxDelay = 0;
_minDelayOfficial = _minDelay = 0;
_inboundEchoesCount = 0;
_inboundEchoesSuppressedCount = 0;
_localEchoesCount = 0;
_localEchoesSuppressedCount = 0;
}
bool AudioReflector::haveAttributesChanged() {
bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
bool dontDistanceAttenuate = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
bool alternateDistanceAttenuate = Menu::getInstance()->isOptionChecked(
MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
bool attributesChange = (_withDiffusion != withDiffusion
|| _lastPreDelay != _preDelay
@ -64,7 +77,9 @@ bool AudioReflector::haveAttributesChanged() {
|| _lastDistanceAttenuationScalingFactor != _distanceAttenuationScalingFactor
|| _lastDiffusionFanout != _diffusionFanout
|| _lastAbsorptionRatio != _absorptionRatio
|| _lastDiffusionRatio != _diffusionRatio
|| _lastDontDistanceAttenuate != dontDistanceAttenuate
|| _lastAlternateDistanceAttenuate != alternateDistanceAttenuate);
if (attributesChange) {
_withDiffusion = withDiffusion;
@ -74,6 +89,8 @@ bool AudioReflector::haveAttributesChanged() {
_lastDiffusionFanout = _diffusionFanout;
_lastAbsorptionRatio = _absorptionRatio;
_lastDiffusionRatio = _diffusionRatio;
_lastDontDistanceAttenuate = dontDistanceAttenuate;
_lastAlternateDistanceAttenuate = alternateDistanceAttenuate;
}
return attributesChange;
@ -107,19 +124,47 @@ float AudioReflector::getDelayFromDistance(float distance) {
// attenuation = from the Audio Mixer
float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
bool doDistanceAttenuation = !Menu::getInstance()->isOptionChecked(
MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
bool originalFormula = !Menu::getInstance()->isOptionChecked(
MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
float distanceCoefficient = 1.0f;
if (doDistanceAttenuation) {
if (originalFormula) {
const float DISTANCE_SCALE = 2.5f;
const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
const float DISTANCE_LOG_BASE = 2.5f;
const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);
float distanceSquareToSource = distance * distance;
// calculate the distance coefficient using the distance to this node
distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
DISTANCE_SCALE_LOG +
(0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor());
} else {
// From Fred: If we wanted something that would produce a tail that could go up to 5 seconds in a
// really big room, that would suggest the sound still has to be in the audible after traveling about
// 1500 meters. If its a sound of average volume, we probably have about 30 db, or 5 base2 orders
// of magnitude we can drop down before the sound becomes inaudible. (Thats approximate headroom
// based on a few sloppy assumptions.) So we could try a factor like 1 / (2^(D/300)) for starters.
// 1 / (2^(D/300))
const float DISTANCE_BASE = 2.0f;
const float DISTANCE_DENOMINATOR = 300.0f;
const float DISTANCE_NUMERATOR = 300.0f;
distanceCoefficient = DISTANCE_NUMERATOR / powf(DISTANCE_BASE, (distance / DISTANCE_DENOMINATOR));
distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor());
}
}
return distanceCoefficient;
}
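In closed form, the two attenuation curves implemented above are (with s the distance attenuation scaling factor and d the path distance in meters):

original: c(d) = min(1, s * 0.3^(DISTANCE_SCALE_LOG + log_2.5(d) - 1)) = min(1, s * 0.3^(log_2.5(d)))
alternate: c(d) = min(1, s * 300 / 2^(d/300))

The alternate curve stays clamped at 1.0 until roughly d = 300 * log2(300 * s) and then halves for every additional 300 meters of path length, which gives the much longer audible tail described in the comment from Fred.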
@ -236,11 +281,13 @@ void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint&
rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
}
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] =
leftSample * leftEarAttenuation * _allEchoesAttenuation;
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] =
rightSample * rightEarAttenuation * _allEchoesAttenuation;
}
// now inject the attenuated array with the appropriate delay
@ -249,9 +296,25 @@ void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint&
_audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
_audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
_injectedEchoes++;
}
}
void AudioReflector::preProcessOriginalInboundAudio(unsigned int sampleTime,
QByteArray& samples, const QAudioFormat& format) {
if (_originalSourceAttenuation != 1.0f) {
int numberOfSamples = (samples.size() / sizeof(int16_t));
int16_t* sampleData = (int16_t*)samples.data();
for (int i = 0; i < numberOfSamples; i++) {
sampleData[i] = sampleData[i] * _originalSourceAttenuation;
}
}
}
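The new preProcessOriginalInboundAudio() above is the dry half of the wet/dry mix: it scales the untouched inbound buffer in place just before Audio::processReceivedAudio() adds it to the spatial audio buffer, while the echo (wet) half is scaled by _allEchoesAttenuation inside injectAudiblePoint(). A minimal sketch of the same per-sample scaling, assuming nothing beyond what this commit shows (the helper name applyGain is illustrative only):

// Illustrative helper (not part of this commit): scale a buffer of int16_t samples in place,
// exactly as preProcessOriginalInboundAudio() does with _originalSourceAttenuation.
#include <QByteArray>
#include <cstdint>

static void applyGain(QByteArray& samples, float gain) {
    int16_t* sampleData = reinterpret_cast<int16_t*>(samples.data());
    int numberOfSamples = samples.size() / static_cast<int>(sizeof(int16_t));
    for (int i = 0; i < numberOfSamples; i++) {
        sampleData[i] = static_cast<int16_t>(sampleData[i] * gain); // dry level
    }
}

// The wet level is applied per echo in injectAudiblePoint():
//   echoSample = sourceSample * earAttenuation * _allEchoesAttenuation;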
void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) {
const int NUM_CHANNELS_INPUT = 1;
@ -272,6 +335,8 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray
_localAudioDelays.clear();
_localEchoesSuppressed.clear();
echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat);
_localEchoesCount = _localAudioDelays.size();
_localEchoesSuppressedCount = _localEchoesSuppressed.size();
}
}
}
@ -280,9 +345,13 @@ void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArr
_inboundAudioDelays.clear();
_inboundEchoesSuppressed.clear();
echoAudio(INBOUND_AUDIO, sampleTime, samples, format);
_inboundEchoesCount = _inboundAudioDelays.size();
_inboundEchoesSuppressedCount = _inboundEchoesSuppressed.size();
}
void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
QMutexLocker locker(&_mutex);
_maxDelay = 0;
_maxAttenuation = 0.0f;
_minDelay = std::numeric_limits<int>::max();
@ -292,14 +361,20 @@ void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, cons
_totalAttenuation = 0.0f;
_attenuationCount = 0;
QMutexLocker locker(&_mutex);
// depending on if we're processing local or external audio, pick the correct points vector
QVector<AudiblePoint>& audiblePoints = source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;
int injectCalls = 0;
_injectedEchoes = 0;
foreach(const AudiblePoint& audiblePoint, audiblePoints) {
injectCalls++;
injectAudiblePoint(source, audiblePoint, samples, sampleTime, format.sampleRate());
}
/*
qDebug() << "injectCalls=" << injectCalls;
qDebug() << "_injectedEchoes=" << _injectedEchoes;
*/
_averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount;
_averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount;
@ -308,6 +383,14 @@ void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, cons
_minDelay = 0.0f;
_minAttenuation = 0.0f;
}
_maxDelayOfficial = _maxDelay;
_minDelayOfficial = _minDelay;
_maxAttenuationOfficial = _maxAttenuation;
_minAttenuationOfficial = _minAttenuation;
_averageDelayOfficial = _averageDelay;
_averageAttenuationOfficial = _averageAttenuation;
}
void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) {
@ -359,6 +442,19 @@ void AudioReflector::addAudioPath(AudioSource source, const glm::vec3& origin, c
audioPaths.push_back(path);
}
// NOTE: This is a prototype of an eventual utility that will identify the speaking sources for the inbound audio
// stream. It's not currently called but will be added soon.
void AudioReflector::identifyAudioSources() {
// looking for audio sources....
foreach (const AvatarSharedPointer& avatarPointer, _avatarManager->getAvatarHash()) {
Avatar* avatar = static_cast<Avatar*>(avatarPointer.data());
if (!avatar->isInitialized()) {
continue;
}
qDebug() << "avatar["<< avatar <<"] loudness:" << avatar->getAudioLoudness();
}
}
void AudioReflector::calculateAllReflections() {
// only recalculate when we've moved, or if the attributes have changed
// TODO: what about case where new voxels are added in front of us???


@ -15,6 +15,7 @@
#include "Audio.h" #include "Audio.h"
#include "avatar/MyAvatar.h" #include "avatar/MyAvatar.h"
#include "avatar/AvatarManager.h"
enum AudioSource {
LOCAL_AUDIO,
@ -69,25 +70,27 @@ public:
void setVoxels(VoxelTree* voxels) { _voxels = voxels; }
void setMyAvatar(MyAvatar* myAvatar) { _myAvatar = myAvatar; }
void setAudio(Audio* audio) { _audio = audio; }
void setAvatarManager(AvatarManager* avatarManager) { _avatarManager = avatarManager; }
void render(); /// must be called in the application render loop
void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format);
void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
public slots:
// statistics
int getReflections() const { return _reflections; }
float getAverageDelayMsecs() const { return _averageDelayOfficial; }
float getAverageAttenuation() const { return _averageAttenuationOfficial; }
float getMaxDelayMsecs() const { return _maxDelayOfficial; }
float getMaxAttenuation() const { return _maxAttenuationOfficial; }
float getMinDelayMsecs() const { return _minDelayOfficial; }
float getMinAttenuation() const { return _minAttenuationOfficial; }
float getDelayFromDistance(float distance);
int getDiffusionPathCount() const { return _diffusionPathCount; }
int getEchoesInjected() const { return _inboundEchoesCount + _localEchoesCount; }
int getEchoesSuppressed() const { return _inboundEchoesSuppressedCount + _localEchoesSuppressedCount; }
/// ms of delay added to all echos
float getPreDelay() const { return _preDelay; }
@ -126,12 +129,19 @@ public slots:
float getReflectiveRatio() const { return (1.0f - (_absorptionRatio + _diffusionRatio)); }
void setReflectiveRatio(float ratio);
// wet/dry mix - these don't affect any reflection calculations, only the final mix volumes
float getOriginalSourceAttenuation() const { return _originalSourceAttenuation; }
void setOriginalSourceAttenuation(float value) { _originalSourceAttenuation = value; }
float getEchoesAttenuation() const { return _allEchoesAttenuation; }
void setEchoesAttenuation(float value) { _allEchoesAttenuation = value; }
signals:
private:
VoxelTree* _voxels; // used to access voxel scene
MyAvatar* _myAvatar; // access to listener
Audio* _audio; // access to audio API
AvatarManager* _avatarManager; // access to avatar manager API
// Helpers for drawing
void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);
@ -147,11 +157,18 @@ private:
float _averageDelay;
float _maxDelay;
float _minDelay;
float _averageDelayOfficial;
float _maxDelayOfficial;
float _minDelayOfficial;
int _attenuationCount;
float _totalAttenuation;
float _averageAttenuation;
float _maxAttenuation;
float _minAttenuation;
float _averageAttenuationOfficial;
float _maxAttenuationOfficial;
float _minAttenuationOfficial;
glm::vec3 _listenerPosition;
glm::vec3 _origin;
@ -161,11 +178,15 @@ private:
QVector<AudiblePoint> _inboundAudiblePoints; /// the audible points that have been calculated from the inbound audio paths
QMap<float, float> _inboundAudioDelays; /// delay times for currently injected audio points
QVector<float> _inboundEchoesSuppressed; /// delay times for currently injected audio points
int _inboundEchoesCount;
int _inboundEchoesSuppressedCount;
QVector<AudioPath*> _localAudioPaths; /// audio paths we're processing for local audio
QVector<AudiblePoint> _localAudiblePoints; /// the audible points that have been calculated from the local audio paths
QMap<float, float> _localAudioDelays; /// delay times for currently injected audio points
QVector<float> _localEchoesSuppressed; /// delay times for currently injected audio points
int _localEchoesCount;
int _localEchoesSuppressedCount;
// adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties,
// as well as diffusion sound sources
@ -182,6 +203,7 @@ private:
void calculateAllReflections();
int countDiffusionPaths();
glm::vec3 getFaceNormal(BoxFace face);
void identifyAudioSources();
void injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
void echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
@ -197,13 +219,16 @@ private:
float _distanceAttenuationScalingFactor;
float _localAudioAttenuationFactor;
float _combFilterWindow;
int _diffusionFanout; // number of points of diffusion from each reflection point
// all elements have the same material for now...
float _absorptionRatio;
float _diffusionRatio;
float _reflectiveRatio;
// wet/dry mix - these don't affect any reflection calculations, only the final mix volumes
float _originalSourceAttenuation; /// each sample of original signal will be multiplied by this
float _allEchoesAttenuation; /// each sample of all echo signals will be multiplied by this
// remember the last known values at calculation
bool haveAttributesChanged();
@ -216,6 +241,10 @@ private:
int _lastDiffusionFanout;
float _lastAbsorptionRatio;
float _lastDiffusionRatio;
bool _lastDontDistanceAttenuate;
bool _lastAlternateDistanceAttenuate;
int _injectedEchoes;
};


@ -429,6 +429,14 @@ Menu::Menu() :
Qt::CTRL | Qt::SHIFT | Qt::Key_A,
true);
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingDontDistanceAttenuate,
Qt::CTRL | Qt::SHIFT | Qt::Key_Y,
false);
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate,
Qt::CTRL | Qt::SHIFT | Qt::Key_U,
false);
addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
Qt::CTRL | Qt::SHIFT | Qt::Key_V,
this,


@ -268,6 +268,10 @@ namespace MenuOption {
const QString AudioSpatialProcessingSlightlyRandomSurfaces = "Slightly Random Surfaces";
const QString AudioSpatialProcessingStereoSource = "Stereo Source";
const QString AudioSpatialProcessingWithDiffusions = "With Diffusions";
const QString AudioSpatialProcessingDontDistanceAttenuate = "Don't calculate distance attenuation";
const QString AudioSpatialProcessingAlternateDistanceAttenuate = "Alternate distance attenuation";
const QString Avatars = "Avatars";
const QString Bandwidth = "Bandwidth Display";


@ -343,7 +343,7 @@ void Stats::display(
lines = _expanded ? 12 : 3;
if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
lines += 9; // spatial audio processing adds 1 spacing line and 8 extra lines of info
}
drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10);
@ -540,11 +540,19 @@ void Stats::display(
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
bool distanceAttenuationDisabled = Menu::getInstance()->isOptionChecked(
MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
bool alternateDistanceAttenuationEnabled = Menu::getInstance()->isOptionChecked(
MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, %s: %5.3f",
audioReflector->getAverageAttenuation(),
audioReflector->getMaxAttenuation(),
audioReflector->getMinAttenuation(),
(distanceAttenuationDisabled ? "Distance Factor [DISABLED]" :
alternateDistanceAttenuationEnabled ? "Distance Factor [ALTERNATE]" : "Distance Factor [STANDARD]"),
audioReflector->getDistanceAttenuationScalingFactor());
verticalOffset += STATS_PELS_PER_LINE;
@ -585,6 +593,13 @@ void Stats::display(
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
sprintf(reflectionsStatus, "Wet/Dry Mix: Original: %5.3f Echoes: %5.3f",
audioReflector->getOriginalSourceAttenuation(),
audioReflector->getEchoesAttenuation());
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
}
}