add reflection stats to stats display, add pre-delay, add toggle for separate ears, add surface randomness

ZappoMan 2014-04-04 11:25:43 -07:00
parent b56e0ff3d4
commit c35d6bdb0c
5 changed files with 81 additions and 22 deletions

View file

@@ -2835,6 +2835,36 @@ void Application::displayStats() {
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, downloadStats.str().c_str(), WHITE_TEXT);
+if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
+// add some reflection stats
+char reflectionsStatus[128];
+sprintf(reflectionsStatus, "Reflections: %d, Pre-Delay: %f, Separate Ears:%s",
+_audioReflector.getReflections(),
+_audioReflector.getDelayFromDistance(0.0f),
+debug::valueOf(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars)));
+verticalOffset += STATS_PELS_PER_LINE;
+drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT);
+sprintf(reflectionsStatus, "Delay: average %f, max %f, min %f",
+_audioReflector.getAverageDelayMsecs(),
+_audioReflector.getMaxDelayMsecs(),
+_audioReflector.getMinDelayMsecs());
+verticalOffset += STATS_PELS_PER_LINE;
+drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT);
+sprintf(reflectionsStatus, "Attenuation: average %f, max %f, min %f",
+_audioReflector.getAverageAttenuation(),
+_audioReflector.getMaxAttenuation(),
+_audioReflector.getMinAttenuation());
+verticalOffset += STATS_PELS_PER_LINE;
+drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT);
+}
}
verticalOffset = 0;

View file

@@ -9,6 +9,7 @@
#include <QMutexLocker>
#include "AudioReflector.h"
+#include "Menu.h"
AudioReflector::AudioReflector(QObject* parent) :
QObject(parent)
@@ -28,14 +29,23 @@ void AudioReflector::render() {
}
// delay = 1ms per foot
// = 3ms per meter
// attenuation =
// BOUNCE_ATTENUATION_FACTOR [0.5] * (1/(1+distance))
+const float PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections
-int getDelayFromDistance(float distance) {
+float AudioReflector::getDelayFromDistance(float distance) {
const int MS_DELAY_PER_METER = 3;
-return MS_DELAY_PER_METER * distance;
+float delay = (MS_DELAY_PER_METER * distance);
+if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) {
+delay += PRE_DELAY;
+}
+return delay;
}
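As a side note on the model in this hunk: total delay works out to 3 ms per meter of path length, plus a flat 20 ms whenever the new AudioSpatialProcessingPreDelay option is checked. A minimal standalone sketch of that arithmetic (function and parameter names here are illustrative, not part of the commit):

// Sketch of the delay model above: 3 ms per meter of total path length,
// plus an optional fixed 20 ms pre-delay. wantPreDelay stands in for the
// Menu::isOptionChecked() call in the real code.
float delayMsecsForDistance(float distanceMeters, bool wantPreDelay) {
    const float MS_DELAY_PER_METER = 3.0f;
    const float PRE_DELAY_MSECS = 20.0f;
    float delay = MS_DELAY_PER_METER * distanceMeters;
    if (wantPreDelay) {
        delay += PRE_DELAY_MSECS;
    }
    return delay; // e.g. a 10 m bounce path gives 30 ms, or 50 ms with pre-delay
}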
// **option 1**: this is what we're using
@@ -61,24 +71,29 @@ float getDistanceAttenuationCoefficient(float distance) {
float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
DISTANCE_SCALE_LOG +
(0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
-distanceCoefficient = std::min(1.0f, distanceCoefficient);
+const float DISTANCE_SCALING_FACTOR = 2.0f;
+distanceCoefficient = std::min(1.0f, distanceCoefficient * DISTANCE_SCALING_FACTOR);
return distanceCoefficient;
}
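The only change to the attenuation curve itself is the 2x scaling factor, still clamped at 1.0: nearby bounces whose boosted coefficient would exceed unity are unaffected, while everything farther out comes through twice as loud as before. A tiny illustration of that boost-then-clamp (the 0.3 input is made up):

#include <algorithm>  // std::min

// Illustration of the boosted-then-clamped attenuation coefficient.
float boostedCoefficient(float rawCoefficient) {
    const float DISTANCE_SCALING_FACTOR = 2.0f;
    return std::min(1.0f, rawCoefficient * DISTANCE_SCALING_FACTOR);
}
// boostedCoefficient(0.3f) == 0.6f; any raw value of 0.5f or more clamps to 1.0f.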
glm::vec3 getFaceNormal(BoxFace face) {
+float surfaceRandomness = randFloatInRange(0.99,1.0);
+float surfaceRemainder = (1.0f - surfaceRandomness)/2.0f;
if (face == MIN_X_FACE) {
-return glm::vec3(-1, 0, 0);
+return glm::vec3(-surfaceRandomness, surfaceRemainder, surfaceRemainder);
} else if (face == MAX_X_FACE) {
-return glm::vec3(1, 0, 0);
+return glm::vec3(surfaceRandomness, surfaceRemainder, surfaceRemainder);
} else if (face == MIN_Y_FACE) {
-return glm::vec3(0, -1, 0);
+return glm::vec3(surfaceRemainder, -surfaceRandomness, surfaceRemainder);
} else if (face == MAX_Y_FACE) {
-return glm::vec3(0, 1, 0);
+return glm::vec3(surfaceRemainder, surfaceRandomness, surfaceRemainder);
} else if (face == MIN_Z_FACE) {
-return glm::vec3(0, 0, -1);
+return glm::vec3(surfaceRemainder, surfaceRemainder, -surfaceRandomness);
} else if (face == MAX_Z_FACE) {
-return glm::vec3(0, 0, 1);
+return glm::vec3(surfaceRemainder, surfaceRemainder, surfaceRandomness);
}
return glm::vec3(0, 0, 0); //error case
}
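With the surface randomness change the reflection normals are no longer exactly axis-aligned: the dominant component is drawn from [0.99, 1.0] and the remaining (1 - r) / 2 is split evenly across the other two axes, so bounces scatter slightly instead of staying perfectly specular. A small sketch of the same perturbation in one place (the glm::normalize call is only an illustration; the commit returns the vector as-is, leaving it marginally shorter than unit length):

#include <glm/glm.hpp>

// Sketch: perturb an axis-aligned face normal the way the new getFaceNormal()
// does, then normalize. randomness is expected to be in the [0.99, 1.0] range.
glm::vec3 perturbedFaceNormal(const glm::vec3& axisNormal, float randomness) {
    float remainder = (1.0f - randomness) / 2.0f;
    glm::vec3 result = axisNormal * randomness
                     + (glm::vec3(1.0f) - glm::abs(axisNormal)) * remainder;
    return glm::normalize(result);
}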
@@ -113,7 +128,7 @@ void AudioReflector::calculateAllReflections() {
// only recalculate when we've moved...
// TODO: what about case where new voxels are added in front of us???
-if (_myAvatar->getHead()->getPosition() != _origin) {
+if (_reflections == 0 || _myAvatar->getHead()->getPosition() != _origin) {
QMutexLocker locker(&_mutex);
qDebug() << "origin has changed...";
@@ -218,8 +233,11 @@ const int NUMBER_OF_CHANNELS = 2;
void AudioReflector::echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples,
unsigned int sampleTime, int sampleRate) {
-glm::vec3 rightEarPosition = _myAvatar->getHead()->getRightEarPosition();
-glm::vec3 leftEarPosition = _myAvatar->getHead()->getLeftEarPosition();
+bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
+glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() :
+_myAvatar->getHead()->getPosition();
+glm::vec3 leftEarPosition = wantEarSeparation ? _myAvatar->getHead()->getLeftEarPosition() :
+_myAvatar->getHead()->getPosition();
glm::vec3 start = origin;
int totalNumberOfSamples = samples.size() / sizeof(int16_t);
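The new separate-ears toggle is what makes the per-ear values differ at all: with it off, both "ears" sit at the head position and every bounce produces identical left and right delays; with it on, the difference in path length yields a small interaural time difference at the 3 ms-per-meter rate. A rough sense of scale (numbers are illustrative, not from the commit):

// With separate ears enabled, a 0.2 m difference in total path length between
// the ears is worth roughly 0.6 ms of interaural time difference.
const float MS_DELAY_PER_METER = 3.0f;
float leftPathMeters  = 5.1f;  // illustrative bounce path to the left ear
float rightPathMeters = 4.9f;  // illustrative bounce path to the right ear
float itdMsecs = MS_DELAY_PER_METER * (leftPathMeters - rightPathMeters);  // ~0.6 ms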
@@ -253,8 +271,8 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector<glm:
float rightTotalDistance = rightEarDistance + rightDistance;
float leftTotalDistance = leftEarDistance + leftDistance;
-int rightEarDelayMsecs = getDelayFromDistance(rightTotalDistance);
-int leftEarDelayMsecs = getDelayFromDistance(leftTotalDistance);
+float rightEarDelayMsecs = getDelayFromDistance(rightTotalDistance);
+float leftEarDelayMsecs = getDelayFromDistance(leftTotalDistance);
_totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
_delayCount += 2;
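These accumulators feed the average shown in the new stats lines; presumably the reported figure is just the running total divided by the count once the frame's reflections have been processed, along the lines of (inferred, not shown in this diff):

// Inferred from the accumulators above; the actual averaging code is not part
// of this diff.
float averageDelayMsecs = (_delayCount > 0) ? (_totalDelay / _delayCount) : 0.0f;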
@@ -324,7 +342,7 @@ void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArr
_maxAttenuation = 0.0f;
_minDelay = std::numeric_limits<int>::max();
_minAttenuation = std::numeric_limits<float>::max();
-_totalDelay = 0;
+_totalDelay = 0.0f;
_delayCount = 0;
_totalAttenuation = 0.0f;
_attenuationCount = 0;

View file

@@ -28,12 +28,13 @@ public:
void render();
int getReflections() const { return _reflections; }
-int getAverageDelayMsecs() const { return _averageDelay; }
+float getAverageDelayMsecs() const { return _averageDelay; }
float getAverageAttenuation() const { return _averageAttenuation; }
-int getMaxDelayMsecs() const { return _maxDelay; }
+float getMaxDelayMsecs() const { return _maxDelay; }
float getMaxAttenuation() const { return _maxAttenuation; }
-int getMinDelayMsecs() const { return _minDelay; }
+float getMinDelayMsecs() const { return _minDelay; }
float getMinAttenuation() const { return _minAttenuation; }
+float getDelayFromDistance(float distance);
public slots:
void processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
@@ -60,10 +61,10 @@ private:
int _reflections;
int _delayCount;
-int _totalDelay;
-int _averageDelay;
-int _maxDelay;
-int _minDelay;
+float _totalDelay;
+float _averageDelay;
+float _maxDelay;
+float _minDelay;
int _attenuationCount;
float _totalAttenuation;

View file

@@ -376,6 +376,14 @@ Menu::Menu() :
Qt::CTRL | Qt::SHIFT | Qt::Key_O,
true);
+addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingSeparateEars,
+Qt::CTRL | Qt::SHIFT | Qt::Key_E,
+true);
+addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingPreDelay,
+Qt::CTRL | Qt::SHIFT | Qt::Key_D,
+true);
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter,
Qt::CTRL | Qt::SHIFT | Qt::Key_F,
false);

View file

@@ -245,6 +245,8 @@ namespace MenuOption {
const QString AudioToneInjection = "Inject Test Tone";
const QString AudioSpatialProcessing = "Audio Spatial Processing";
const QString AudioSpatialProcessingIncudeOriginal = "Audio Spatial Processing includes Original";
+const QString AudioSpatialProcessingSeparateEars = "Audio Spatial Processing separates ears";
+const QString AudioSpatialProcessingPreDelay = "Audio Spatial Processing add Pre-Delay";
const QString EchoServerAudio = "Echo Server Audio";
const QString EchoLocalAudio = "Echo Local Audio";
const QString MuteAudio = "Mute Microphone";