mirror of https://github.com/overte-org/overte.git (synced 2025-08-09 11:28:03 +02:00)

lots of knobs and dials

parent c35d6bdb0c
commit 593fc6c963

5 changed files with 154 additions and 78 deletions
@@ -2787,7 +2787,7 @@ void Application::displayStats() {
 
     glm::vec3 avatarPos = _myAvatar->getPosition();
 
-    lines = _statsExpanded ? 5 : 3;
+    lines = _statsExpanded ? 8 : 3;
     displayStatsBackground(backgroundColor, horizontalOffset, 0, _glWidget->width() - (mirrorEnabled ? 301 : 411) - horizontalOffset, lines * STATS_PELS_PER_LINE + 10);
     horizontalOffset += 5;
 
@@ -2840,26 +2840,34 @@ void Application::displayStats() {
         // add some reflection stats
         char reflectionsStatus[128];
 
-        sprintf(reflectionsStatus, "Reflections: %d, Pre-Delay: %f, Separate Ears:%s",
+        sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s",
                 _audioReflector.getReflections(),
-                _audioReflector.getDelayFromDistance(0.0f),
-                debug::valueOf(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars)));
+                (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal)
+                    ? "with" : "without"),
+                (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars)
+                    ? "two" : "one"),
+                (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource)
+                    ? "stereo" : "mono")
+                );
 
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT);
 
-        sprintf(reflectionsStatus, "Delay: average %f, max %f, min %f",
+        sprintf(reflectionsStatus, "Delay: pre: %f, average %f, max %f, min %f, speed: %f",
+                _audioReflector.getDelayFromDistance(0.0f),
                 _audioReflector.getAverageDelayMsecs(),
                 _audioReflector.getMaxDelayMsecs(),
-                _audioReflector.getMinDelayMsecs());
+                _audioReflector.getMinDelayMsecs(),
+                _audioReflector.getSoundMsPerMeter());
 
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT);
 
-        sprintf(reflectionsStatus, "Attenuation: average %f, max %f, min %f",
+        sprintf(reflectionsStatus, "Attenuation: average %f, max %f, min %f, distance scale: %f",
                 _audioReflector.getAverageAttenuation(),
                 _audioReflector.getMaxAttenuation(),
-                _audioReflector.getMinAttenuation());
+                _audioReflector.getMinAttenuation(),
+                _audioReflector.getDistanceAttenuationScalingFactor());
 
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, WHITE_TEXT);
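
Note: with the defaults introduced further down in this commit (20 ms pre-delay, 3 ms per meter, distance scale 2.0) and all three spatial-processing options checked, the first of these stats lines would render roughly as "Reflections: 16, Original: with, Ears: two, Source: stereo" (the reflection count here is a made-up value). The delay line now leads with the pre-delay and ends with the speed-of-sound knob, and the attenuation line ends with the distance scale.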
@@ -3725,6 +3733,7 @@ void Application::loadScript(const QString& scriptName) {
     scriptEngine->registerGlobalObject("Menu", MenuScriptingInterface::getInstance());
     scriptEngine->registerGlobalObject("Settings", SettingsScriptingInterface::getInstance());
     scriptEngine->registerGlobalObject("AudioDevice", AudioDeviceScriptingInterface::getInstance());
+    scriptEngine->registerGlobalObject("AudioReflector", &_audioReflector);
 
     QThread* workerThread = new QThread(this);
 

@@ -11,8 +11,18 @@
 #include "AudioReflector.h"
 #include "Menu.h"
 
+const float DEFAULT_PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections
+const float DEFAULT_MS_DELAY_PER_METER = 3.0f;
+const float MINIMUM_ATTENUATION_TO_REFLECT = 1.0f / 256.0f;
+const float DEFAULT_DISTANCE_SCALING_FACTOR = 2.0f;
+
 AudioReflector::AudioReflector(QObject* parent) :
-    QObject(parent)
+    QObject(parent),
+    _preDelay(DEFAULT_PRE_DELAY),
+    _soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
+    _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR)
 {
     reset();
 }
@@ -34,14 +44,12 @@ void AudioReflector::render() {
 //       = 3ms per meter
 //    attenuation =
 //        BOUNCE_ATTENUATION_FACTOR [0.5] * (1/(1+distance))
-const float PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections
 
 float AudioReflector::getDelayFromDistance(float distance) {
-    const int MS_DELAY_PER_METER = 3;
-    float delay = (MS_DELAY_PER_METER * distance);
+    float delay = (_soundMsPerMeter * distance);
 
     if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) {
-        delay += PRE_DELAY;
+        delay += _preDelay;
     }
 
 
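
Note: getDelayFromDistance() is now driven entirely by the two new members rather than local constants. A minimal standalone sketch of the same arithmetic, with the Menu check swapped for a plain bool and the defaults taken from the constants added above (3 ms per meter, 20 ms pre-delay):

    #include <cstdio>

    // stand-in for the member function: msPerMeter and preDelayMs play the
    // roles of _soundMsPerMeter and _preDelay
    static float delayFromDistance(float distanceMeters, float msPerMeter, float preDelayMs, bool wantPreDelay) {
        float delay = msPerMeter * distanceMeters;   // propagation time along the reflected path
        if (wantPreDelay) {
            delay += preDelayMs;                     // fixed offset added to every reflection
        }
        return delay;
    }

    int main() {
        // a 10 m reflection path with the defaults: 3 * 10 + 20 = 50 ms
        std::printf("%.1f ms\n", delayFromDistance(10.0f, 3.0f, 20.0f, true));
        return 0;
    }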
@@ -52,14 +60,14 @@ float AudioReflector::getDelayFromDistance(float distance) {
 const float PER_BOUNCE_ATTENUATION_FACTOR = 0.5f;
 
 // **option 2**: we're not using these
-const float BOUNCE_ATTENUATION_FACTOR = 0.125f;
+//const float BOUNCE_ATTENUATION_FACTOR = 0.125f;
 // each bounce we adjust our attenuation by this factor, the result is an asymptotically decreasing attenuation...
 // 0.125, 0.25, 0.5, ...
-const float PER_BOUNCE_ATTENUATION_ADJUSTMENT = 2.0f;
+//const float PER_BOUNCE_ATTENUATION_ADJUSTMENT = 2.0f;
 // we don't grow larger than this, which means by the 4th bounce we don't get that much less quiet
-const float MAX_BOUNCE_ATTENUATION = 0.99f;
+//const float MAX_BOUNCE_ATTENUATION = 0.99f;
 
-float getDistanceAttenuationCoefficient(float distance) {
+float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
     const float DISTANCE_SCALE = 2.5f;
     const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
     const float DISTANCE_LOG_BASE = 2.5f;
@@ -72,13 +80,15 @@ float getDistanceAttenuationCoefficient(float distance) {
                              DISTANCE_SCALE_LOG +
                              (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
 
-    const float DISTANCE_SCALING_FACTOR = 2.0f;
-
-    distanceCoefficient = std::min(1.0f, distanceCoefficient * DISTANCE_SCALING_FACTOR);
+    distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor());
 
     return distanceCoefficient;
 }
 
+float getBounceAttenuationCoefficient(int bounceCount) {
+    return PER_BOUNCE_ATTENUATION_FACTOR * bounceCount;
+}
+
 glm::vec3 getFaceNormal(BoxFace face) {
     float surfaceRandomness = randFloatInRange(0.99,1.0);
     float surfaceRemainder = (1.0f - surfaceRandomness)/2.0f;
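
Note: getBounceAttenuationCoefficient() is simply PER_BOUNCE_ATTENUATION_FACTOR * bounceCount, i.e. 0.5, 1.0, 1.5, ... for bounces 1, 2, 3. The level applied to an echo is that value multiplied by the distance coefficient above, and the same product is what the reworked ray loop below compares against MINIMUM_ATTENUATION_TO_REFLECT (1/256, about 0.0039) to decide when to stop bouncing.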
@@ -128,10 +138,14 @@ void AudioReflector::calculateAllReflections() {
 
     // only recalculate when we've moved...
     // TODO: what about case where new voxels are added in front of us???
-    if (_reflections == 0 || _myAvatar->getHead()->getPosition() != _origin) {
-        QMutexLocker locker(&_mutex);
+    bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented);
+    glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation();
 
-    qDebug() << "origin has changed...";
+    bool shouldRecalc = _reflections == 0 || _myAvatar->getHead()->getPosition() != _origin || (orientation != _orientation);
+
+    /*
+    qDebug() << "wantHeadOrientation=" << wantHeadOrientation;
+
     qDebug(" _myAvatar->getHead()->getPosition()=%f,%f,%f",
         _myAvatar->getHead()->getPosition().x,
         _myAvatar->getHead()->getPosition().y,
@@ -142,15 +156,34 @@ void AudioReflector::calculateAllReflections() {
         _origin.y,
         _origin.z);
 
+    qDebug(" orientation=%f,%f,%f,%f",
+        orientation.x,
+        orientation.y,
+        orientation.z,
+        orientation.w);
+
+    qDebug(" _orientation=%f,%f,%f,%f",
+        _orientation.x,
+        _orientation.y,
+        _orientation.z,
+        _orientation.w);
+    */
+    if (shouldRecalc) {
+        //qDebug() << "origin or orientation has changed...";
+
+        QMutexLocker locker(&_mutex);
 
         quint64 start = usecTimestampNow();
 
         _origin = _myAvatar->getHead()->getPosition();
+        glm::vec3 averageEarPosition = _myAvatar->getHead()->getPosition();
 
-        glm::quat orientation = _myAvatar->getOrientation(); // _myAvatar->getHead()->getOrientation();
-        glm::vec3 right = glm::normalize(orientation * IDENTITY_RIGHT);
-        glm::vec3 up = glm::normalize(orientation * IDENTITY_UP);
-        glm::vec3 front = glm::normalize(orientation * IDENTITY_FRONT);
+        _orientation = orientation;
+        glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT);
+        glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP);
+        glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT);
         glm::vec3 left = -right;
         glm::vec3 down = -up;
         glm::vec3 back = -front;
@@ -163,33 +196,31 @@ void AudioReflector::calculateAllReflections() {
         glm::vec3 backRightDown = glm::normalize(back + right + down);
         glm::vec3 backLeftDown = glm::normalize(back + left + down);
 
-        const int BOUNCE_COUNT = 5;
-
-        _frontRightUpReflections = calculateReflections(_origin, frontRightUp, BOUNCE_COUNT);
-        _frontLeftUpReflections = calculateReflections(_origin, frontLeftUp, BOUNCE_COUNT);
-        _backRightUpReflections = calculateReflections(_origin, backRightUp, BOUNCE_COUNT);
-        _backLeftUpReflections = calculateReflections(_origin, backLeftUp, BOUNCE_COUNT);
-        _frontRightDownReflections = calculateReflections(_origin, frontRightDown, BOUNCE_COUNT);
-        _frontLeftDownReflections = calculateReflections(_origin, frontLeftDown, BOUNCE_COUNT);
-        _backRightDownReflections = calculateReflections(_origin, backRightDown, BOUNCE_COUNT);
-        _backLeftDownReflections = calculateReflections(_origin, backLeftDown, BOUNCE_COUNT);
-        _frontReflections = calculateReflections(_origin, front, BOUNCE_COUNT);
-        _backReflections = calculateReflections(_origin, back, BOUNCE_COUNT);
-        _leftReflections = calculateReflections(_origin, left, BOUNCE_COUNT);
-        _rightReflections = calculateReflections(_origin, right, BOUNCE_COUNT);
-        _upReflections = calculateReflections(_origin, up, BOUNCE_COUNT);
-        _downReflections = calculateReflections(_origin, down, BOUNCE_COUNT);
+        _frontRightUpReflections = calculateReflections(averageEarPosition, _origin, frontRightUp);
+        _frontLeftUpReflections = calculateReflections(averageEarPosition, _origin, frontLeftUp);
+        _backRightUpReflections = calculateReflections(averageEarPosition, _origin, backRightUp);
+        _backLeftUpReflections = calculateReflections(averageEarPosition, _origin, backLeftUp);
+        _frontRightDownReflections = calculateReflections(averageEarPosition, _origin, frontRightDown);
+        _frontLeftDownReflections = calculateReflections(averageEarPosition, _origin, frontLeftDown);
+        _backRightDownReflections = calculateReflections(averageEarPosition, _origin, backRightDown);
+        _backLeftDownReflections = calculateReflections(averageEarPosition, _origin, backLeftDown);
+        _frontReflections = calculateReflections(averageEarPosition, _origin, front);
+        _backReflections = calculateReflections(averageEarPosition, _origin, back);
+        _leftReflections = calculateReflections(averageEarPosition, _origin, left);
+        _rightReflections = calculateReflections(averageEarPosition, _origin, right);
+        _upReflections = calculateReflections(averageEarPosition, _origin, up);
+        _downReflections = calculateReflections(averageEarPosition, _origin, down);
 
         quint64 end = usecTimestampNow();
 
         reset();
 
-        qDebug() << "Reflections recalculated in " << (end - start) << "usecs";
+        //qDebug() << "Reflections recalculated in " << (end - start) << "usecs";
 
     }
 }
 
-QVector<glm::vec3> AudioReflector::calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, int maxBounces) {
+QVector<glm::vec3> AudioReflector::calculateReflections(const glm::vec3& earPosition, const glm::vec3& origin, const glm::vec3& originalDirection) {
     QVector<glm::vec3> reflectionPoints;
     glm::vec3 start = origin;
     glm::vec3 direction = originalDirection;
@@ -197,16 +228,28 @@ QVector<glm::vec3> AudioReflector::calculateReflections(const glm::vec3& origin,
     float distance;
     BoxFace face;
     const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
+    float currentAttenuation = 1.0f;
+    float totalDistance = 0.0f;
+    int bounceCount = 1;
 
-    for (int i = 0; i < maxBounces; i++) {
+    while (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) {
         if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
             glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));
 
-            reflectionPoints.push_back(end);
+            totalDistance += glm::distance(start, end);
+            float earDistance = glm::distance(end, earPosition);
+            float totalDistance = earDistance + distance;
+            currentAttenuation = getDistanceAttenuationCoefficient(totalDistance) * getBounceAttenuationCoefficient(bounceCount);
+
+            if (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) {
+                reflectionPoints.push_back(end);
             glm::vec3 faceNormal = getFaceNormal(face);
             direction = glm::normalize(glm::reflect(direction,faceNormal));
             start = end;
+                bounceCount++;
+            }
+        } else {
+            currentAttenuation = 0.0f;
         }
     }
     return reflectionPoints;
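
Note: the fixed BOUNCE_COUNT is gone; a ray now keeps bouncing until the estimated attenuation at the listener drops below MINIMUM_ATTENUATION_TO_REFLECT. A simplified, self-contained sketch of just that termination rule (the voxel ray cast is dropped and the real log-based distance curve is replaced by a stand-in, so only the control flow matches the code above):

    #include <cstdio>

    const float MINIMUM_ATTENUATION_TO_REFLECT = 1.0f / 256.0f;   // constant added in this commit
    const float PER_BOUNCE_ATTENUATION_FACTOR = 0.5f;

    // stand-in for getDistanceAttenuationCoefficient(); the real curve is log-based
    static float distanceAttenuation(float distance) {
        return 1.0f / (1.0f + distance * distance);
    }

    // same shape as getBounceAttenuationCoefficient() above
    static float bounceAttenuation(int bounceCount) {
        return PER_BOUNCE_ATTENUATION_FACTOR * bounceCount;
    }

    int main() {
        const float HOP_LENGTH = 4.0f;   // pretend every bounce adds 4 m of path
        float totalDistance = 0.0f;
        float currentAttenuation = 1.0f;
        int bounceCount = 1;

        // same loop shape as the new calculateReflections(): keep bouncing while audible
        while (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) {
            totalDistance += HOP_LENGTH;
            currentAttenuation = distanceAttenuation(totalDistance) * bounceAttenuation(bounceCount);
            std::printf("bounce %d: path %.0f m, attenuation %.5f\n", bounceCount, totalDistance, currentAttenuation);
            bounceCount++;
        }
        return 0;
    }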
@@ -234,6 +277,7 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector<glm:
                                      unsigned int sampleTime, int sampleRate) {
 
     bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
+    bool wantStereo = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource);
     glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() :
                                      _myAvatar->getHead()->getPosition();
     glm::vec3 leftEarPosition = wantEarSeparation ? _myAvatar->getHead()->getLeftEarPosition() :
@@ -254,12 +298,10 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector<glm:
 
     float rightDistance = 0;
     float leftDistance = 0;
-    float bounceAttenuation = PER_BOUNCE_ATTENUATION_FACTOR;
-
-    int bounce = 0;
+    int bounceCount = 0;
 
     foreach (glm::vec3 end, reflections) {
-        bounce++;
+        bounceCount++;
 
         rightDistance += glm::distance(start, end);
         leftDistance += glm::distance(start, end);
@@ -287,6 +329,7 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector<glm:
         //qDebug() << "leftTotalDistance=" << leftTotalDistance << "rightTotalDistance=" << rightTotalDistance;
         //qDebug() << "leftEarDelay=" << leftEarDelay << "rightEarDelay=" << rightEarDelay;
 
+        float bounceAttenuation = getBounceAttenuationCoefficient(bounceCount);
         float rightEarAttenuation = getDistanceAttenuationCoefficient(rightTotalDistance) * bounceAttenuation;
         float leftEarAttenuation = getDistanceAttenuationCoefficient(leftTotalDistance) * bounceAttenuation;
 
@@ -297,20 +340,15 @@ void AudioReflector::echoReflections(const glm::vec3& origin, const QVector<glm:
         _minAttenuation = std::min(_minAttenuation,rightEarAttenuation);
         _minAttenuation = std::min(_minAttenuation,leftEarAttenuation);
 
-        //qDebug() << "leftEarAttenuation=" << leftEarAttenuation << "rightEarAttenuation=" << rightEarAttenuation;
-
-        //qDebug() << "bounce=" << bounce <<
-        //    "bounceAttenuation=" << bounceAttenuation;
-        //bounceAttenuationFactor = std::min(MAX_BOUNCE_ATTENUATION, bounceAttenuationFactor * PER_BOUNCE_ATTENUATION_ADJUSTMENT);
-        bounceAttenuation *= PER_BOUNCE_ATTENUATION_FACTOR;
-
         // run through the samples, and attenuate them
         for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
             int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
-            int16_t rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
+            int16_t rightSample = leftSample;
+            if (wantStereo) {
+                rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
+            }
 
             //qDebug() << "leftSample=" << leftSample << "rightSample=" << rightSample;
 
             attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
             attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
 
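
Note: the per-sample change above is what the new "Stereo Source" option controls: a mono source feeds the left sample to both ears, while a stereo source keeps its original right channel. A small stand-alone sketch of just that selection (the names are stand-ins for the locals in echoReflections()):

    #include <cstdint>

    const int NUMBER_OF_CHANNELS = 2;   // interleaved stereo, as in the loop above

    // pick the right-ear source sample for one interleaved frame
    static int16_t rightSourceSample(const int16_t* originalSamplesData, int sample, bool wantStereo) {
        int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
        int16_t rightSample = leftSample;                                         // mono source: mirror the left channel
        if (wantStereo) {
            rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1]; // stereo source: keep original right
        }
        return rightSample;
    }

    int main() {
        int16_t frames[4] = { 100, -40, 200, -80 };              // two interleaved L/R frames
        bool ok = rightSourceSample(frames, 0, false) == 100     // mono: mirrors left
               && rightSourceSample(frames, 0, true) == -40;     // stereo: original right
        return ok ? 0 : 1;
    }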
@@ -374,6 +412,7 @@ void AudioReflector::processSpatialAudio(unsigned int sampleTime, const QByteArr
 }
 
 void AudioReflector::drawRays() {
+    //qDebug() << "AudioReflector::drawRays()";
 
     calculateAllReflections();
 
@@ -482,6 +521,7 @@ void AudioReflector::drawRays() {
     drawReflections(_origin, upColor, _upReflections);
     drawReflections(_origin, downColor, _downReflections);
 
+    /*
     qDebug() << "_reflections:" << _reflections
         << "_averageDelay:" << _averageDelay
         << "_maxDelay:" << _maxDelay
@@ -490,6 +530,7 @@ void AudioReflector::drawRays() {
     qDebug() << "_averageAttenuation:" << _averageAttenuation
         << "_maxAttenuation:" << _maxAttenuation
         << "_minAttenuation:" << _minAttenuation;
+    */
 }
 
 void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) {

@@ -36,9 +36,19 @@ public:
     float getMinAttenuation() const { return _minAttenuation; }
     float getDelayFromDistance(float distance);
 
-public slots:
     void processSpatialAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
 
+public slots:
+
+    float getPreDelay() const { return _preDelay; }
+    void setPreDelay(float preDelay) { _preDelay = preDelay; }
+    float getSoundMsPerMeter() const { return _soundMsPerMeter; } /// ms per meter, larger means slower
+    void setSoundMsPerMeter(float soundMsPerMeter) { _soundMsPerMeter = soundMsPerMeter; }
+    float getDistanceAttenuationScalingFactor() const { return _distanceAttenuationScalingFactor; } /// ms per meter, larger means slower
+    void setDistanceAttenuationScalingFactor(float factor) { _distanceAttenuationScalingFactor = factor; }
+
+
 signals:
 
 private:
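
Note: the new getters/setters are declared as public slots, which is what makes them reachable through the "AudioReflector" global registered in Application::loadScript() earlier in this commit. A generic QtScript sketch of that mechanism; the stand-in Knobs class and the engine setup here are illustrative assumptions, not the interface's actual ScriptEngine:

    #include <QCoreApplication>
    #include <QDebug>
    #include <QObject>
    #include <QScriptEngine>

    // stand-in with the same slot names as the new AudioReflector knobs
    class Knobs : public QObject {
        Q_OBJECT
    public slots:
        void setPreDelay(float preDelay) { qDebug() << "preDelay =" << preDelay; }
        void setSoundMsPerMeter(float msPerMeter) { qDebug() << "soundMsPerMeter =" << msPerMeter; }
        void setDistanceAttenuationScalingFactor(float factor) { qDebug() << "distance scale =" << factor; }
    };

    int main(int argc, char** argv) {
        QCoreApplication app(argc, argv);
        Knobs knobs;
        QScriptEngine engine;

        // comparable in spirit to registerGlobalObject("AudioReflector", &_audioReflector)
        engine.globalObject().setProperty("AudioReflector", engine.newQObject(&knobs));

        // a script can now turn the dials at runtime
        engine.evaluate("AudioReflector.setPreDelay(100); AudioReflector.setSoundMsPerMeter(5);");
        return 0;
    }

    #include "knobs_example.moc"   // assumes this file is knobs_example.cpp, built with qmake or CMake AUTOMOC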
@@ -52,11 +62,12 @@ private:
     void echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples,
                          unsigned int sampleTime, int sampleRate);
 
-    QVector<glm::vec3> calculateReflections(const glm::vec3& origin, const glm::vec3& originalDirection, int maxBounces);
+    QVector<glm::vec3> calculateReflections(const glm::vec3& earPosition, const glm::vec3& origin, const glm::vec3& originalDirection);
     void drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections);
 
     void calculateAllReflections();
     void reset();
+    float getDistanceAttenuationCoefficient(float distance);
 
     int _reflections;
 
@@ -73,6 +84,7 @@ private:
     float _minAttenuation;
 
    glm::vec3 _origin;
+    glm::quat _orientation;
     QVector<glm::vec3> _frontRightUpReflections;
     QVector<glm::vec3> _frontLeftUpReflections;
     QVector<glm::vec3> _backRightUpReflections;
@@ -90,6 +102,10 @@ private:
 
     QMutex _mutex;
 
+    float _preDelay;
+    float _soundMsPerMeter;
+    float _distanceAttenuationScalingFactor;
+
 };
 
 

@@ -331,8 +331,8 @@ Menu::Menu() :
 
 
     QMenu* renderDebugMenu = developerMenu->addMenu("Render Debugging Tools");
-    addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::PipelineWarnings, Qt::CTRL | Qt::SHIFT | Qt::Key_P);
-    addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::SuppressShortTimings, Qt::CTRL | Qt::SHIFT | Qt::Key_S);
+    addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::PipelineWarnings);
+    addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::SuppressShortTimings);
 
     addCheckableActionToQMenuAndActionHash(renderDebugMenu,
                                            MenuOption::CullSharedFaces,
@@ -384,6 +384,14 @@ Menu::Menu() :
                                            Qt::CTRL | Qt::SHIFT | Qt::Key_D,
                                            true);
 
+    addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingStereoSource,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_S,
+                                           true);
+
+    addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioSpatialProcessingHeadOriented,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_H,
+                                           true);
+
     addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::LowPassFilter,
                                            Qt::CTRL | Qt::SHIFT | Qt::Key_F,
                                            false);

@@ -247,6 +247,8 @@ namespace MenuOption {
     const QString AudioSpatialProcessingIncudeOriginal = "Audio Spatial Processing includes Original";
     const QString AudioSpatialProcessingSeparateEars = "Audio Spatial Processing separates ears";
     const QString AudioSpatialProcessingPreDelay = "Audio Spatial Processing add Pre-Delay";
+    const QString AudioSpatialProcessingStereoSource = "Audio Spatial Processing Stereo Source";
+    const QString AudioSpatialProcessingHeadOriented = "Audio Spatial Processing Head Oriented";
     const QString EchoServerAudio = "Echo Server Audio";
     const QString EchoLocalAudio = "Echo Local Audio";
     const QString MuteAudio = "Mute Microphone";