remove old mode, replaced by new mode

ZappoMan 2014-04-14 21:02:09 -07:00
parent 7448e0ab16
commit 1ed5512925
3 changed files with 44 additions and 524 deletions

AudioReflector.cpp

@@ -35,7 +35,14 @@ AudioReflector::AudioReflector(QObject* parent) :
_diffusionRatio(DEFAULT_DIFFUSION_RATIO),
_withDiffusion(false)
{
reset();
_reflections = 0;
_diffusionPathCount = 0;
_averageAttenuation = 0.0f;
_maxAttenuation = 0.0f;
_minAttenuation = 0.0f;
_averageDelay = 0;
_maxDelay = 0;
_minDelay = 0;
}
@@ -45,46 +52,27 @@ void AudioReflector::render() {
if (!_myAvatar || !_audio->getProcessSpatialAudio()) {
return;
}
bool withDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
// Even if we're not rendering, use this as a chance to recalculate our reflections
if (withDiffusions) {
newCalculateAllReflections();
} else {
calculateAllReflections();
}
// use this opportunity to calculate our reflections
calculateAllReflections();
// only render if we've been asked to do so
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingRenderPaths)) {
// here's where we actually render
if (withDiffusions) {
newDrawRays();
} else {
drawRays();
}
drawRays();
}
}
// delay = 1ms per foot
// = 3ms per meter
// attenuation =
// BOUNCE_ATTENUATION_FACTOR [0.5] * (1/(1+distance))
float AudioReflector::getDelayFromDistance(float distance) {
float delay = (_soundMsPerMeter * distance);
// NOTE: kind of hacky, the old code (which didn't handle diffusions) assumes that this function
// will add in any and all pre delay. But the new method (which includes diffusions) handles pre delay
// on its own. So we only add in pre delay if pre delay is enabled and we're not in diffusion mode.
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) &&
!Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)) {
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) {
delay += _preDelay;
}
return delay;
}
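
The delay model above is plain speed-of-sound arithmetic: sound covers roughly 343 m/s, so it takes about 2.9 ms per meter (roughly 1 ms per foot), which the comment rounds to 3 ms per meter. A minimal standalone sketch of the same calculation (the constant values are illustrative assumptions, not the class's actual members):

    #include <cstdio>

    // Assumed values mirroring the comments above: ~3 ms of delay per meter of
    // path length, plus an optional fixed pre-delay.
    const float SOUND_MS_PER_METER = 3.0f;
    const float PRE_DELAY_MS = 20.0f; // illustrative; the real _preDelay is configurable

    float delayFromDistance(float distanceMeters, bool preDelayEnabled) {
        float delay = SOUND_MS_PER_METER * distanceMeters;
        if (preDelayEnabled) {
            delay += PRE_DELAY_MS;
        }
        return delay;
    }

    int main() {
        // A 10 m reflection path arrives ~30 ms after emission, or ~50 ms
        // with the assumed 20 ms pre-delay enabled.
        printf("%.1f ms\n", delayFromDistance(10.0f, false)); // 30.0 ms
        printf("%.1f ms\n", delayFromDistance(10.0f, true));  // 50.0 ms
        return 0;
    }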
// attenuation = from the Audio Mixer
float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
const float DISTANCE_SCALE = 2.5f;
const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
@@ -103,14 +91,6 @@ float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
return distanceCoefficient;
}
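
The hunk elides the body of getDistanceAttenuationCoefficient, but the two constants point at a geometric falloff. Purely as an illustration of what such a coefficient can look like (an assumed shape, not a reconstruction of the elided code):

    #include <cmath>

    // Hypothetical geometric falloff built from the constants shown above:
    // amplitude halves every DISTANCE_SCALE meters, scaled by
    // GEOMETRIC_AMPLITUDE_SCALAR, and clamped so it never amplifies.
    float distanceAttenuationSketch(float distance) {
        const float DISTANCE_SCALE = 2.5f;
        const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
        float coefficient = GEOMETRIC_AMPLITUDE_SCALAR *
                            std::pow(0.5f, distance / DISTANCE_SCALE);
        return coefficient < 1.0f ? coefficient : 1.0f;
    }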
// This is used in the "old" model with diffusions... it's essentially the amount of energy that is reflected on each bounce
float AudioReflector::getBounceAttenuationCoefficient(int bounceCount) {
// now we know the current attenuation for the "perfect" reflection case, but we now incorporate
// our surface materials to determine how much of this ray is absorbed, reflected, and diffused
SurfaceCharacteristics material = getSurfaceCharacteristics();
return powf(material.reflectiveRatio, bounceCount);
}
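
powf(material.reflectiveRatio, bounceCount) is a plain geometric decay: every bounce keeps a fixed fraction of the remaining energy. A quick numeric check, assuming the 0.5 factor mentioned in the earlier BOUNCE_ATTENUATION_FACTOR comment:

    #include <cmath>
    #include <cstdio>

    int main() {
        // With a reflective ratio of 0.5, each bounce halves the remaining energy.
        const float reflectiveRatio = 0.5f;
        for (int bounce = 1; bounce <= 4; bounce++) {
            printf("bounce %d -> attenuation %.4f\n", bounce,
                   std::pow(reflectiveRatio, bounce));
        }
        // bounce 1 -> 0.5000, 2 -> 0.2500, 3 -> 0.1250, 4 -> 0.0625
        return 0;
    }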
glm::vec3 AudioReflector::getFaceNormal(BoxFace face) {
bool wantSlightRandomness = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces);
@@ -137,285 +117,10 @@ glm::vec3 AudioReflector::getFaceNormal(BoxFace face) {
return faceNormal;
}
void AudioReflector::reset() {
_reflections = 0;
_diffusionPathCount = 0;
_averageAttenuation = 0.0f;
_maxAttenuation = 0.0f;
_minAttenuation = 0.0f;
_averageDelay = 0;
_maxDelay = 0;
_minDelay = 0;
_reflections = _frontRightUpReflections.size() +
_frontLeftUpReflections.size() +
_backRightUpReflections.size() +
_backLeftUpReflections.size() +
_frontRightDownReflections.size() +
_frontLeftDownReflections.size() +
_backRightDownReflections.size() +
_backLeftDownReflections.size() +
_frontReflections.size() +
_backReflections.size() +
_leftReflections.size() +
_rightReflections.size() +
_upReflections.size() +
_downReflections.size();
}
void AudioReflector::calculateAllReflections() {
// only recalculate when we've moved...
// TODO: what about case where new voxels are added in front of us???
bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented);
glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation();
glm::vec3 origin = _myAvatar->getHead()->getPosition();
glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition();
bool withDiffusion = false; // this is the non-diffusion mode.
bool shouldRecalc = _reflections == 0
|| !isSimilarPosition(origin, _origin)
|| !isSimilarOrientation(orientation, _orientation)
|| !isSimilarPosition(listenerPosition, _listenerPosition)
|| (withDiffusion != _withDiffusion);
if (shouldRecalc) {
qDebug() << "RECALC...... !!!!!!!!!!!!!!!!!!!!!!!!!!!!";
if (_reflections == 0) {
qDebug() << "RECALC...... No reflections!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
}
if (!isSimilarPosition(origin, _origin)) {
qDebug() << "RECALC...... origin changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
}
if (!isSimilarOrientation(orientation, _orientation)) {
qDebug() << "RECALC...... orientation changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
}
if (!isSimilarPosition(listenerPosition, _listenerPosition)) {
qDebug() << "RECALC...... listenerPosition changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
}
if (withDiffusion != _withDiffusion) {
qDebug() << "RECALC...... withDiffusion changed...!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
}
QMutexLocker locker(&_mutex);
quint64 start = usecTimestampNow();
_origin = origin;
_listenerPosition = listenerPosition;
_orientation = orientation;
_withDiffusion = withDiffusion;
glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT);
glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP);
glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT);
glm::vec3 left = -right;
glm::vec3 down = -up;
glm::vec3 back = -front;
glm::vec3 frontRightUp = glm::normalize(front + right + up);
glm::vec3 frontLeftUp = glm::normalize(front + left + up);
glm::vec3 backRightUp = glm::normalize(back + right + up);
glm::vec3 backLeftUp = glm::normalize(back + left + up);
glm::vec3 frontRightDown = glm::normalize(front + right + down);
glm::vec3 frontLeftDown = glm::normalize(front + left + down);
glm::vec3 backRightDown = glm::normalize(back + right + down);
glm::vec3 backLeftDown = glm::normalize(back + left + down);
_rightReflections = calculateReflections(listenerPosition, _origin, right);
_frontRightUpReflections = calculateReflections(listenerPosition, _origin, frontRightUp);
_frontLeftUpReflections = calculateReflections(listenerPosition, _origin, frontLeftUp);
_backRightUpReflections = calculateReflections(listenerPosition, _origin, backRightUp);
_backLeftUpReflections = calculateReflections(listenerPosition, _origin, backLeftUp);
_frontRightDownReflections = calculateReflections(listenerPosition, _origin, frontRightDown);
_frontLeftDownReflections = calculateReflections(listenerPosition, _origin, frontLeftDown);
_backRightDownReflections = calculateReflections(listenerPosition, _origin, backRightDown);
_backLeftDownReflections = calculateReflections(listenerPosition, _origin, backLeftDown);
_frontReflections = calculateReflections(listenerPosition, _origin, front);
_backReflections = calculateReflections(listenerPosition, _origin, back);
_leftReflections = calculateReflections(listenerPosition, _origin, left);
_upReflections = calculateReflections(listenerPosition, _origin, up);
_downReflections = calculateReflections(listenerPosition, _origin, down);
quint64 end = usecTimestampNow();
reset();
const bool wantDebugging = false;
if (wantDebugging) {
qDebug() << "calculateAllReflections() elapsed=" << (end - start);
}
}
}
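
The old mode traces one reflection chain along each of 14 fixed probe directions in the listener's frame: the six axis directions plus the eight normalized corner diagonals, exactly as assembled above. A self-contained sketch of that direction set (the identity axes are assumed to be +X right, +Y up, -Z front):

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>
    #include <vector>

    // Build the 14 probe directions: 6 axes plus 8 corner diagonals, rotated
    // into the listener's frame by the given orientation.
    std::vector<glm::vec3> probeDirections(const glm::quat& orientation) {
        glm::vec3 right = glm::normalize(orientation * glm::vec3(1.0f, 0.0f, 0.0f));
        glm::vec3 up    = glm::normalize(orientation * glm::vec3(0.0f, 1.0f, 0.0f));
        glm::vec3 front = glm::normalize(orientation * glm::vec3(0.0f, 0.0f, -1.0f));
        std::vector<glm::vec3> dirs = { right, -right, up, -up, front, -front };
        const float signs[2] = { 1.0f, -1.0f };
        for (float f : signs) {
            for (float r : signs) {
                for (float u : signs) {
                    dirs.push_back(glm::normalize(f * front + r * right + u * up));
                }
            }
        }
        return dirs; // 6 + 8 = 14 directions
    }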
QVector<glm::vec3> AudioReflector::calculateReflections(const glm::vec3& earPosition,
const glm::vec3& origin, const glm::vec3& originalDirection) {
QVector<glm::vec3> reflectionPoints;
glm::vec3 start = origin;
glm::vec3 direction = originalDirection;
OctreeElement* elementHit;
float distance;
BoxFace face;
const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
float currentAttenuation = 1.0f;
float totalDistance = 0.0f;
float totalDelay = 0.0f;
int bounceCount = 1;
while (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS && bounceCount < ABSOLUTE_MAXIMUM_BOUNCE_COUNT) {
if (_voxels->findRayIntersection(start, direction, elementHit, distance, face, Octree::Lock)) {
glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));
totalDistance += glm::distance(start, end);
float earDistance = glm::distance(end, earPosition);
float totalDistance = earDistance + distance;
totalDelay = getDelayFromDistance(totalDistance);
currentAttenuation = getDistanceAttenuationCoefficient(totalDistance) *
getBounceAttenuationCoefficient(bounceCount);
if (currentAttenuation > MINIMUM_ATTENUATION_TO_REFLECT && totalDelay < MAXIMUM_DELAY_MS) {
reflectionPoints.push_back(end);
glm::vec3 faceNormal = getFaceNormal(face);
direction = glm::normalize(glm::reflect(direction,faceNormal));
start = end;
bounceCount++;
}
} else {
currentAttenuation = 0.0f;
}
}
return reflectionPoints;
}
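
The bounce step leans on glm::reflect, which implements the standard mirror formula r = d - 2(d·n)n for an incident direction d and unit surface normal n. A freestanding sanity check:

    #include <glm/glm.hpp>
    #include <cstdio>

    int main() {
        // A ray heading down at 45 degrees onto a floor (normal +Y) should
        // leave going up at 45 degrees: r = d - 2*dot(d, n)*n.
        glm::vec3 d = glm::normalize(glm::vec3(1.0f, -1.0f, 0.0f));
        glm::vec3 n(0.0f, 1.0f, 0.0f);
        glm::vec3 r = glm::normalize(glm::reflect(d, n));
        printf("reflected: %.3f %.3f %.3f\n", r.x, r.y, r.z); // ~0.707 0.707 0.000
        return 0;
    }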
void AudioReflector::drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections) {
glm::vec3 start = origin;
glm::vec3 color = originalColor;
const float COLOR_ADJUST_PER_BOUNCE = 0.75f;
foreach (glm::vec3 end, reflections) {
drawVector(start, end, color);
start = end;
color = color * COLOR_ADJUST_PER_BOUNCE;
}
}
// set up our buffers for our attenuated and delayed samples
const int NUMBER_OF_CHANNELS = 2;
void AudioReflector::echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples,
unsigned int sampleTime, int sampleRate) {
bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
bool wantStereo = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource);
glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() :
_myAvatar->getHead()->getPosition();
glm::vec3 leftEarPosition = wantEarSeparation ? _myAvatar->getHead()->getLeftEarPosition() :
_myAvatar->getHead()->getPosition();
glm::vec3 start = origin;
int totalNumberOfSamples = samples.size() / sizeof(int16_t);
int totalNumberOfStereoSamples = samples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS);
const int16_t* originalSamplesData = (const int16_t*)samples.constData();
QByteArray attenuatedLeftSamples;
QByteArray attenuatedRightSamples;
attenuatedLeftSamples.resize(samples.size());
attenuatedRightSamples.resize(samples.size());
int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data();
int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data();
float rightDistance = 0;
float leftDistance = 0;
int bounceCount = 0;
foreach (glm::vec3 end, reflections) {
bounceCount++;
rightDistance += glm::distance(start, end);
leftDistance += glm::distance(start, end);
// calculate the distance to the ears
float rightEarDistance = glm::distance(end, rightEarPosition);
float leftEarDistance = glm::distance(end, leftEarPosition);
float rightTotalDistance = rightEarDistance + rightDistance;
float leftTotalDistance = leftEarDistance + leftDistance;
float rightEarDelayMsecs = getDelayFromDistance(rightTotalDistance);
float leftEarDelayMsecs = getDelayFromDistance(leftTotalDistance);
_totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
_delayCount += 2;
_maxDelay = std::max(_maxDelay,rightEarDelayMsecs);
_maxDelay = std::max(_maxDelay,leftEarDelayMsecs);
_minDelay = std::min(_minDelay,rightEarDelayMsecs);
_minDelay = std::min(_minDelay,leftEarDelayMsecs);
int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
//qDebug() << "leftTotalDistance=" << leftTotalDistance << "rightTotalDistance=" << rightTotalDistance;
//qDebug() << "leftEarDelay=" << leftEarDelay << "rightEarDelay=" << rightEarDelay;
float bounceAttenuation = getBounceAttenuationCoefficient(bounceCount);
float rightEarAttenuation = getDistanceAttenuationCoefficient(rightTotalDistance) * bounceAttenuation;
float leftEarAttenuation = getDistanceAttenuationCoefficient(leftTotalDistance) * bounceAttenuation;
/*
qDebug() << "audible point...";
qDebug() << " bounceCount=" << bounceCount;
qDebug() << " bounceAttenuation=" << bounceAttenuation;
qDebug() << " rightEarAttenuation=" << rightEarAttenuation;
qDebug() << " leftEarAttenuation=" << leftEarAttenuation;
*/
_totalAttenuation += rightEarAttenuation + leftEarAttenuation;
_attenuationCount += 2;
_maxAttenuation = std::max(_maxAttenuation,rightEarAttenuation);
_maxAttenuation = std::max(_maxAttenuation,leftEarAttenuation);
_minAttenuation = std::min(_minAttenuation,rightEarAttenuation);
_minAttenuation = std::min(_minAttenuation,leftEarAttenuation);
// run through the samples, and attenuate them
for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
int16_t rightSample = leftSample;
if (wantStereo) {
rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
}
//qDebug() << "leftSample=" << leftSample << "rightSample=" << rightSample;
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
//qDebug() << "attenuated... leftSample=" << (leftSample * leftEarAttenuation) << "rightSample=" << (rightSample * rightEarAttenuation);
}
// now inject the attenuated array with the appropriate delay
unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
unsigned int sampleTimeRight = sampleTime + rightEarDelay;
//qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight;
_audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
_audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
start = end;
}
}
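
The injection step converts each per-ear delay from milliseconds to a sample offset with delayMsecs * sampleRate / MSECS_PER_SECOND, then writes the attenuated copy at sampleTime plus that offset. Worked numbers (the 24 kHz rate is an assumption for illustration):

    #include <cstdio>

    const float MSECS_PER_SECOND = 1000.0f;

    int main() {
        // A 30 ms ear delay at a 24 kHz sample rate becomes a 720-sample
        // offset into the spatial audio buffer.
        float delayMsecs = 30.0f;
        int sampleRate = 24000;
        int delaySamples = delayMsecs * sampleRate / MSECS_PER_SECOND; // 720
        unsigned int sampleTime = 100000; // where the original samples start
        printf("inject at sample %u (offset %d)\n", sampleTime + delaySamples, delaySamples);
        return 0;
    }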
void AudioReflector::injectAudiblePoint(const AudioPoint& audiblePoint,
void AudioReflector::injectAudiblePoint(const AudiblePoint& audiblePoint,
const QByteArray& samples, unsigned int sampleTime, int sampleRate) {
bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
@@ -444,13 +149,6 @@ void AudioReflector::injectAudiblePoint(const AudioPoint& audiblePoint,
float rightEarDelayMsecs = getDelayFromDistance(rightEarDistance) + audiblePoint.delay;
float leftEarDelayMsecs = getDelayFromDistance(leftEarDistance) + audiblePoint.delay;
/*
qDebug() << "injectAudiblePoint()... ";
qDebug() << " audiblePoint.delay=" << audiblePoint.delay;
qDebug() << " rightEarDelayMsecs=" << rightEarDelayMsecs;
qDebug() << " leftEarDelayMsecs=" << leftEarDelayMsecs;
*/
_totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
_delayCount += 2;
_maxDelay = std::max(_maxDelay,rightEarDelayMsecs);
@@ -464,13 +162,6 @@ qDebug() << " leftEarDelayMsecs=" << leftEarDelayMsecs;
float rightEarAttenuation = audiblePoint.attenuation * getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance);
float leftEarAttenuation = audiblePoint.attenuation * getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance);
/*
qDebug() << "audible point...";
qDebug() << " audiblePoint.attenuation=" << audiblePoint.attenuation;
qDebug() << " rightEarAttenuation=" << rightEarAttenuation;
qDebug() << " leftEarAttenuation=" << leftEarAttenuation;
*/
_totalAttenuation += rightEarAttenuation + leftEarAttenuation;
_attenuationCount += 2;
_maxAttenuation = std::max(_maxAttenuation,rightEarAttenuation);
@@ -486,23 +177,17 @@ qDebug() << " leftEarDelayMsecs=" << leftEarDelayMsecs;
rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
}
//qDebug() << "leftSample=" << leftSample << "rightSample=" << rightSample;
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
//qDebug() << "attenuated... leftSample=" << (leftSample * leftEarAttenuation) << "rightSample=" << (rightSample * rightEarAttenuation);
}
// now inject the attenuated array with the appropriate delay
unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
unsigned int sampleTimeRight = sampleTime + rightEarDelay;
//qDebug() << "sampleTimeLeft=" << sampleTimeLeft << "sampleTimeRight=" << sampleTimeRight;
_audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
_audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
}
@@ -511,14 +196,10 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray
}
void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)) {
newEchoAudio(sampleTime, samples, format);
} else {
oldEchoAudio(sampleTime, samples, format);
}
echoAudio(sampleTime, samples, format);
}
void AudioReflector::newEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
void AudioReflector::echoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
_maxDelay = 0;
_maxAttenuation = 0.0f;
_minDelay = std::numeric_limits<int>::max();
@@ -530,7 +211,7 @@ void AudioReflector::newEchoAudio(unsigned int sampleTime, const QByteArray& sam
QMutexLocker locker(&_mutex);
foreach(const AudioPoint& audiblePoint, _audiblePoints) {
foreach(const AudiblePoint& audiblePoint, _audiblePoints) {
injectAudiblePoint(audiblePoint, samples, sampleTime, format.sampleRate());
}
@@ -545,84 +226,8 @@ void AudioReflector::newEchoAudio(unsigned int sampleTime, const QByteArray& sam
}
}
void AudioReflector::oldEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
//quint64 start = usecTimestampNow();
_maxDelay = 0;
_maxAttenuation = 0.0f;
_minDelay = std::numeric_limits<int>::max();
_minAttenuation = std::numeric_limits<float>::max();
_totalDelay = 0.0f;
_delayCount = 0;
_totalAttenuation = 0.0f;
_attenuationCount = 0;
QMutexLocker locker(&_mutex);
echoReflections(_origin, _frontRightUpReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _frontLeftUpReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _backRightUpReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _backLeftUpReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _frontRightDownReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _frontLeftDownReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _backRightDownReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _backLeftDownReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _frontReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _backReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _leftReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _rightReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _upReflections, samples, sampleTime, format.sampleRate());
echoReflections(_origin, _downReflections, samples, sampleTime, format.sampleRate());
_averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount;
_averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount;
_reflections = _frontRightUpReflections.size() +
_frontLeftUpReflections.size() +
_backRightUpReflections.size() +
_backLeftUpReflections.size() +
_frontRightDownReflections.size() +
_frontLeftDownReflections.size() +
_backRightDownReflections.size() +
_backLeftDownReflections.size() +
_frontReflections.size() +
_backReflections.size() +
_leftReflections.size() +
_rightReflections.size() +
_upReflections.size() +
_downReflections.size();
_diffusionPathCount = 0;
if (_reflections == 0) {
_minDelay = 0.0f;
_minAttenuation = 0.0f;
}
}
void AudioReflector::drawRays() {
const glm::vec3 RED(1,0,0);
QMutexLocker locker(&_mutex);
drawReflections(_origin, RED, _frontRightUpReflections);
drawReflections(_origin, RED, _frontLeftUpReflections);
drawReflections(_origin, RED, _backRightUpReflections);
drawReflections(_origin, RED, _backLeftUpReflections);
drawReflections(_origin, RED, _frontRightDownReflections);
drawReflections(_origin, RED, _frontLeftDownReflections);
drawReflections(_origin, RED, _backRightDownReflections);
drawReflections(_origin, RED, _backLeftDownReflections);
drawReflections(_origin, RED, _frontReflections);
drawReflections(_origin, RED, _backReflections);
drawReflections(_origin, RED, _leftReflections);
drawReflections(_origin, RED, _rightReflections);
drawReflections(_origin, RED, _upReflections);
drawReflections(_origin, RED, _downReflections);
}
void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) {
glDisable(GL_LIGHTING); // ??
glDisable(GL_LIGHTING);
glLineWidth(2.0);
// Draw the vector itself
@@ -632,7 +237,7 @@ void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, co
glVertex3f(end.x, end.y, end.z);
glEnd();
glEnable(GL_LIGHTING); // ??
glEnable(GL_LIGHTING);
}
@@ -657,7 +262,6 @@ AudioPath::AudioPath(const glm::vec3& origin, const glm::vec3& direction,
{
}
void AudioReflector::addSoundSource(const glm::vec3& origin, const glm::vec3& initialDirection,
float initialAttenuation, float initialDelay, float initialDistance) {
@@ -665,7 +269,7 @@ void AudioReflector::addSoundSource(const glm::vec3& origin, const glm::vec3& in
_audioPaths.push_back(path);
}
void AudioReflector::newCalculateAllReflections() {
void AudioReflector::calculateAllReflections() {
// only recalculate when we've moved...
// TODO: what about case where new voxels are added in front of us???
bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented);
@@ -673,19 +277,15 @@ void AudioReflector::newCalculateAllReflections() {
glm::vec3 origin = _myAvatar->getHead()->getPosition();
glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition();
bool withDiffusion = true; // this is the diffusion mode.
bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
// _audiblePoints.size() == 0 ??
bool shouldRecalc = _reflections == 0
bool shouldRecalc = _reflections == 0
|| !isSimilarPosition(origin, _origin)
|| !isSimilarOrientation(orientation, _orientation)
|| !isSimilarPosition(listenerPosition, _listenerPosition)
|| (withDiffusion != _withDiffusion);
if (shouldRecalc) {
qDebug() << "RECALC...... !!!!!!!!!!!!!!!!!!!!!!!!!!!!";
QMutexLocker locker(&_mutex);
quint64 start = usecTimestampNow();
_origin = origin;
@@ -701,7 +301,7 @@ qDebug() << "RECALC...... !!!!!!!!!!!!!!!!!!!!!!!!!!!!";
}
}
void AudioReflector::newDrawRays() {
void AudioReflector::drawRays() {
const glm::vec3 RED(1,0,0);
const glm::vec3 GREEN(0,1,0);
@@ -805,8 +405,8 @@ int AudioReflector::analyzePathsSingleStep() {
int activePaths = 0;
foreach(AudioPath* const& path, _audioPaths) {
bool wantExtraDebuggging = false;
bool isDiffusion = (path->startPoint != _origin);
//bool wantExtraDebuggging = false;
//bool isDiffusion = (path->startPoint != _origin);
glm::vec3 start = path->lastPoint;
glm::vec3 direction = path->lastDirection;
@@ -823,22 +423,10 @@ int AudioReflector::analyzePathsSingleStep() {
if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) {
path->finalized = true;
if (wantExtraDebuggging && isDiffusion) {
qDebug() << "diffusion bounceCount too high!";
}
} else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face, Octree::Lock)) {
glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));
pathDistance += glm::distance(start, end);
if (wantExtraDebuggging) {
qDebug() << "ray intersection... "
<< " startPoint=[" << path->startPoint.x << "," << path->startPoint.y << "," << path->startPoint.z << "]"
<< " bouceCount= " << path->bounceCount
<< " end=[" << end.x << "," << end.y << "," << end.z << "]"
<< " pathDistance=" << pathDistance;
}
// We aren't using this... should we be????
float toListenerDistance = glm::distance(end, _listenerPosition);
@@ -846,16 +434,17 @@ int AudioReflector::analyzePathsSingleStep() {
// adjust our current delay by just the delay from the most recent ray
currentDelay += getDelayFromDistance(distance);
// adjust our previous attenuation based on the distance traveled in last ray
//float distanceAttenuation = getDistanceAttenuationCoefficient(pathDistance);
// now we know the current attenuation for the "perfect" reflection case, but we now incorporate
// our surface materials to determine how much of this ray is absorbed, reflected, and diffused
SurfaceCharacteristics material = getSurfaceCharacteristics(elementHit);
float reflectiveAttenuation = currentReflectiveAttenuation * material.reflectiveRatio;
float totalDiffusionAttenuation = currentReflectiveAttenuation * material.diffusionRatio;
float partialDiffusionAttenuation = _diffusionFanout < 1 ? 0.0f : totalDiffusionAttenuation / _diffusionFanout;
bool wantDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
int fanout = wantDiffusions ? _diffusionFanout : 0;
float partialDiffusionAttenuation = fanout < 1 ? 0.0f : totalDiffusionAttenuation / fanout;
// total delay includes the bounce back to listener
float totalDelay = currentDelay + getDelayFromDistance(toListenerDistance);
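
At each hit the surviving energy is split by the surface material: a reflectiveRatio share continues along the mirrored ray, a diffusionRatio share is divided evenly across the fanout diffusion rays, and the remainder is absorbed. A numeric sketch with assumed material values:

    #include <cstdio>

    int main() {
        // Assumed material: 70% reflected, 20% diffused, 10% absorbed.
        float reflectiveRatio = 0.7f, diffusionRatio = 0.2f;
        float currentAttenuation = 0.5f; // energy arriving at this bounce
        int fanout = 5;                  // diffusion rays spawned per bounce

        float reflective = currentAttenuation * reflectiveRatio;    // 0.35
        float totalDiffusion = currentAttenuation * diffusionRatio; // 0.10
        float perRay = fanout < 1 ? 0.0f : totalDiffusion / fanout; // 0.02 each
        printf("reflected=%.2f diffusion/ray=%.2f\n", reflective, perRay);
        return 0;
    }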
@@ -867,7 +456,7 @@ int AudioReflector::analyzePathsSingleStep() {
&& totalDelay < MAXIMUM_DELAY_MS) {
// diffusions fan out from random places on the hemisphere above the collision point
for(int i = 0; i < _diffusionFanout; i++) {
for(int i = 0; i < fanout; i++) {
glm::vec3 diffusion;
float randomness = randFloatInRange(0.5f,1.0f);
@@ -891,15 +480,6 @@ int AudioReflector::analyzePathsSingleStep() {
diffusion = glm::normalize(diffusion);
if (wantExtraDebuggging) {
qDebug() << "DIFFUSION... addSoundSource()... " <<
" partialDiffusionAttenuation=" << partialDiffusionAttenuation << "\n" <<
" MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT << "\n" <<
" direction=[" << direction.x << "," << direction.y << "," << direction.z << "]\n" <<
" diffusion=[" << diffusion.x << "," << diffusion.y << "," << diffusion.z << "]\n" <<
" end=[" << end.x << "," << end.y << "," << end.z << "]";
}
// add sound sources for these diffusions
addSoundSource(end, diffusion, partialDiffusionAttenuation, currentDelay, pathDistance);
}
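
Each diffusion ray needs a random outward direction at the collision point. One simple way to sample the hemisphere, sketched under the assumption of a jitter-the-normal scheme (not necessarily the exact sampling the loop above performs):

    #include <glm/glm.hpp>
    #include <cstdlib>

    // Stand-in for the engine's randFloatInRange(): a uniform float in [low, high).
    static float randFloatInRange(float low, float high) {
        return low + (high - low) * (rand() / (float)RAND_MAX);
    }

    // Jitter the surface normal by a random offset, renormalize, and flip the
    // result if it points into the surface, keeping it on the outward hemisphere.
    glm::vec3 randomHemisphereDirection(const glm::vec3& normal) {
        glm::vec3 jitter(randFloatInRange(-1.0f, 1.0f),
                         randFloatInRange(-1.0f, 1.0f),
                         randFloatInRange(-1.0f, 1.0f));
        glm::vec3 direction = glm::normalize(normal + jitter * randFloatInRange(0.5f, 1.0f));
        if (glm::dot(direction, normal) < 0.0f) {
            direction = -direction;
        }
        return direction;
    }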
@@ -907,16 +487,6 @@ int AudioReflector::analyzePathsSingleStep() {
// if our reflective attenuation is above our minimum, then add our reflection point and
// allow our path to continue
if (wantExtraDebuggging && isDiffusion) {
qDebug() << "checking diffusion";
qDebug() << "reflectiveAttenuation=" << reflectiveAttenuation;
qDebug() << "totalDiffusionAttenuation=" << totalDiffusionAttenuation;
qDebug() << "toListenerAttenuation=" << toListenerAttenuation;
qDebug() << "(reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation=" << ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation);
}
// we used to use... ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT
if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT
&& totalDelay < MAXIMUM_DELAY_MS) {
@@ -924,18 +494,8 @@ int AudioReflector::analyzePathsSingleStep() {
// NOTE: we add the delay to the audible point, not back to the listener. The additional delay
// and attenuation to the listener is recalculated at the point where we actually inject the
// audio so that it can be adjusted to ear position
AudioPoint point = { end, currentDelay,
(reflectiveAttenuation + totalDiffusionAttenuation),
pathDistance};
AudiblePoint point = {end, currentDelay, (reflectiveAttenuation + totalDiffusionAttenuation), pathDistance};
/*
qDebug() << "audible point...";
qDebug() << " reflectiveAttenuation=" << reflectiveAttenuation;
qDebug() << " toListenerAttenuation=" << toListenerAttenuation;
qDebug() << " likely attenuation=" << (reflectiveAttenuation * toListenerAttenuation);
qDebug() << " totalDiffusionAttenuation=" << totalDiffusionAttenuation;
*/
_audiblePoints.push_back(point);
// add this location to the path points, so we can visualize it
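
An AudiblePoint deliberately stores only path-relative values; the final leg to each ear is recomputed at injection time so it tracks head movement. A sketch of that split, with the per-ear adjustment simplified (the struct mirrors the header below; earDelayMs is a hypothetical helper):

    #include <glm/glm.hpp>

    struct AudiblePoint {
        glm::vec3 location;  // where the path became audible
        float delay;         // ms along the path, pre-delay included
        float attenuation;   // energy remaining at this point
        float distance;      // meters traveled along the path
    };

    // At injection time the stored path delay is extended by the
    // point-to-ear leg, so moving your head changes only this last term.
    float earDelayMs(const AudiblePoint& p, const glm::vec3& earPosition,
                     float soundMsPerMeter) {
        return p.delay + soundMsPerMeter * glm::distance(p.location, earPosition);
    }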

AudioReflector.h

@@ -36,7 +36,7 @@ public:
QVector<glm::vec3> reflections;
};
class AudioPoint {
class AudiblePoint {
public:
glm::vec3 location;
float delay; // total delay, including pre-delay, up to the audible location (not to the listener's ears)
@@ -96,35 +96,11 @@ private:
Audio* _audio; // access to audio API
// Helpers for drawing
void drawRays();
void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);
// OLD helper for playing audio
void echoReflections(const glm::vec3& origin, const QVector<glm::vec3>& reflections, const QByteArray& samples,
unsigned int sampleTime, int sampleRate);
// OLD helper for calculating reflections
QVector<glm::vec3> calculateReflections(const glm::vec3& earPosition, const glm::vec3& origin, const glm::vec3& originalDirection);
void calculateDiffusions(const glm::vec3& earPosition, const glm::vec3& origin,
const glm::vec3& thisReflection, float thisDistance, float thisAttenuation, int thisBounceCount,
BoxFace thisReflectionFace, QVector<glm::vec3> reflectionPoints);
// OLD helper for drawing reflections
void drawReflections(const glm::vec3& origin, const glm::vec3& originalColor, const QVector<glm::vec3>& reflections);
// OLD helper for calculating reflections
void calculateAllReflections();
// resets statistics
void reset();
// helper for generically calculating attenuation based on distance
float getDistanceAttenuationCoefficient(float distance);
// helper for generically calculating attenuation based on bounce count, used in old/non-diffusion mode
float getBounceAttenuationCoefficient(int bounceCount);
// statistics
int _reflections;
int _diffusionPathCount;
@@ -143,23 +119,6 @@ private:
glm::vec3 _origin;
glm::quat _orientation;
// old way of doing this...
QVector<glm::vec3> _frontRightUpReflections;
QVector<glm::vec3> _frontLeftUpReflections;
QVector<glm::vec3> _backRightUpReflections;
QVector<glm::vec3> _backLeftUpReflections;
QVector<glm::vec3> _frontRightDownReflections;
QVector<glm::vec3> _frontLeftDownReflections;
QVector<glm::vec3> _backRightDownReflections;
QVector<glm::vec3> _backLeftDownReflections;
QVector<glm::vec3> _frontReflections;
QVector<glm::vec3> _backReflections;
QVector<glm::vec3> _leftReflections;
QVector<glm::vec3> _rightReflections;
QVector<glm::vec3> _upReflections;
QVector<glm::vec3> _downReflections;
// NOTE: Here's the new way, we will have an array of AudioPaths, we will loop on all of our currently calculating audio
// paths, and calculate one ray per path. If that ray doesn't reflect, or reaches a max distance/attenuation, then it
// is considered finalized.
@@ -168,7 +127,7 @@ private:
// fanout number of new paths, those new paths will have an origin of the reflection point, and an initial attenuation
// of their diffusion ratio. Those new paths will be added to the active audio paths, and be analyzed for the next loop.
QVector<AudioPath*> _audioPaths;
QVector<AudioPoint> _audiblePoints;
QVector<AudiblePoint> _audiblePoints;
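
The NOTE above outlines the new engine: each analysis pass advances every active path by one ray, finalizes paths that exceed the bounce or attenuation limits, and lets diffusion spawn new paths at hit points. A condensed sketch of that control flow (the type, thresholds, and stand-in attenuation are assumptions, not the class's real API):

    #include <vector>

    struct PathSketch {
        bool finalized = false;
        int bounceCount = 0;
        float attenuation = 1.0f;
    };

    const int MAX_BOUNCES = 100;
    const float MIN_ATTENUATION = 0.001f;

    // One analysis step: advance every unfinished path by a single ray and
    // return how many are still active. Diffusion spawning is omitted; in the
    // real engine it would append new paths here.
    int analyzeOneStep(std::vector<PathSketch>& paths) {
        int activePaths = 0;
        for (PathSketch& path : paths) {
            if (path.finalized) {
                continue;
            }
            path.bounceCount++;
            path.attenuation *= 0.5f; // stand-in for surface + distance attenuation
            if (path.bounceCount > MAX_BOUNCES || path.attenuation < MIN_ATTENUATION) {
                path.finalized = true;
            } else {
                activePaths++;
            }
        }
        return activePaths;
    }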
// adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties,
// as well as diffusion sound sources
@@ -178,15 +137,14 @@ private:
// helper that handles audioPath analysis
int analyzePathsSingleStep();
void analyzePaths();
void newDrawRays();
void drawRays();
void drawPath(AudioPath* path, const glm::vec3& originalColor);
void newCalculateAllReflections();
void calculateAllReflections();
int countDiffusionPaths();
glm::vec3 getFaceNormal(BoxFace face);
void injectAudiblePoint(const AudioPoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
void oldEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
void newEchoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
void injectAudiblePoint(const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
void echoAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
// return the surface characteristics of the element we hit
SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit = NULL);

Stats.cpp

@@ -513,14 +513,16 @@ void Stats::display(
// add some reflection stats
char reflectionsStatus[128];
sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s",
sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s, Normals: %s",
audioReflector->getReflections(),
(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal)
? "included" : "silent"),
(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars)
? "two" : "one"),
(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource)
? "stereo" : "mono")
? "stereo" : "mono"),
(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces)
? "random" : "regular")
);
verticalOffset += STATS_PELS_PER_LINE;