remove spatial audio menu items

ZappoMan 2014-11-24 13:07:22 -08:00
parent 137642f8a5
commit 08772bbae0
5 changed files with 55 additions and 106 deletions
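
Every hunk below follows the same pattern: a Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing...) lookup is replaced by a hardcoded local flag, and the original lookup is preserved in a trailing comment so the option can be restored later. A minimal sketch of the before/after shape, using the includeOriginal flag from Audio.cpp below (the surrounding function body is abbreviated):

// Before: behaviour gated on a checkable debug menu item
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
    addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
}

// After: flag pinned to a fixed value; the menu lookup survives only as a comment
bool includeOriginal = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)
if (includeOriginal) {
    addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
}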

Audio.cpp

@@ -989,7 +989,8 @@ void Audio::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& ou
     QByteArray buffer = inputBuffer;
     // Accumulate direct transmission of audio from sender to receiver
-    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
+    bool includeOriginal = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)
+    if (includeOriginal) {
         emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
         addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
     }
@@ -1264,7 +1265,8 @@ void Audio::selectAudioSourceSine440() {
 }
 
 void Audio::toggleAudioSpatialProcessing() {
-    _processSpatialAudio = !_processSpatialAudio;
+    // spatial audio disabled for now
+    _processSpatialAudio = false; //!_processSpatialAudio;
     if (_processSpatialAudio) {
         _spatialAudioStart = 0;
         _spatialAudioFinish = 0;
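
With the menu item and its Ctrl+Shift+M shortcut removed in Menu.cpp below, the toggle slot above is effectively dead: _processSpatialAudio is pinned to false, so the reset inside the if never runs. A minimal sketch of the resulting behaviour (the remainder of the reset block is not shown in the hunk above):

void Audio::toggleAudioSpatialProcessing() {
    // spatial audio disabled for now
    _processSpatialAudio = false; //!_processSpatialAudio;
    if (_processSpatialAudio) { // always false after this change
        _spatialAudioStart = 0;
        _spatialAudioFinish = 0;
        // ... rest of the original reset, not shown in the hunk above
    }
}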

AudioReflector.cpp

@@ -66,10 +66,15 @@ AudioReflector::AudioReflector(QObject* parent) :
 }
 
 bool AudioReflector::haveAttributesChanged() {
-    bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
-    bool dontDistanceAttenuate = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
-    bool alternateDistanceAttenuate = Menu::getInstance()->isOptionChecked(
-                                            MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
+    // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
+    bool withDiffusion = true;
+    // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
+    bool dontDistanceAttenuate = false;
+    //Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
+    bool alternateDistanceAttenuate = false;
 
     bool attributesChange = (_withDiffusion != withDiffusion
         || _lastPreDelay != _preDelay
@@ -107,7 +112,8 @@ void AudioReflector::render() {
     calculateAllReflections();
 
     // only render if we've been asked to do so
-    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingRenderPaths)) {
+    bool renderPaths = false; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingRenderPaths)
+    if (renderPaths) {
         drawRays();
     }
 }
@@ -116,7 +122,8 @@ void AudioReflector::render() {
 // = 3ms per meter
 float AudioReflector::getDelayFromDistance(float distance) {
     float delay = (_soundMsPerMeter * distance);
-    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) {
+    bool includePreDelay = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)
+    if (includePreDelay) {
         delay += _preDelay;
     }
     return delay;
@@ -126,12 +133,11 @@ float AudioReflector::getDelayFromDistance(float distance) {
 float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
-    bool doDistanceAttenuation = !Menu::getInstance()->isOptionChecked(
-                                        MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
+    //!Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
+    bool doDistanceAttenuation = true;
 
-    bool originalFormula = !Menu::getInstance()->isOptionChecked(
-                                        MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
+    //!Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
+    bool originalFormula = true;
 
     float distanceCoefficient = 1.0f;
@@ -170,7 +176,8 @@ float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
 }
 
 glm::vec3 AudioReflector::getFaceNormal(BoxFace face) {
-    bool wantSlightRandomness = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces);
+    // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces);
+    bool wantSlightRandomness = true;
     glm::vec3 faceNormal;
     const float MIN_RANDOM_LENGTH = 0.99f;
     const float MAX_RANDOM_LENGTH = 1.0f;
@@ -202,8 +209,8 @@ const int NUMBER_OF_CHANNELS = 2;
 void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint,
                                         const QByteArray& samples, unsigned int sampleTime, int sampleRate) {
-    bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
-    bool wantStereo = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource);
+    bool wantEarSeparation = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
+    bool wantStereo = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource);
     glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() :
                                     _myAvatar->getHead()->getPosition();
     glm::vec3 leftEarPosition = wantEarSeparation ? _myAvatar->getHead()->getLeftEarPosition() :
@@ -316,7 +323,8 @@ void AudioReflector::preProcessOriginalInboundAudio(unsigned int sampleTime,
 }
 
 void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
-    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) {
+    bool processLocalAudio = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)
+    if (processLocalAudio) {
         const int NUM_CHANNELS_INPUT = 1;
         const int NUM_CHANNELS_OUTPUT = 2;
         const int EXPECTED_SAMPLE_RATE = 24000;
@@ -458,7 +466,7 @@ void AudioReflector::identifyAudioSources() {
 void AudioReflector::calculateAllReflections() {
     // only recalculate when we've moved, or if the attributes have changed
     // TODO: what about case where new voxels are added in front of us???
-    bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented);
+    bool wantHeadOrientation = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented);
     glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientationInWorldFrame() : _myAvatar->getOrientation();
     glm::vec3 origin = _myAvatar->getHead()->getPosition();
     glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition();
@@ -505,7 +513,8 @@ void AudioReflector::drawRays() {
         }
     }
 
-    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) {
+    bool processLocalAudio = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)
+    if (processLocalAudio) {
         // draw the paths for local audio
         foreach(AudioPath* const& path, _localAudioPaths) {
             // if this is an original reflection, draw it in RED
@@ -575,7 +584,8 @@ void AudioReflector::analyzePaths() {
     float initialAttenuation = 1.0f;
 
-    float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? _preDelay : 0.0f;
+    bool wantPreDelay = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)
+    float preDelay = wantPreDelay ? _preDelay : 0.0f;
 
     // NOTE: we're still calculating our initial paths based on the listeners position. But the analysis code has been
     // updated to support individual sound sources (which is how we support diffusion), we can use this new paradigm to
@@ -701,7 +711,7 @@ void AudioReflector::handlePathPoint(AudioPath* path, float distance, OctreeElem
     float reflectiveAttenuation = currentReflectiveAttenuation * material.reflectiveRatio;
     float totalDiffusionAttenuation = currentReflectiveAttenuation * material.diffusionRatio;
-    bool wantDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
+    bool wantDiffusions = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
     int fanout = wantDiffusions ? _diffusionFanout : 0;
     float partialDiffusionAttenuation = fanout < 1 ? 0.0f : totalDiffusionAttenuation / (float)fanout;
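
For reference, the delay calculation left in place above works out as follows: sound travels at roughly 343 m/s, about 2.9 ms per meter, which is where the "= 3ms per meter" comment comes from, and the pre-delay is now always added since the PreDelay menu item no longer exists. A minimal stand-alone sketch, with illustrative values standing in for _soundMsPerMeter and _preDelay (these are assumptions for the example, not the class defaults):

float soundMsPerMeter = 3.0f;  // assumed for the example; ~343 m/s sound speed gives ~2.9 ms per meter
float preDelay = 20.0f;        // assumed pre-delay in milliseconds
float distance = 10.0f;        // meters from the audible point to the listener

float delay = soundMsPerMeter * distance;   // 30 ms of travel time
bool includePreDelay = true;                // hardcoded above now that the menu item is gone
if (includePreDelay) {
    delay += preDelay;                      // total delay: 50 ms
}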

Menu.cpp

@@ -620,58 +620,6 @@ Menu::Menu() :
         audioScopeFramesGroup->addAction(fiftyFrames);
     }
 
-    QMenu* spatialAudioMenu = audioDebugMenu->addMenu("Spatial Audio");
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessing,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_M,
-                                           false,
-                                           appInstance->getAudio(),
-                                           SLOT(toggleAudioSpatialProcessing()));
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingIncludeOriginal,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_O,
-                                           true);
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingSeparateEars,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_E,
-                                           true);
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingPreDelay,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_D,
-                                           true);
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingStereoSource,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_S,
-                                           true);
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingHeadOriented,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_H,
-                                           true);
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingWithDiffusions,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_W,
-                                           true);
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingRenderPaths,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_R,
-                                           true);
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_X,
-                                           true);
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingProcessLocalAudio,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_A,
-                                           true);
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingDontDistanceAttenuate,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_Y,
-                                           false);
-
-    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_U,
-                                           false);
-
     addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioStats,
                                            Qt::CTRL | Qt::Key_A,
                                            false,

Menu.h

@@ -341,18 +341,6 @@ namespace MenuOption {
     const QString AudioScopeTwentyFrames = "Twenty";
     const QString AudioStats = "Audio Stats";
     const QString AudioStatsShowInjectedStreams = "Audio Stats Show Injected Streams";
-    const QString AudioSpatialProcessingAlternateDistanceAttenuate = "Alternate distance attenuation";
-    const QString AudioSpatialProcessing = "Audio Spatial Processing";
-    const QString AudioSpatialProcessingDontDistanceAttenuate = "Don't calculate distance attenuation";
-    const QString AudioSpatialProcessingHeadOriented = "Head Oriented";
-    const QString AudioSpatialProcessingIncludeOriginal = "Includes Network Original";
-    const QString AudioSpatialProcessingPreDelay = "Add Pre-Delay";
-    const QString AudioSpatialProcessingProcessLocalAudio = "Process Local Audio";
-    const QString AudioSpatialProcessingRenderPaths = "Render Paths";
-    const QString AudioSpatialProcessingSeparateEars = "Separate Ears";
-    const QString AudioSpatialProcessingSlightlyRandomSurfaces = "Slightly Random Surfaces";
-    const QString AudioSpatialProcessingStereoSource = "Stereo Source";
-    const QString AudioSpatialProcessingWithDiffusions = "With Diffusions";
     const QString AudioSourceInject = "Generated Audio";
     const QString AudioSourcePinkNoise = "Pink Noise";
     const QString AudioSourceSine440 = "Sine 440hz";

Stats.cpp

@@ -460,7 +460,8 @@ void Stats::display(
     VoxelSystem* voxels = Application::getInstance()->getVoxels();
     lines = _expanded ? 14 : 3;
-    if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
+    bool wantSpatialProcessing = false; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)
+    if (_expanded && wantSpatialProcessing) {
         lines += 10; // spatial audio processing adds 1 spacing line and 8 extra lines of info
     }
@@ -652,7 +653,7 @@ void Stats::display(
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, (char*)voxelStats.str().c_str(), color);
     }
 
-    if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
+    if (_expanded && wantSpatialProcessing) {
         verticalOffset += STATS_PELS_PER_LINE; // space one line...
         const AudioReflector* audioReflector = Application::getInstance()->getAudioReflector();
@@ -660,23 +661,24 @@ void Stats::display(
         // add some reflection stats
         char reflectionsStatus[128];
+        bool includeOriginal = true; //Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)
+        bool separateEars = true; //Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars)
+        bool stereoSource = true; //Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource)
+        bool randomSurfaces = true; //Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces)
         sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s, Normals: %s",
             audioReflector->getReflections(),
-            (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)
-                ? "included" : "silent"),
-            (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars)
-                ? "two" : "one"),
-            (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource)
-                ? "stereo" : "mono"),
-            (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces)
-                ? "random" : "regular")
+            (includeOriginal ? "included" : "silent"),
+            (separateEars ? "two" : "one"),
+            (stereoSource ? "stereo" : "mono"),
+            (randomSurfaces ? "random" : "regular")
             );
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);
-        float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ?
-            audioReflector->getPreDelay() : 0.0f;
+        bool wantPreDelay = true; //Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)
+        float preDelay = wantPreDelay ? audioReflector->getPreDelay() : 0.0f;
         sprintf(reflectionsStatus, "Delay: pre: %6.3f, average %6.3f, max %6.3f, min %6.3f, speed: %6.3f",
             preDelay,
@@ -688,12 +690,12 @@ void Stats::display(
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);
-        bool distanceAttenuationDisabled = Menu::getInstance()->isOptionChecked(
-            MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
-        bool alternateDistanceAttenuationEnabled = Menu::getInstance()->isOptionChecked(
-            MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
+        //Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
+        bool distanceAttenuationDisabled = false;
+        // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
+        bool alternateDistanceAttenuationEnabled = false;
         sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, %s: %5.3f",
             audioReflector->getAverageAttenuation(),
@@ -706,15 +708,14 @@ void Stats::display(
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);
-        sprintf(reflectionsStatus, "Local Audio: %s Attenuation: %5.3f",
-            (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)
-                ? "yes" : "no"),
+        bool localAudio = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio);
+        sprintf(reflectionsStatus, "Local Audio: %s Attenuation: %5.3f", (localAudio ? "yes" : "no"),
             audioReflector->getLocalAudioAttenuationFactor());
 
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);
 
-        bool diffusionEnabled = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
+        bool diffusionEnabled = true; //Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
         int fanout = diffusionEnabled ? audioReflector->getDiffusionFanout() : 0;
         int diffusionPaths = diffusionEnabled ? audioReflector->getDiffusionPathCount() : 0;
         sprintf(reflectionsStatus, "Diffusion: %s, Fanout: %d, Paths: %d",