mirror of
https://github.com/HifiExperiments/overte.git
synced 2025-08-03 22:53:10 +02:00
implement support for comb filter suppression window
This commit is contained in:
parent
b00a67cb57
commit
2e0c5fc81b
4 changed files with 167 additions and 46 deletions
|
@ -18,6 +18,7 @@ var localFactorScale = 1.0;
|
|||
var reflectiveScale = 100.0;
|
||||
var diffusionScale = 100.0;
|
||||
var absorptionScale = 100.0;
|
||||
var combFilterScale = 50.0;
|
||||
|
||||
// these three properties are bound together, if you change one, the others will also change
|
||||
var reflectiveRatio = AudioReflector.getReflectiveRatio();
|
||||
|
@ -263,6 +264,46 @@ var localFactorThumb = Overlays.addOverlay("image", {
|
|||
alpha: 1
|
||||
});
|
||||
|
||||
var combFilterY = topY;
|
||||
topY += sliderHeight;
|
||||
|
||||
var combFilterLabel = Overlays.addOverlay("text", {
|
||||
x: 40,
|
||||
y: combFilterY,
|
||||
width: 60,
|
||||
height: sliderHeight,
|
||||
color: { red: 0, green: 0, blue: 0},
|
||||
textColor: { red: 255, green: 255, blue: 255},
|
||||
topMargin: 6,
|
||||
leftMargin: 5,
|
||||
text: "Comb Filter\nWindow:"
|
||||
});
|
||||
|
||||
|
||||
var combFilterSlider = Overlays.addOverlay("image", {
|
||||
// alternate form of expressing bounds
|
||||
bounds: { x: 100, y: combFilterY, width: 150, height: sliderHeight},
|
||||
subImage: { x: 46, y: 0, width: 200, height: 71 },
|
||||
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
|
||||
color: { red: 255, green: 255, blue: 255},
|
||||
alpha: 1
|
||||
});
|
||||
|
||||
|
||||
var combFilterMinThumbX = 110;
|
||||
var combFilterMaxThumbX = combFilterMinThumbX + 110;
|
||||
var combFilterThumbX = combFilterMinThumbX + ((combFilterMaxThumbX - combFilterMinThumbX) * (AudioReflector.getCombFilterWindow() / combFilterScale));
|
||||
var combFilterThumb = Overlays.addOverlay("image", {
|
||||
x: combFilterThumbX,
|
||||
y: combFilterY+9,
|
||||
width: 18,
|
||||
height: 17,
|
||||
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
|
||||
color: { red: 128, green: 128, blue: 0},
|
||||
alpha: 1
|
||||
});
|
||||
|
||||
|
||||
var reflectiveY = topY;
|
||||
topY += sliderHeight;
|
||||
|
||||
|
@ -387,6 +428,10 @@ function scriptEnding() {
|
|||
Overlays.deleteOverlay(factorThumb);
|
||||
Overlays.deleteOverlay(factorSlider);
|
||||
|
||||
Overlays.deleteOverlay(combFilterLabel);
|
||||
Overlays.deleteOverlay(combFilterThumb);
|
||||
Overlays.deleteOverlay(combFilterSlider);
|
||||
|
||||
Overlays.deleteOverlay(localFactorLabel);
|
||||
Overlays.deleteOverlay(localFactorThumb);
|
||||
Overlays.deleteOverlay(localFactorSlider);
|
||||
|
@ -433,6 +478,7 @@ var movingSliderDelay = false;
|
|||
var movingSliderFanout = false;
|
||||
var movingSliderSpeed = false;
|
||||
var movingSliderFactor = false;
|
||||
var movingSliderCombFilter = false;
|
||||
var movingSliderLocalFactor = false;
|
||||
var movingSliderReflective = false;
|
||||
var movingSliderDiffusion = false;
|
||||
|
@ -488,6 +534,18 @@ function mouseMoveEvent(event) {
|
|||
var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
|
||||
AudioReflector.setDistanceAttenuationScalingFactor(factor);
|
||||
}
|
||||
if (movingSliderCombFilter) {
|
||||
newThumbX = event.x - thumbClickOffsetX;
|
||||
if (newThumbX < combFilterMinThumbX) {
|
||||
newThumbX = combFilterMinThumbX;
|
||||
}
|
||||
if (newThumbX > combFilterMaxThumbX) {
|
||||
newThumbX = combFilterMaxThumbX;
|
||||
}
|
||||
Overlays.editOverlay(combFilterThumb, { x: newThumbX } );
|
||||
var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
|
||||
AudioReflector.setCombFilterWindow(combFilter);
|
||||
}
|
||||
|
||||
if (movingSliderLocalFactor) {
|
||||
newThumbX = event.x - thumbClickOffsetX;
|
||||
|
@ -566,6 +624,10 @@ function mousePressEvent(event) {
|
|||
movingSliderLocalFactor = true;
|
||||
thumbClickOffsetX = event.x - localFactorThumbX;
|
||||
}
|
||||
if (clickedOverlay == combFilterThumb) {
|
||||
movingSliderCombFilter = true;
|
||||
thumbClickOffsetX = event.x - combFilterThumbX;
|
||||
}
|
||||
if (clickedOverlay == diffusionThumb) {
|
||||
movingSliderDiffusion = true;
|
||||
thumbClickOffsetX = event.x - diffusionThumbX;
|
||||
|
@ -604,6 +666,12 @@ function mouseReleaseEvent(event) {
|
|||
AudioReflector.setDistanceAttenuationScalingFactor(factor);
|
||||
factorThumbX = newThumbX;
|
||||
}
|
||||
if (movingSliderCombFilter) {
|
||||
movingSliderCombFilter = false;
|
||||
var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
|
||||
AudioReflector.setCombFilterWindow(combFilter);
|
||||
combFilterThumbX = newThumbX;
|
||||
}
|
||||
|
||||
if (movingSliderLocalFactor) {
|
||||
movingSliderLocalFactor = false;
|
||||
|
|
|
@ -19,6 +19,7 @@ const float MAXIMUM_DELAY_MS = 1000.0 * 20.0f; // stop reflecting after path is
|
|||
const int DEFAULT_DIFFUSION_FANOUT = 5;
|
||||
const int ABSOLUTE_MAXIMUM_BOUNCE_COUNT = 10;
|
||||
const float DEFAULT_LOCAL_ATTENUATION_FACTOR = 0.125;
|
||||
const float DEFAULT_COMB_FILTER_WINDOW = 0.05f; // ms delay differential within which echoes are suppressed to avoid comb filter effects
|
||||
|
||||
const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
|
||||
|
||||
|
@ -31,6 +32,7 @@ AudioReflector::AudioReflector(QObject* parent) :
|
|||
_soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
|
||||
_distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
|
||||
_localAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
|
||||
_combFilterWindow(DEFAULT_COMB_FILTER_WINDOW),
|
||||
_diffusionFanout(DEFAULT_DIFFUSION_FANOUT),
|
||||
_absorptionRatio(DEFAULT_ABSORPTION_RATIO),
|
||||
_diffusionRatio(DEFAULT_DIFFUSION_RATIO),
|
||||
|
@ -152,7 +154,7 @@ glm::vec3 AudioReflector::getFaceNormal(BoxFace face) {
|
|||
// set up our buffers for our attenuated and delayed samples
|
||||
const int NUMBER_OF_CHANNELS = 2;
|
||||
|
||||
void AudioReflector::injectAudiblePoint(const AudiblePoint& audiblePoint,
|
||||
void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint,
|
||||
const QByteArray& samples, unsigned int sampleTime, int sampleRate) {
|
||||
|
||||
bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
|
||||
|
@ -180,51 +182,77 @@ void AudioReflector::injectAudiblePoint(const AudiblePoint& audiblePoint,
|
|||
|
||||
float rightEarDelayMsecs = getDelayFromDistance(rightEarDistance) + audiblePoint.delay;
|
||||
float leftEarDelayMsecs = getDelayFromDistance(leftEarDistance) + audiblePoint.delay;
|
||||
|
||||
_totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
|
||||
_delayCount += 2;
|
||||
_maxDelay = std::max(_maxDelay,rightEarDelayMsecs);
|
||||
_maxDelay = std::max(_maxDelay,leftEarDelayMsecs);
|
||||
_minDelay = std::min(_minDelay,rightEarDelayMsecs);
|
||||
_minDelay = std::min(_minDelay,leftEarDelayMsecs);
|
||||
|
||||
int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
|
||||
int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
|
||||
|
||||
float rightEarAttenuation = audiblePoint.attenuation *
|
||||
getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance);
|
||||
|
||||
float leftEarAttenuation = audiblePoint.attenuation *
|
||||
getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance);
|
||||
|
||||
_totalAttenuation += rightEarAttenuation + leftEarAttenuation;
|
||||
_attenuationCount += 2;
|
||||
_maxAttenuation = std::max(_maxAttenuation,rightEarAttenuation);
|
||||
_maxAttenuation = std::max(_maxAttenuation,leftEarAttenuation);
|
||||
_minAttenuation = std::min(_minAttenuation,rightEarAttenuation);
|
||||
_minAttenuation = std::min(_minAttenuation,leftEarAttenuation);
|
||||
float averageEarDelayMsecs = (leftEarDelayMsecs + rightEarDelayMsecs) / 2.0f;
|
||||
|
||||
// run through the samples, and attenuate them
|
||||
for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
|
||||
int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
|
||||
int16_t rightSample = leftSample;
|
||||
if (wantStereo) {
|
||||
rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
|
||||
bool safeToInject = true; // assume the best
|
||||
|
||||
QMap<float, float>& knownDelays = (source == INBOUND_AUDIO) ? _inboundAudioDelays : _localAudioDelays;
|
||||
|
||||
// check to see if any known delay is too close to this one
|
||||
QMap<float, float>::const_iterator lowerBound = knownDelays.lowerBound(averageEarDelayMsecs - _combFilterWindow);
|
||||
if (lowerBound != knownDelays.end()) {
|
||||
float closestFound = lowerBound.value();
|
||||
float deltaToClosest = (averageEarDelayMsecs - closestFound);
|
||||
//qDebug() << "knownDelays=" << knownDelays;
|
||||
//qDebug() << "averageEarDelayMsecs=" << averageEarDelayMsecs << " closestFound=" << closestFound;
|
||||
//qDebug() << "deltaToClosest=" << deltaToClosest;
|
||||
if (deltaToClosest > -_combFilterWindow && deltaToClosest < _combFilterWindow) {
|
||||
//qDebug() << "**** WE THINK WE'RE TOO CLOSE!! ****";
|
||||
safeToInject = false;
|
||||
}
|
||||
|
||||
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
|
||||
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
|
||||
|
||||
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
|
||||
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
|
||||
}
|
||||
|
||||
// now inject the attenuated array with the appropriate delay
|
||||
unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
|
||||
unsigned int sampleTimeRight = sampleTime + rightEarDelay;
|
||||
|
||||
_audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
|
||||
_audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
|
||||
if (!safeToInject) {
|
||||
QVector<float>& suppressedEchoes = (source == INBOUND_AUDIO) ? _inboundEchoesSuppressed : _localEchoesSuppressed;
|
||||
suppressedEchoes << averageEarDelayMsecs;
|
||||
} else {
|
||||
knownDelays[averageEarDelayMsecs] = averageEarDelayMsecs;
|
||||
|
||||
_totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
|
||||
_delayCount += 2;
|
||||
_maxDelay = std::max(_maxDelay,rightEarDelayMsecs);
|
||||
_maxDelay = std::max(_maxDelay,leftEarDelayMsecs);
|
||||
_minDelay = std::min(_minDelay,rightEarDelayMsecs);
|
||||
_minDelay = std::min(_minDelay,leftEarDelayMsecs);
|
||||
|
||||
int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
|
||||
int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
|
||||
|
||||
float rightEarAttenuation = audiblePoint.attenuation *
|
||||
getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance);
|
||||
|
||||
float leftEarAttenuation = audiblePoint.attenuation *
|
||||
getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance);
|
||||
|
||||
_totalAttenuation += rightEarAttenuation + leftEarAttenuation;
|
||||
_attenuationCount += 2;
|
||||
_maxAttenuation = std::max(_maxAttenuation,rightEarAttenuation);
|
||||
_maxAttenuation = std::max(_maxAttenuation,leftEarAttenuation);
|
||||
_minAttenuation = std::min(_minAttenuation,rightEarAttenuation);
|
||||
_minAttenuation = std::min(_minAttenuation,leftEarAttenuation);
|
||||
|
||||
// run through the samples, and attenuate them
|
||||
for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
|
||||
int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
|
||||
int16_t rightSample = leftSample;
|
||||
if (wantStereo) {
|
||||
rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
|
||||
}
|
||||
|
||||
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
|
||||
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
|
||||
|
||||
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
|
||||
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
|
||||
}
|
||||
|
||||
// now inject the attenuated array with the appropriate delay
|
||||
unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
|
||||
unsigned int sampleTimeRight = sampleTime + rightEarDelay;
|
||||
|
||||
_audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
|
||||
_audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
|
||||
}
|
||||
}
|
||||
|
||||
void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
|
||||
|
@ -244,13 +272,19 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray
|
|||
stereoSamples[i* NUM_CHANNELS_OUTPUT] = monoSamples[i] * _localAudioAttenuationFactor;
|
||||
stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * _localAudioAttenuationFactor;
|
||||
}
|
||||
_localAudioDelays.clear();
|
||||
_localEchoesSuppressed.clear();
|
||||
echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat);
|
||||
//qDebug() << _localAudioDelays;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
|
||||
_inboundAudioDelays.clear();
|
||||
_inboundEchoesSuppressed.clear();
|
||||
echoAudio(INBOUND_AUDIO, sampleTime, samples, format);
|
||||
//qDebug() << _inboundAudioDelays;
|
||||
}
|
||||
|
||||
void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
|
||||
|
@ -269,7 +303,7 @@ void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, cons
|
|||
QVector<AudiblePoint>& audiblePoints = source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;
|
||||
|
||||
foreach(const AudiblePoint& audiblePoint, audiblePoints) {
|
||||
injectAudiblePoint(audiblePoint, samples, sampleTime, format.sampleRate());
|
||||
injectAudiblePoint(source, audiblePoint, samples, sampleTime, format.sampleRate());
|
||||
}
|
||||
|
||||
_averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount;
|
||||
|
|
|
@ -86,6 +86,8 @@ public slots:
|
|||
float getMinAttenuation() const { return _minAttenuation; }
|
||||
float getDelayFromDistance(float distance);
|
||||
int getDiffusionPathCount() const { return _diffusionPathCount; }
|
||||
int getEchoesInjected() const { return _inboundAudioDelays.size() + _localAudioDelays.size(); }
|
||||
int getEchoesSuppressed() const { return _inboundEchoesSuppressed.size() + _localEchoesSuppressed.size(); }
|
||||
|
||||
/// ms of delay added to all echos
|
||||
float getPreDelay() const { return _preDelay; }
|
||||
|
@ -103,6 +105,10 @@ public slots:
|
|||
float getLocalAudioAttenuationFactor() const { return _localAudioAttenuationFactor; }
|
||||
void setLocalAudioAttenuationFactor(float factor) { _localAudioAttenuationFactor = factor; }
|
||||
|
||||
/// ms window in which we will suppress echoes to reduce comb filter effects
|
||||
float getCombFilterWindow() const { return _combFilterWindow; }
|
||||
void setCombFilterWindow(float value) { _combFilterWindow = value; }
|
||||
|
||||
/// number of points of diffusion from each reflection point, as fanout increases there are more chances for secondary
|
||||
/// echoes, but each diffusion ray is quieter and therefore more likely to be below the sound floor
|
||||
int getDiffusionFanout() const { return _diffusionFanout; }
|
||||
|
@ -153,10 +159,14 @@ private:
|
|||
|
||||
QVector<AudioPath*> _inboundAudioPaths; /// audio paths we're processing for inbound audio
|
||||
QVector<AudiblePoint> _inboundAudiblePoints; /// the audible points that have been calculated from the inbound audio paths
|
||||
QMap<float, float> _inboundAudioDelays; /// delay times for currently injected audio points
|
||||
QVector<float> _inboundEchoesSuppressed; /// delay times of inbound echoes suppressed by the comb filter window
|
||||
|
||||
QVector<AudioPath*> _localAudioPaths; /// audio paths we're processing for local audio
|
||||
QVector<AudiblePoint> _localAudiblePoints; /// the audible points that have been calculated from the local audio paths
|
||||
|
||||
QMap<float, float> _localAudioDelays; /// delay times for currently injected audio points
|
||||
QVector<float> _localEchoesSuppressed; /// delay times of local echoes suppressed by the comb filter window
|
||||
|
||||
// adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties,
|
||||
// as well as diffusion sound sources
|
||||
void addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation,
|
||||
|
@ -173,7 +183,7 @@ private:
|
|||
int countDiffusionPaths();
|
||||
glm::vec3 getFaceNormal(BoxFace face);
|
||||
|
||||
void injectAudiblePoint(const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
|
||||
void injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
|
||||
void echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
|
||||
|
||||
// return the surface characteristics of the element we hit
|
||||
|
@ -186,6 +196,7 @@ private:
|
|||
float _soundMsPerMeter;
|
||||
float _distanceAttenuationScalingFactor;
|
||||
float _localAudioAttenuationFactor;
|
||||
float _combFilterWindow;
|
||||
|
||||
int _diffusionFanout; // number of points of diffusion from each reflection point
|
||||
|
||||
|
|
|
@ -343,7 +343,7 @@ void Stats::display(
|
|||
|
||||
lines = _expanded ? 12 : 3;
|
||||
if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
|
||||
lines += 7; // spatial audio processing adds 1 spacing line and 6 extra lines of info
|
||||
lines += 8; // spatial audio processing adds 1 spacing line and 7 extra lines of info
|
||||
}
|
||||
|
||||
drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10);
|
||||
|
@ -577,6 +577,14 @@ void Stats::display(
|
|||
verticalOffset += STATS_PELS_PER_LINE;
|
||||
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
|
||||
|
||||
sprintf(reflectionsStatus, "Comb Filter Window: %5.3f ms, Allowed: %d, Suppressed: %d",
|
||||
audioReflector->getCombFilterWindow(),
|
||||
audioReflector->getEchoesInjected(),
|
||||
audioReflector->getEchoesSuppressed());
|
||||
|
||||
verticalOffset += STATS_PELS_PER_LINE;
|
||||
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue