mirror of https://github.com/overte-org/overte.git
synced 2025-07-16 14:37:02 +02:00

remove audio reflector

commit eda7432868, parent fd69b6b5b4
7 changed files with 1 addition and 2085 deletions

audioReflectorTools.js (deleted file)
@@ -1,844 +0,0 @@
//
// audioReflectorTools.js
// hifi
//
// Created by Brad Hefta-Gaub on 2/14/14.
// Copyright (c) 2014 HighFidelity, Inc. All rights reserved.
//
// Tools for manipulating the attributes of the AudioReflector behavior
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

Script.include("libraries/globals.js");

var delayScale = 100.0;
var fanoutScale = 10.0;
var speedScale = 20;
var factorScale = 5.0;
var localFactorScale = 1.0;
var reflectiveScale = 100.0;
var diffusionScale = 100.0;
var absorptionScale = 100.0;
var combFilterScale = 50.0;
var originalScale = 2.0;
var echoesScale = 2.0;

// these three properties are bound together, if you change one, the others will also change
var reflectiveRatio = AudioReflector.getReflectiveRatio();
var diffusionRatio = AudioReflector.getDiffusionRatio();
var absorptionRatio = AudioReflector.getAbsorptionRatio();

var reflectiveThumbX;
var diffusionThumbX;
var absorptionThumbX;

function setReflectiveRatio(reflective) {
    var total = diffusionRatio + absorptionRatio + (reflective / reflectiveScale);
    diffusionRatio = diffusionRatio / total;
    absorptionRatio = absorptionRatio / total;
    reflectiveRatio = (reflective / reflectiveScale) / total;
    updateRatioValues();
}

function setDiffusionRatio(diffusion) {
    var total = (diffusion / diffusionScale) + absorptionRatio + reflectiveRatio;
    diffusionRatio = (diffusion / diffusionScale) / total;
    absorptionRatio = absorptionRatio / total;
    reflectiveRatio = reflectiveRatio / total;
    updateRatioValues();
}

function setAbsorptionRatio(absorption) {
    var total = diffusionRatio + (absorption / absorptionScale) + reflectiveRatio;
    diffusionRatio = diffusionRatio / total;
    absorptionRatio = (absorption / absorptionScale) / total;
    reflectiveRatio = reflectiveRatio / total;
    updateRatioValues();
}
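
Each setter rescales all three ratios by their new total, so reflective + diffusion + absorption always sums to 1 after an update. A worked example: starting from diffusionRatio = absorptionRatio = 0.125 and dragging the reflective slider to 100 (so reflective / reflectiveScale = 1.0):

    t = 0.125 + 0.125 + 1.0 = 1.25, \qquad
    \text{diffusion} = \tfrac{0.125}{1.25} = 0.1, \quad
    \text{absorption} = 0.1, \quad
    \text{reflective} = \tfrac{1.0}{1.25} = 0.8

and the three again sum to 1.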

function updateRatioSliders() {
    reflectiveThumbX = reflectiveMinThumbX + ((reflectiveMaxThumbX - reflectiveMinThumbX) * reflectiveRatio);
    diffusionThumbX = diffusionMinThumbX + ((diffusionMaxThumbX - diffusionMinThumbX) * diffusionRatio);
    absorptionThumbX = absorptionMinThumbX + ((absorptionMaxThumbX - absorptionMinThumbX) * absorptionRatio);

    Overlays.editOverlay(reflectiveThumb, { x: reflectiveThumbX });
    Overlays.editOverlay(diffusionThumb, { x: diffusionThumbX });
    Overlays.editOverlay(absorptionThumb, { x: absorptionThumbX });
}

function updateRatioValues() {
    AudioReflector.setReflectiveRatio(reflectiveRatio);
    AudioReflector.setDiffusionRatio(diffusionRatio);
    AudioReflector.setAbsorptionRatio(absorptionRatio);
}

var topY = 250;
var sliderHeight = 35;

var delayY = topY;
topY += sliderHeight;
var delayLabel = Overlays.addOverlay("text", {
    x: 40,
    y: delayY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 12,
    leftMargin: 5,
    text: "Delay:"
});

var delaySlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: delayY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var delayMinThumbX = 110;
var delayMaxThumbX = delayMinThumbX + 110;
var delayThumbX = delayMinThumbX + ((delayMaxThumbX - delayMinThumbX) * (AudioReflector.getPreDelay() / delayScale));
var delayThumb = Overlays.addOverlay("image", {
    x: delayThumbX,
    y: delayY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 255, green: 0, blue: 0 },
    alpha: 1
});
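
All ten sliders below use this same placement rule: the thumb travels a 110 px track starting at x = 110, and each parameter is normalized by its *Scale constant from the top of the file:

    x_{\text{thumb}} = x_{\min} + (x_{\max} - x_{\min}) \cdot \frac{v}{\text{scale}},
    \qquad x_{\min} = 110, \; x_{\max} = 220.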

var fanoutY = topY;
topY += sliderHeight;

var fanoutLabel = Overlays.addOverlay("text", {
    x: 40,
    y: fanoutY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 12,
    leftMargin: 5,
    text: "Fanout:"
});

var fanoutSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: fanoutY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var fanoutMinThumbX = 110;
var fanoutMaxThumbX = fanoutMinThumbX + 110;
var fanoutThumbX = fanoutMinThumbX + ((fanoutMaxThumbX - fanoutMinThumbX) * (AudioReflector.getDiffusionFanout() / fanoutScale));
var fanoutThumb = Overlays.addOverlay("image", {
    x: fanoutThumbX,
    y: fanoutY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 255, green: 255, blue: 0 },
    alpha: 1
});

var speedY = topY;
topY += sliderHeight;

var speedLabel = Overlays.addOverlay("text", {
    x: 40,
    y: speedY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 6,
    leftMargin: 5,
    text: "Speed\nin ms/m:"
});

var speedSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: speedY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var speedMinThumbX = 110;
var speedMaxThumbX = speedMinThumbX + 110;
var speedThumbX = speedMinThumbX + ((speedMaxThumbX - speedMinThumbX) * (AudioReflector.getSoundMsPerMeter() / speedScale));
var speedThumb = Overlays.addOverlay("image", {
    x: speedThumbX,
    y: speedY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 0, green: 255, blue: 0 },
    alpha: 1
});

var factorY = topY;
topY += sliderHeight;

var factorLabel = Overlays.addOverlay("text", {
    x: 40,
    y: factorY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 6,
    leftMargin: 5,
    text: "Attenuation\nFactor:"
});

var factorSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: factorY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var factorMinThumbX = 110;
var factorMaxThumbX = factorMinThumbX + 110;
var factorThumbX = factorMinThumbX + ((factorMaxThumbX - factorMinThumbX) * (AudioReflector.getDistanceAttenuationScalingFactor() / factorScale));
var factorThumb = Overlays.addOverlay("image", {
    x: factorThumbX,
    y: factorY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 0, green: 0, blue: 255 },
    alpha: 1
});

var localFactorY = topY;
topY += sliderHeight;

var localFactorLabel = Overlays.addOverlay("text", {
    x: 40,
    y: localFactorY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 6,
    leftMargin: 5,
    text: "Local\nFactor:"
});

var localFactorSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: localFactorY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var localFactorMinThumbX = 110;
var localFactorMaxThumbX = localFactorMinThumbX + 110;
var localFactorThumbX = localFactorMinThumbX + ((localFactorMaxThumbX - localFactorMinThumbX) * (AudioReflector.getLocalAudioAttenuationFactor() / localFactorScale));
var localFactorThumb = Overlays.addOverlay("image", {
    x: localFactorThumbX,
    y: localFactorY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 0, green: 128, blue: 128 },
    alpha: 1
});

var combFilterY = topY;
topY += sliderHeight;

var combFilterLabel = Overlays.addOverlay("text", {
    x: 40,
    y: combFilterY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 6,
    leftMargin: 5,
    text: "Comb Filter\nWindow:"
});

var combFilterSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: combFilterY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var combFilterMinThumbX = 110;
var combFilterMaxThumbX = combFilterMinThumbX + 110;
var combFilterThumbX = combFilterMinThumbX + ((combFilterMaxThumbX - combFilterMinThumbX) * (AudioReflector.getCombFilterWindow() / combFilterScale));
var combFilterThumb = Overlays.addOverlay("image", {
    x: combFilterThumbX,
    y: combFilterY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 128, green: 128, blue: 0 },
    alpha: 1
});

var reflectiveY = topY;
topY += sliderHeight;

var reflectiveLabel = Overlays.addOverlay("text", {
    x: 40,
    y: reflectiveY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 6,
    leftMargin: 5,
    text: "Reflective\nRatio:"
});

var reflectiveSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: reflectiveY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var reflectiveMinThumbX = 110;
var reflectiveMaxThumbX = reflectiveMinThumbX + 110;
reflectiveThumbX = reflectiveMinThumbX + ((reflectiveMaxThumbX - reflectiveMinThumbX) * AudioReflector.getReflectiveRatio());
var reflectiveThumb = Overlays.addOverlay("image", {
    x: reflectiveThumbX,
    y: reflectiveY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var diffusionY = topY;
topY += sliderHeight;

var diffusionLabel = Overlays.addOverlay("text", {
    x: 40,
    y: diffusionY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 6,
    leftMargin: 5,
    text: "Diffusion\nRatio:"
});

var diffusionSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: diffusionY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var diffusionMinThumbX = 110;
var diffusionMaxThumbX = diffusionMinThumbX + 110;
diffusionThumbX = diffusionMinThumbX + ((diffusionMaxThumbX - diffusionMinThumbX) * AudioReflector.getDiffusionRatio());
var diffusionThumb = Overlays.addOverlay("image", {
    x: diffusionThumbX,
    y: diffusionY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 0, green: 255, blue: 255 },
    alpha: 1
});

var absorptionY = topY;
topY += sliderHeight;

var absorptionLabel = Overlays.addOverlay("text", {
    x: 40,
    y: absorptionY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 6,
    leftMargin: 5,
    text: "Absorption\nRatio:"
});

var absorptionSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: absorptionY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var absorptionMinThumbX = 110;
var absorptionMaxThumbX = absorptionMinThumbX + 110;
absorptionThumbX = absorptionMinThumbX + ((absorptionMaxThumbX - absorptionMinThumbX) * AudioReflector.getAbsorptionRatio());
var absorptionThumb = Overlays.addOverlay("image", {
    x: absorptionThumbX,
    y: absorptionY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 255, green: 0, blue: 255 },
    alpha: 1
});

var originalY = topY;
topY += sliderHeight;

var originalLabel = Overlays.addOverlay("text", {
    x: 40,
    y: originalY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 6,
    leftMargin: 5,
    text: "Original\nMix:"
});

var originalSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: originalY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var originalMinThumbX = 110;
var originalMaxThumbX = originalMinThumbX + 110;
var originalThumbX = originalMinThumbX + ((originalMaxThumbX - originalMinThumbX) * (AudioReflector.getOriginalSourceAttenuation() / originalScale));
var originalThumb = Overlays.addOverlay("image", {
    x: originalThumbX,
    y: originalY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 128, green: 128, blue: 0 },
    alpha: 1
});

var echoesY = topY;
topY += sliderHeight;

var echoesLabel = Overlays.addOverlay("text", {
    x: 40,
    y: echoesY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0 },
    textColor: { red: 255, green: 255, blue: 255 },
    topMargin: 6,
    leftMargin: 5,
    text: "Echoes\nMix:"
});

var echoesSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: echoesY, width: 150, height: sliderHeight },
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: HIFI_PUBLIC_BUCKET + "images/slider.png",
    color: { red: 255, green: 255, blue: 255 },
    alpha: 1
});

var echoesMinThumbX = 110;
var echoesMaxThumbX = echoesMinThumbX + 110;
var echoesThumbX = echoesMinThumbX + ((echoesMaxThumbX - echoesMinThumbX) * (AudioReflector.getEchoesAttenuation() / echoesScale));
var echoesThumb = Overlays.addOverlay("image", {
    x: echoesThumbX,
    y: echoesY + 9,
    width: 18,
    height: 17,
    imageURL: HIFI_PUBLIC_BUCKET + "images/thumb.png",
    color: { red: 128, green: 128, blue: 0 },
    alpha: 1
});

// When our script shuts down, we should clean up all of our overlays
function scriptEnding() {
    Overlays.deleteOverlay(factorLabel);
    Overlays.deleteOverlay(factorThumb);
    Overlays.deleteOverlay(factorSlider);

    Overlays.deleteOverlay(combFilterLabel);
    Overlays.deleteOverlay(combFilterThumb);
    Overlays.deleteOverlay(combFilterSlider);

    Overlays.deleteOverlay(localFactorLabel);
    Overlays.deleteOverlay(localFactorThumb);
    Overlays.deleteOverlay(localFactorSlider);

    Overlays.deleteOverlay(speedLabel);
    Overlays.deleteOverlay(speedThumb);
    Overlays.deleteOverlay(speedSlider);

    Overlays.deleteOverlay(delayLabel);
    Overlays.deleteOverlay(delayThumb);
    Overlays.deleteOverlay(delaySlider);

    Overlays.deleteOverlay(fanoutLabel);
    Overlays.deleteOverlay(fanoutThumb);
    Overlays.deleteOverlay(fanoutSlider);

    Overlays.deleteOverlay(reflectiveLabel);
    Overlays.deleteOverlay(reflectiveThumb);
    Overlays.deleteOverlay(reflectiveSlider);

    Overlays.deleteOverlay(diffusionLabel);
    Overlays.deleteOverlay(diffusionThumb);
    Overlays.deleteOverlay(diffusionSlider);

    Overlays.deleteOverlay(absorptionLabel);
    Overlays.deleteOverlay(absorptionThumb);
    Overlays.deleteOverlay(absorptionSlider);

    Overlays.deleteOverlay(echoesLabel);
    Overlays.deleteOverlay(echoesThumb);
    Overlays.deleteOverlay(echoesSlider);

    Overlays.deleteOverlay(originalLabel);
    Overlays.deleteOverlay(originalThumb);
    Overlays.deleteOverlay(originalSlider);
}
Script.scriptEnding.connect(scriptEnding);

var count = 0;

// Our update() function is called at approximately 60fps, and we will use it to animate our various overlays
function update(deltaTime) {
    count++;
}
Script.update.connect(update);

// The sliders are handled in the mouse event callbacks.
var movingSliderDelay = false;
var movingSliderFanout = false;
var movingSliderSpeed = false;
var movingSliderFactor = false;
var movingSliderCombFilter = false;
var movingSliderLocalFactor = false;
var movingSliderReflective = false;
var movingSliderDiffusion = false;
var movingSliderAbsorption = false;
var movingSliderOriginal = false;
var movingSliderEchoes = false;

var thumbClickOffsetX = 0;
var newThumbX = 0; // shared between mouseMoveEvent and mouseReleaseEvent

function mouseMoveEvent(event) {
    if (movingSliderDelay) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < delayMinThumbX) {
            newThumbX = delayMinThumbX;
        }
        if (newThumbX > delayMaxThumbX) {
            newThumbX = delayMaxThumbX;
        }
        Overlays.editOverlay(delayThumb, { x: newThumbX });
        var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
        AudioReflector.setPreDelay(delay);
    }
    if (movingSliderFanout) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < fanoutMinThumbX) {
            newThumbX = fanoutMinThumbX;
        }
        if (newThumbX > fanoutMaxThumbX) {
            newThumbX = fanoutMaxThumbX;
        }
        Overlays.editOverlay(fanoutThumb, { x: newThumbX });
        var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale);
        AudioReflector.setDiffusionFanout(fanout);
    }
    if (movingSliderSpeed) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < speedMinThumbX) {
            newThumbX = speedMinThumbX;
        }
        if (newThumbX > speedMaxThumbX) {
            newThumbX = speedMaxThumbX;
        }
        Overlays.editOverlay(speedThumb, { x: newThumbX });
        var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale;
        AudioReflector.setSoundMsPerMeter(speed);
    }
    if (movingSliderFactor) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < factorMinThumbX) {
            newThumbX = factorMinThumbX;
        }
        if (newThumbX > factorMaxThumbX) {
            newThumbX = factorMaxThumbX;
        }
        Overlays.editOverlay(factorThumb, { x: newThumbX });
        var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
        AudioReflector.setDistanceAttenuationScalingFactor(factor);
    }
    if (movingSliderCombFilter) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < combFilterMinThumbX) {
            newThumbX = combFilterMinThumbX;
        }
        if (newThumbX > combFilterMaxThumbX) {
            newThumbX = combFilterMaxThumbX;
        }
        Overlays.editOverlay(combFilterThumb, { x: newThumbX });
        var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
        AudioReflector.setCombFilterWindow(combFilter);
    }
    if (movingSliderLocalFactor) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < localFactorMinThumbX) {
            newThumbX = localFactorMinThumbX;
        }
        if (newThumbX > localFactorMaxThumbX) {
            newThumbX = localFactorMaxThumbX;
        }
        Overlays.editOverlay(localFactorThumb, { x: newThumbX });
        var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale;
        AudioReflector.setLocalAudioAttenuationFactor(localFactor);
    }
    if (movingSliderAbsorption) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < absorptionMinThumbX) {
            newThumbX = absorptionMinThumbX;
        }
        if (newThumbX > absorptionMaxThumbX) {
            newThumbX = absorptionMaxThumbX;
        }
        Overlays.editOverlay(absorptionThumb, { x: newThumbX });
        var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale;
        setAbsorptionRatio(absorption);
    }
    if (movingSliderReflective) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < reflectiveMinThumbX) {
            newThumbX = reflectiveMinThumbX;
        }
        if (newThumbX > reflectiveMaxThumbX) {
            newThumbX = reflectiveMaxThumbX;
        }
        Overlays.editOverlay(reflectiveThumb, { x: newThumbX });
        var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale;
        setReflectiveRatio(reflective);
    }
    if (movingSliderDiffusion) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < diffusionMinThumbX) {
            newThumbX = diffusionMinThumbX;
        }
        if (newThumbX > diffusionMaxThumbX) {
            newThumbX = diffusionMaxThumbX;
        }
        Overlays.editOverlay(diffusionThumb, { x: newThumbX });
        var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
        setDiffusionRatio(diffusion);
    }
    if (movingSliderEchoes) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < echoesMinThumbX) {
            newThumbX = echoesMinThumbX;
        }
        if (newThumbX > echoesMaxThumbX) {
            newThumbX = echoesMaxThumbX;
        }
        Overlays.editOverlay(echoesThumb, { x: newThumbX });
        var echoes = ((newThumbX - echoesMinThumbX) / (echoesMaxThumbX - echoesMinThumbX)) * echoesScale;
        AudioReflector.setEchoesAttenuation(echoes);
    }
    if (movingSliderOriginal) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < originalMinThumbX) {
            newThumbX = originalMinThumbX;
        }
        if (newThumbX > originalMaxThumbX) {
            newThumbX = originalMaxThumbX;
        }
        Overlays.editOverlay(originalThumb, { x: newThumbX });
        var original = ((newThumbX - originalMinThumbX) / (originalMaxThumbX - originalMinThumbX)) * originalScale;
        AudioReflector.setOriginalSourceAttenuation(original);
    }
}
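
Every branch above is the inverse of the thumb-placement rule, with the dragged position clamped to the track before being converted back into a parameter value:

    v = \text{scale} \cdot \frac{\operatorname{clamp}(x_{\text{mouse}} - x_{\text{offset}},\, x_{\min},\, x_{\max}) - x_{\min}}{x_{\max} - x_{\min}}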

// we also handle click detection in our mousePressEvent()
function mousePressEvent(event) {
    var clickedOverlay = Overlays.getOverlayAtPoint({ x: event.x, y: event.y });
    if (clickedOverlay == delayThumb) {
        movingSliderDelay = true;
        thumbClickOffsetX = event.x - delayThumbX;
    }
    if (clickedOverlay == fanoutThumb) {
        movingSliderFanout = true;
        thumbClickOffsetX = event.x - fanoutThumbX;
    }
    if (clickedOverlay == speedThumb) {
        movingSliderSpeed = true;
        thumbClickOffsetX = event.x - speedThumbX;
    }
    if (clickedOverlay == factorThumb) {
        movingSliderFactor = true;
        thumbClickOffsetX = event.x - factorThumbX;
    }
    if (clickedOverlay == localFactorThumb) {
        movingSliderLocalFactor = true;
        thumbClickOffsetX = event.x - localFactorThumbX;
    }
    if (clickedOverlay == combFilterThumb) {
        movingSliderCombFilter = true;
        thumbClickOffsetX = event.x - combFilterThumbX;
    }
    if (clickedOverlay == diffusionThumb) {
        movingSliderDiffusion = true;
        thumbClickOffsetX = event.x - diffusionThumbX;
    }
    if (clickedOverlay == absorptionThumb) {
        movingSliderAbsorption = true;
        thumbClickOffsetX = event.x - absorptionThumbX;
    }
    if (clickedOverlay == reflectiveThumb) {
        movingSliderReflective = true;
        thumbClickOffsetX = event.x - reflectiveThumbX;
    }
    if (clickedOverlay == originalThumb) {
        movingSliderOriginal = true;
        thumbClickOffsetX = event.x - originalThumbX;
    }
    if (clickedOverlay == echoesThumb) {
        movingSliderEchoes = true;
        thumbClickOffsetX = event.x - echoesThumbX;
    }
}

function mouseReleaseEvent(event) {
    if (movingSliderDelay) {
        movingSliderDelay = false;
        var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
        AudioReflector.setPreDelay(delay);
        delayThumbX = newThumbX;
    }
    if (movingSliderFanout) {
        movingSliderFanout = false;
        var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale);
        AudioReflector.setDiffusionFanout(fanout);
        fanoutThumbX = newThumbX;
    }
    if (movingSliderSpeed) {
        movingSliderSpeed = false;
        var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale;
        AudioReflector.setSoundMsPerMeter(speed);
        speedThumbX = newThumbX;
    }
    if (movingSliderFactor) {
        movingSliderFactor = false;
        var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
        AudioReflector.setDistanceAttenuationScalingFactor(factor);
        factorThumbX = newThumbX;
    }
    if (movingSliderCombFilter) {
        movingSliderCombFilter = false;
        var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
        AudioReflector.setCombFilterWindow(combFilter);
        combFilterThumbX = newThumbX;
    }
    if (movingSliderLocalFactor) {
        movingSliderLocalFactor = false;
        var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale;
        AudioReflector.setLocalAudioAttenuationFactor(localFactor);
        localFactorThumbX = newThumbX;
    }
    if (movingSliderReflective) {
        movingSliderReflective = false;
        var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale;
        setReflectiveRatio(reflective);
        reflectiveThumbX = newThumbX;
        updateRatioSliders();
    }
    if (movingSliderDiffusion) {
        movingSliderDiffusion = false;
        var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
        setDiffusionRatio(diffusion);
        diffusionThumbX = newThumbX;
        updateRatioSliders();
    }
    if (movingSliderAbsorption) {
        movingSliderAbsorption = false;
        var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale;
        setAbsorptionRatio(absorption);
        absorptionThumbX = newThumbX;
        updateRatioSliders();
    }
    if (movingSliderEchoes) {
        movingSliderEchoes = false;
        var echoes = ((newThumbX - echoesMinThumbX) / (echoesMaxThumbX - echoesMinThumbX)) * echoesScale;
        AudioReflector.setEchoesAttenuation(echoes);
        echoesThumbX = newThumbX;
    }
    if (movingSliderOriginal) {
        movingSliderOriginal = false;
        var original = ((newThumbX - originalMinThumbX) / (originalMaxThumbX - originalMinThumbX)) * originalScale;
        AudioReflector.setOriginalSourceAttenuation(original);
        originalThumbX = newThumbX;
    }
}

Controller.mouseMoveEvent.connect(mouseMoveEvent);
Controller.mousePressEvent.connect(mousePressEvent);
Controller.mouseReleaseEvent.connect(mouseReleaseEvent);

@@ -2034,17 +2034,6 @@ void Application::init() {
     connect(_rearMirrorTools, SIGNAL(shrinkView()), SLOT(shrinkMirrorView()));
     connect(_rearMirrorTools, SIGNAL(resetView()), SLOT(resetSensors()));
-
-    // set up our audio reflector
-    _audioReflector.setMyAvatar(getAvatar());
-    _audioReflector.setVoxels(_voxels.getTree());
-    _audioReflector.setAudio(getAudio());
-    _audioReflector.setAvatarManager(&_avatarManager);
-
-    connect(getAudio(), &Audio::processInboundAudio, &_audioReflector, &AudioReflector::processInboundAudio, Qt::DirectConnection);
-    connect(getAudio(), &Audio::processLocalAudio, &_audioReflector, &AudioReflector::processLocalAudio, Qt::DirectConnection);
-    connect(getAudio(), &Audio::preProcessOriginalInboundAudio, &_audioReflector,
-            &AudioReflector::preProcessOriginalInboundAudio, Qt::DirectConnection);
-
     connect(getAudio(), &Audio::muteToggled, AudioDeviceScriptingInterface::getInstance(),
             &AudioDeviceScriptingInterface::muteToggled, Qt::DirectConnection);
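
The three removed connects bind the audio pipeline's per-buffer signals to the reflector with Qt::DirectConnection, which runs the slot synchronously on the emitting (audio) thread rather than queueing it through an event loop. A minimal sketch of that pattern under stated assumptions (AudioLike and ReflectorLike are hypothetical stand-ins, not hifi classes; build with Qt's moc, e.g. CMake AUTOMOC):

#include <QByteArray>
#include <QDebug>
#include <QObject>

class AudioLike : public QObject {
    Q_OBJECT
public:
    // Stand-in for the audio pipeline handing a buffer to its listeners.
    void deliver(unsigned int sampleTime, const QByteArray& samples) {
        emit processInboundAudio(sampleTime, samples);
    }
signals:
    void processInboundAudio(unsigned int sampleTime, const QByteArray& samples);
};

class ReflectorLike : public QObject {
    Q_OBJECT
public slots:
    void processInboundAudio(unsigned int sampleTime, const QByteArray& samples) {
        qDebug() << "echoing" << samples.size() << "bytes at sample time" << sampleTime;
    }
};

int main() {
    AudioLike audio;
    ReflectorLike reflector;
    // Same shape as the removed connect(getAudio(), ..., Qt::DirectConnection):
    QObject::connect(&audio, &AudioLike::processInboundAudio,
                     &reflector, &ReflectorLike::processInboundAudio, Qt::DirectConnection);
    // The slot runs before deliver() returns; no event loop is required.
    audio.deliver(0, QByteArray(960, 0));
    return 0;
}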

@@ -3064,12 +3053,6 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly, RenderAr
         glColor3f(1,0,0);
         _geometryCache.renderSphere(originSphereRadius, 15, 15);
-
-        // draw the audio reflector overlay
-        {
-            PerformanceTimer perfTimer("audio");
-            _audioReflector.render();
-        }
-
         // Draw voxels
         if (Menu::getInstance()->isOptionChecked(MenuOption::Voxels)) {
             PerformanceTimer perfTimer("voxels");
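
The removed block wraps the render call in a scoped PerformanceTimer, an RAII timer: construction stamps the start time, and the destructor at the closing brace records the elapsed cost under the given label. A standalone approximation with std::chrono (PerfTimer is a hypothetical stand-in, not hifi's PerformanceTimer):

#include <chrono>
#include <cstdio>

class PerfTimer {
public:
    explicit PerfTimer(const char* label)
        : _label(label), _start(std::chrono::steady_clock::now()) {}
    ~PerfTimer() {
        // Runs when the scope closes, printing the elapsed time for the label.
        auto us = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - _start).count();
        std::printf("%s: %lld us\n", _label, static_cast<long long>(us));
    }
private:
    const char* _label;
    std::chrono::steady_clock::time_point _start;
};

int main() {
    { // same shape as the removed "audio" block: time exactly this scope
        PerfTimer perfTimer("audio");
        // ... render work would go here ...
    }
    return 0;
}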

@@ -3997,7 +3980,6 @@ void Application::registerScriptEngineWithApplicationServices(ScriptEngine* scri
     scriptEngine->registerGlobalObject("AudioDevice", AudioDeviceScriptingInterface::getInstance());
     scriptEngine->registerGlobalObject("AnimationCache", &_animationCache);
     scriptEngine->registerGlobalObject("SoundCache", &SoundCache::getInstance());
-    scriptEngine->registerGlobalObject("AudioReflector", &_audioReflector);
     scriptEngine->registerGlobalObject("Account", AccountScriptingInterface::getInstance());
     scriptEngine->registerGlobalObject("Metavoxels", &_metavoxels);
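
This registration is what made the AudioReflector.* calls in audioReflectorTools.js resolve: a QObject is published under a name in the script engine's global scope, and its invokable methods become callable from JavaScript. A sketch of the same kind of bridge using stock QJSEngine from Qt's QtQml module (an assumption for illustration, not hifi's ScriptEngine; Reflector is a hypothetical class):

#include <QCoreApplication>
#include <QDebug>
#include <QJSEngine>
#include <QObject>

class Reflector : public QObject {
    Q_OBJECT
public:
    Q_INVOKABLE float getPreDelay() const { return 20.0f; }
};

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);
    QJSEngine engine;
    Reflector reflector;
    reflector.setParent(&app); // parented, so the JS engine does not take ownership
    // Rough equivalent of registerGlobalObject("AudioReflector", &_audioReflector):
    engine.globalObject().setProperty("AudioReflector", engine.newQObject(&reflector));
    qDebug() << engine.evaluate("AudioReflector.getPreDelay()").toNumber(); // 20
    return 0;
}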

@@ -42,7 +42,6 @@
 #include "MainWindow.h"
 #include "Audio.h"
-#include "AudioReflector.h"
 #include "Camera.h"
 #include "DatagramProcessor.h"
 #include "Environment.h"

@@ -193,7 +192,6 @@ public:
     bool isThrottleRendering() const { return _glWidget->isThrottleRendering(); }
     MyAvatar* getAvatar() { return _myAvatar; }
     Audio* getAudio() { return &_audio; }
-    const AudioReflector* getAudioReflector() const { return &_audioReflector; }
     Camera* getCamera() { return &_myCamera; }
     ViewFrustum* getViewFrustum() { return &_viewFrustum; }
     ViewFrustum* getDisplayViewFrustum() { return &_displayViewFrustum; }

@@ -635,7 +633,6 @@ private:
     Overlays _overlays;
     ApplicationOverlay _applicationOverlay;

-    AudioReflector _audioReflector;
     RunningScriptsWidget* _runningScriptsWidget;
     QHash<QString, ScriptEngine*> _scriptEnginesHash;
     bool _runningScriptsWidgetWasVisible;

AudioReflector.cpp (deleted file)
@@ -1,868 +0,0 @@
//
// AudioReflector.cpp
// interface
//
// Created by Brad Hefta-Gaub on 4/2/2014
// Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
//

#include <QMutexLocker>

#include "AudioReflector.h"
#include "Menu.h"

const float DEFAULT_PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections
const float DEFAULT_MS_DELAY_PER_METER = 3.0f;
const float MINIMUM_ATTENUATION_TO_REFLECT = 1.0f / 256.0f;
const float DEFAULT_DISTANCE_SCALING_FACTOR = 2.0f;
const float MAXIMUM_DELAY_MS = 1000.0 * 20.0f; // stop reflecting after path is this long
const int DEFAULT_DIFFUSION_FANOUT = 5;
const unsigned int ABSOLUTE_MAXIMUM_BOUNCE_COUNT = 10;
const float DEFAULT_LOCAL_ATTENUATION_FACTOR = 0.125;
const float DEFAULT_COMB_FILTER_WINDOW = 0.05f; // ms delay differential to avoid

const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point

const float DEFAULT_ABSORPTION_RATIO = 0.125; // 12.5% is absorbed
const float DEFAULT_DIFFUSION_RATIO = 0.125; // 12.5% is diffused
const float DEFAULT_ORIGINAL_ATTENUATION = 1.0f;
const float DEFAULT_ECHO_ATTENUATION = 1.0f;
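
For scale, MINIMUM_ATTENUATION_TO_REFLECT = 1/256 corresponds to roughly -48 dB, the point at which a reflection path is treated as inaudible and bouncing stops:

    20 \log_{10}\!\left(\tfrac{1}{256}\right) \approx -48.2\ \text{dB}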

AudioReflector::AudioReflector(QObject* parent) :
    QObject(parent),
    _preDelay(DEFAULT_PRE_DELAY),
    _soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
    _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
    _localAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
    _combFilterWindow(DEFAULT_COMB_FILTER_WINDOW),
    _diffusionFanout(DEFAULT_DIFFUSION_FANOUT),
    _absorptionRatio(DEFAULT_ABSORPTION_RATIO),
    _diffusionRatio(DEFAULT_DIFFUSION_RATIO),
    _originalSourceAttenuation(DEFAULT_ORIGINAL_ATTENUATION),
    _allEchoesAttenuation(DEFAULT_ECHO_ATTENUATION),
    _withDiffusion(false),
    _lastPreDelay(DEFAULT_PRE_DELAY),
    _lastSoundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
    _lastDistanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
    _lastLocalAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
    _lastDiffusionFanout(DEFAULT_DIFFUSION_FANOUT),
    _lastAbsorptionRatio(DEFAULT_ABSORPTION_RATIO),
    _lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO),
    _lastDontDistanceAttenuate(false),
    _lastAlternateDistanceAttenuate(false)
{
    _reflections = 0;
    _diffusionPathCount = 0;
    _officialAverageAttenuation = _averageAttenuation = 0.0f;
    _officialMaxAttenuation = _maxAttenuation = 0.0f;
    _officialMinAttenuation = _minAttenuation = 0.0f;
    _officialAverageDelay = _averageDelay = 0;
    _officialMaxDelay = _maxDelay = 0;
    _officialMinDelay = _minDelay = 0;
    _inboundEchoesCount = 0;
    _inboundEchoesSuppressedCount = 0;
    _localEchoesCount = 0;
    _localEchoesSuppressedCount = 0;
}

bool AudioReflector::haveAttributesChanged() {
    // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
    bool withDiffusion = true;

    // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
    bool dontDistanceAttenuate = false;

    // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
    bool alternateDistanceAttenuate = false;

    bool attributesChange = (_withDiffusion != withDiffusion
        || _lastPreDelay != _preDelay
        || _lastSoundMsPerMeter != _soundMsPerMeter
        || _lastDistanceAttenuationScalingFactor != _distanceAttenuationScalingFactor
        || _lastDiffusionFanout != _diffusionFanout
        || _lastAbsorptionRatio != _absorptionRatio
        || _lastDiffusionRatio != _diffusionRatio
        || _lastDontDistanceAttenuate != dontDistanceAttenuate
        || _lastAlternateDistanceAttenuate != alternateDistanceAttenuate);

    if (attributesChange) {
        _withDiffusion = withDiffusion;
        _lastPreDelay = _preDelay;
        _lastSoundMsPerMeter = _soundMsPerMeter;
        _lastDistanceAttenuationScalingFactor = _distanceAttenuationScalingFactor;
        _lastDiffusionFanout = _diffusionFanout;
        _lastAbsorptionRatio = _absorptionRatio;
        _lastDiffusionRatio = _diffusionRatio;
        _lastDontDistanceAttenuate = dontDistanceAttenuate;
        _lastAlternateDistanceAttenuate = alternateDistanceAttenuate;
    }

    return attributesChange;
}

void AudioReflector::render() {
    // if we're not set up yet, or we're not processing spatial audio, then exit early
    if (!_myAvatar || !_audio->getProcessSpatialAudio()) {
        return;
    }

    // use this opportunity to calculate our reflections
    calculateAllReflections();

    // only render if we've been asked to do so
    bool renderPaths = false; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingRenderPaths)
    if (renderPaths) {
        drawRays();
    }
}

// delay = 1ms per foot
//       = 3ms per meter
float AudioReflector::getDelayFromDistance(float distance) {
    float delay = (_soundMsPerMeter * distance);
    bool includePreDelay = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)
    if (includePreDelay) {
        delay += _preDelay;
    }
    return delay;
}
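
The delay model is linear in path length, matching the comment's 3 ms per meter (sound actually covers a meter in about 2.9 ms at 343 m/s), plus the fixed pre-delay. For a 10 m reflection path with the defaults above:

    \mathrm{delay}(d) = 3\,\tfrac{\text{ms}}{\text{m}} \cdot d + 20\ \text{ms},
    \qquad \mathrm{delay}(10\ \text{m}) = 30 + 20 = 50\ \text{ms}.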

// attenuation = from the Audio Mixer
float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
    // !Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
    bool doDistanceAttenuation = true;

    // !Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);
    bool originalFormula = true;

    float distanceCoefficient = 1.0f;

    if (doDistanceAttenuation) {
        if (originalFormula) {
            const float DISTANCE_SCALE = 2.5f;
            const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
            const float DISTANCE_LOG_BASE = 2.5f;
            const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);

            float distanceSquareToSource = distance * distance;

            // calculate the distance coefficient using the distance to this node
            distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
                                       DISTANCE_SCALE_LOG +
                                       (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
            distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor());
        } else {
            // From Fred: If we wanted something that would produce a tail that could go up to 5 seconds in a
            // really big room, that would suggest the sound still has to be audible after traveling about
            // 1500 meters. If it's a sound of average volume, we probably have about 30 db, or 5 base2 orders
            // of magnitude we can drop down before the sound becomes inaudible. (That's approximate headroom
            // based on a few sloppy assumptions.) So we could try a factor like 1 / (2^(D/300)) for starters.
            // 1 / (2^(D/300))
            const float DISTANCE_BASE = 2.0f;
            const float DISTANCE_DENOMINATOR = 300.0f;
            const float DISTANCE_NUMERATOR = 300.0f;
            distanceCoefficient = DISTANCE_NUMERATOR / powf(DISTANCE_BASE, (distance / DISTANCE_DENOMINATOR));
            distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor());
        }
    }

    return distanceCoefficient;
}
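
Written out, the two branches are (with s = getDistanceAttenuationScalingFactor(), and using DISTANCE_SCALE_LOG = ln 2.5 / ln 2.5 = 1 and 0.5 ln d^2 = ln d to simplify the first):

    a_{\text{orig}}(d) = \min\!\bigl(1,\; 0.3^{\,\log_{2.5} d} \cdot s\bigr),
    \qquad
    a_{\text{alt}}(d) = \min\!\Bigl(1,\; \frac{300}{2^{\,d/300}} \cdot s\Bigr).

Note the alternate branch's numerator is 300 rather than the 1 the comment proposes, so with s = 1 the clamp keeps it pinned at 1 until d = 300 log_2 300, roughly 2.5 km.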

glm::vec3 AudioReflector::getFaceNormal(BoxFace face) {
    // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces);
    bool wantSlightRandomness = true;
    glm::vec3 faceNormal;
    const float MIN_RANDOM_LENGTH = 0.99f;
    const float MAX_RANDOM_LENGTH = 1.0f;
    const float NON_RANDOM_LENGTH = 1.0f;
    float normalLength = wantSlightRandomness ? randFloatInRange(MIN_RANDOM_LENGTH, MAX_RANDOM_LENGTH) : NON_RANDOM_LENGTH;
    float remainder = (1.0f - normalLength) / 2.0f;
    float remainderSignA = randomSign();
    float remainderSignB = randomSign();

    if (face == MIN_X_FACE) {
        faceNormal = glm::vec3(-normalLength, remainder * remainderSignA, remainder * remainderSignB);
    } else if (face == MAX_X_FACE) {
        faceNormal = glm::vec3(normalLength, remainder * remainderSignA, remainder * remainderSignB);
    } else if (face == MIN_Y_FACE) {
        faceNormal = glm::vec3(remainder * remainderSignA, -normalLength, remainder * remainderSignB);
    } else if (face == MAX_Y_FACE) {
        faceNormal = glm::vec3(remainder * remainderSignA, normalLength, remainder * remainderSignB);
    } else if (face == MIN_Z_FACE) {
        faceNormal = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -normalLength);
    } else if (face == MAX_Z_FACE) {
        faceNormal = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, normalLength);
    }
    return faceNormal;
}

// set up our buffers for our attenuated and delayed samples
const int NUMBER_OF_CHANNELS = 2;

void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint,
                                        const QByteArray& samples, unsigned int sampleTime, int sampleRate) {

    bool wantEarSeparation = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
    bool wantStereo = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource);
    glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() :
                                                     _myAvatar->getHead()->getPosition();
    glm::vec3 leftEarPosition = wantEarSeparation ? _myAvatar->getHead()->getLeftEarPosition() :
                                                    _myAvatar->getHead()->getPosition();

    int totalNumberOfSamples = samples.size() / sizeof(int16_t);
    int totalNumberOfStereoSamples = samples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS);

    const int16_t* originalSamplesData = (const int16_t*)samples.constData();
    QByteArray attenuatedLeftSamples;
    QByteArray attenuatedRightSamples;
    attenuatedLeftSamples.resize(samples.size());
    attenuatedRightSamples.resize(samples.size());

    int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data();
    int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data();

    // calculate the distance to the ears
    float rightEarDistance = glm::distance(audiblePoint.location, rightEarPosition);
    float leftEarDistance = glm::distance(audiblePoint.location, leftEarPosition);

    float rightEarDelayMsecs = getDelayFromDistance(rightEarDistance) + audiblePoint.delay;
    float leftEarDelayMsecs = getDelayFromDistance(leftEarDistance) + audiblePoint.delay;
    float averageEarDelayMsecs = (leftEarDelayMsecs + rightEarDelayMsecs) / 2.0f;

    bool safeToInject = true; // assume the best

    // check to see if this new injection point would be within the comb filter
    // suppression window for any of the existing known delays
    QMap<float, float>& knownDelays = (source == INBOUND_AUDIO) ? _inboundAudioDelays : _localAudioDelays;
    QMap<float, float>::const_iterator lowerBound = knownDelays.lowerBound(averageEarDelayMsecs - _combFilterWindow);
    if (lowerBound != knownDelays.end()) {
        float closestFound = lowerBound.value();
        float deltaToClosest = (averageEarDelayMsecs - closestFound);
        if (deltaToClosest > -_combFilterWindow && deltaToClosest < _combFilterWindow) {
            safeToInject = false;
        }
    }

    // keep track of any of our suppressed echoes so we can report them in our statistics
    if (!safeToInject) {
        QVector<float>& suppressedEchoes = (source == INBOUND_AUDIO) ? _inboundEchoesSuppressed : _localEchoesSuppressed;
        suppressedEchoes << averageEarDelayMsecs;
    } else {
        knownDelays[averageEarDelayMsecs] = averageEarDelayMsecs;

        _totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
        _delayCount += 2;
        _maxDelay = std::max(_maxDelay, rightEarDelayMsecs);
        _maxDelay = std::max(_maxDelay, leftEarDelayMsecs);
        _minDelay = std::min(_minDelay, rightEarDelayMsecs);
        _minDelay = std::min(_minDelay, leftEarDelayMsecs);

        int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
        int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;

        float rightEarAttenuation = audiblePoint.attenuation *
                                    getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance);
        float leftEarAttenuation = audiblePoint.attenuation *
                                   getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance);

        _totalAttenuation += rightEarAttenuation + leftEarAttenuation;
        _attenuationCount += 2;
        _maxAttenuation = std::max(_maxAttenuation, rightEarAttenuation);
        _maxAttenuation = std::max(_maxAttenuation, leftEarAttenuation);
        _minAttenuation = std::min(_minAttenuation, rightEarAttenuation);
        _minAttenuation = std::min(_minAttenuation, leftEarAttenuation);

        // run through the samples, and attenuate them
        for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
            int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
            int16_t rightSample = leftSample;
            if (wantStereo) {
                rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
            }

            attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] =
                leftSample * leftEarAttenuation * _allEchoesAttenuation;
            attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;

            attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
            attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] =
                rightSample * rightEarAttenuation * _allEchoesAttenuation;
        }

        // now inject the attenuated array with the appropriate delay
        unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
        unsigned int sampleTimeRight = sampleTime + rightEarDelay;

        _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
        _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);

        _injectedEchoes++;
    }
}
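
The safeToInject check above is the comb-filter guard: a new echo is dropped when its average arrival time lands within _combFilterWindow ms of one already scheduled, because two near-coincident copies of the same signal interfere as a comb filter. The same check as a standalone function over std::map (the removed code uses QMap; tryScheduleEcho is an illustrative name):

#include <cmath>
#include <cstdio>
#include <map>

// Returns true if delayMs is at least windowMs away from every delay already
// scheduled; inserts it when safe, mirroring the knownDelays bookkeeping.
bool tryScheduleEcho(std::map<float, float>& knownDelays, float delayMs, float windowMs) {
    // First element with key >= delayMs - windowMs, mirroring QMap::lowerBound.
    auto it = knownDelays.lower_bound(delayMs - windowMs);
    if (it != knownDelays.end() && std::fabs(delayMs - it->second) < windowMs) {
        return false; // too close to an existing echo: suppress it
    }
    knownDelays[delayMs] = delayMs;
    return true;
}

int main() {
    std::map<float, float> delays;
    std::printf("%d\n", tryScheduleEcho(delays, 50.00f, 0.05f)); // 1: scheduled
    std::printf("%d\n", tryScheduleEcho(delays, 50.02f, 0.05f)); // 0: within the window
    std::printf("%d\n", tryScheduleEcho(delays, 50.10f, 0.05f)); // 1: outside the window
    return 0;
}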
|
|
||||||
|
|
||||||
|
|
||||||
void AudioReflector::preProcessOriginalInboundAudio(unsigned int sampleTime,
|
|
||||||
QByteArray& samples, const QAudioFormat& format) {
|
|
||||||
|
|
||||||
if (_originalSourceAttenuation != 1.0f) {
|
|
||||||
int numberOfSamples = (samples.size() / sizeof(int16_t));
|
|
||||||
int16_t* sampleData = (int16_t*)samples.data();
|
|
||||||
for (int i = 0; i < numberOfSamples; i++) {
|
|
||||||
sampleData[i] = sampleData[i] * _originalSourceAttenuation;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}

void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
    bool processLocalAudio = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)
    if (processLocalAudio) {
        const int NUM_CHANNELS_INPUT = 1;
        const int NUM_CHANNELS_OUTPUT = 2;
        const int EXPECTED_SAMPLE_RATE = 24000;
        if (format.channelCount() == NUM_CHANNELS_INPUT && format.sampleRate() == EXPECTED_SAMPLE_RATE) {
            QAudioFormat outputFormat = format;
            outputFormat.setChannelCount(NUM_CHANNELS_OUTPUT);
            QByteArray stereoInputData(samples.size() * NUM_CHANNELS_OUTPUT, 0);
            int numberOfSamples = (samples.size() / sizeof(int16_t));
            int16_t* monoSamples = (int16_t*)samples.data();
            int16_t* stereoSamples = (int16_t*)stereoInputData.data();

            for (int i = 0; i < numberOfSamples; i++) {
                stereoSamples[i * NUM_CHANNELS_OUTPUT] = monoSamples[i] * _localAudioAttenuationFactor;
                stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * _localAudioAttenuationFactor;
            }
            _localAudioDelays.clear();
            _localEchoesSuppressed.clear();
            echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat);
            _localEchoesCount = _localAudioDelays.size();
            _localEchoesSuppressedCount = _localEchoesSuppressed.size();
        }
    }
}
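
// Editor's sketch (not in the original file): the loop above is a plain mono-to-stereo up-mix
// with a gain; each mono sample is duplicated into an interleaved L,R pair. A stand-alone form:

#include <cstdint>
#include <vector>

static std::vector<int16_t> upmixMonoToStereo(const std::vector<int16_t>& mono, float gain) {
    std::vector<int16_t> stereo(mono.size() * 2);
    for (size_t i = 0; i < mono.size(); i++) {
        int16_t attenuated = (int16_t)(mono[i] * gain); // assumes gain in [0, 1]
        stereo[2 * i] = attenuated;      // left channel
        stereo[2 * i + 1] = attenuated;  // right channel
    }
    return stereo;
}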

void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
    _inboundAudioDelays.clear();
    _inboundEchoesSuppressed.clear();
    echoAudio(INBOUND_AUDIO, sampleTime, samples, format);
    _inboundEchoesCount = _inboundAudioDelays.size();
    _inboundEchoesSuppressedCount = _inboundEchoesSuppressed.size();
}

void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
    QMutexLocker locker(&_mutex);

    _maxDelay = 0;
    _maxAttenuation = 0.0f;
    _minDelay = std::numeric_limits<int>::max();
    _minAttenuation = std::numeric_limits<float>::max();
    _totalDelay = 0.0f;
    _delayCount = 0;
    _totalAttenuation = 0.0f;
    _attenuationCount = 0;

    // depending on whether we're processing local or external audio, pick the correct points vector
    QVector<AudiblePoint>& audiblePoints = source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;

    int injectCalls = 0;
    _injectedEchoes = 0;
    foreach(const AudiblePoint& audiblePoint, audiblePoints) {
        injectCalls++;
        injectAudiblePoint(source, audiblePoint, samples, sampleTime, format.sampleRate());
    }

    /*
    qDebug() << "injectCalls=" << injectCalls;
    qDebug() << "_injectedEchoes=" << _injectedEchoes;
    */

    _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount;
    _averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount;

    if (_reflections == 0) {
        _minDelay = 0.0f;
        _minAttenuation = 0.0f;
    }

    _officialMaxDelay = _maxDelay;
    _officialMinDelay = _minDelay;
    _officialMaxAttenuation = _maxAttenuation;
    _officialMinAttenuation = _minAttenuation;
    _officialAverageDelay = _averageDelay;
    _officialAverageAttenuation = _averageAttenuation;
}
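
// Editor's sketch (an assumption; the definition is not shown in this diff): getDelayFromDistance()
// used by the injection path scales path length by _soundMsPerMeter ("ms per meter that sound
// travels", per the header below). At the physical speed of sound, ~343 m/s, that factor is about
// 2.92 ms per meter:

static inline float delayMsFromDistance(float distanceMeters, float soundMsPerMeter = 2.92f) {
    return distanceMeters * soundMsPerMeter; // a 10 m echo path adds roughly 29 ms of delay
}

// Raising soundMsPerMeter above the physical value slows the virtual sound down, which makes the
// space read as larger, consistent with the "larger means slower, which sounds bigger" note in
// the header.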

void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) {
    glDisable(GL_LIGHTING);
    glLineWidth(2.0);

    // Draw the vector itself
    glBegin(GL_LINES);
    glColor3f(color.x, color.y, color.z);
    glVertex3f(start.x, start.y, start.z);
    glVertex3f(end.x, end.y, end.z);
    glEnd();

    glEnable(GL_LIGHTING);
}


AudioPath::AudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& direction,
        float attenuation, float delay, float distance, bool isDiffusion, int bounceCount) :
    source(source),
    isDiffusion(isDiffusion),
    startPoint(origin),
    startDirection(direction),
    startDelay(delay),
    startAttenuation(attenuation),

    lastPoint(origin),
    lastDirection(direction),
    lastDistance(distance),
    lastDelay(delay),
    lastAttenuation(attenuation),
    bounceCount(bounceCount),

    finalized(false),
    reflections()
{
}

void AudioReflector::addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection,
        float initialAttenuation, float initialDelay, float initialDistance, bool isDiffusion) {

    AudioPath* path = new AudioPath(source, origin, initialDirection, initialAttenuation, initialDelay,
                                    initialDistance, isDiffusion, 0);

    QVector<AudioPath*>& audioPaths = source == INBOUND_AUDIO ? _inboundAudioPaths : _localAudioPaths;

    audioPaths.push_back(path);
}

// NOTE: This is a prototype of an eventual utility that will identify the speaking sources for the inbound audio
// stream. It's not currently called but will be added soon.
void AudioReflector::identifyAudioSources() {
    // looking for audio sources....
    foreach (const AvatarSharedPointer& avatarPointer, _avatarManager->getAvatarHash()) {
        Avatar* avatar = static_cast<Avatar*>(avatarPointer.data());
        if (!avatar->isInitialized()) {
            continue;
        }
        qDebug() << "avatar[" << avatar << "] loudness:" << avatar->getAudioLoudness();
    }
}

void AudioReflector::calculateAllReflections() {
    // only recalculate when we've moved, or if the attributes have changed
    // TODO: what about the case where new voxels are added in front of us???
    bool wantHeadOrientation = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented)
    glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientationInWorldFrame() : _myAvatar->getOrientation();
    glm::vec3 origin = _myAvatar->getHead()->getPosition();
    glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition();

    bool shouldRecalc = _reflections == 0
                            || !isSimilarPosition(origin, _origin)
                            || !isSimilarOrientation(orientation, _orientation)
                            || !isSimilarPosition(listenerPosition, _listenerPosition)
                            || haveAttributesChanged();

    if (shouldRecalc) {
        QMutexLocker locker(&_mutex);
        quint64 start = usecTimestampNow();
        _origin = origin;
        _orientation = orientation;
        _listenerPosition = listenerPosition;
        analyzePaths(); // actually does the work
        quint64 end = usecTimestampNow();
        const bool wantDebugging = false;
        if (wantDebugging) {
            qDebug() << "calculateAllReflections() elapsed=" << (end - start);
        }
    }
}
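
// Editor's sketch (an assumption): isSimilarPosition() and isSimilarOrientation() are used above
// but are not part of this diff. Plausible epsilon-comparison forms, which is all the
// recalculation guard needs (assumes <glm/glm.hpp> and <glm/gtc/quaternion.hpp>; the thresholds
// are hypothetical):

static inline bool isSimilarPositionSketch(const glm::vec3& a, const glm::vec3& b) {
    const float SIMILAR_DISTANCE = 0.01f; // hypothetical threshold, in meters
    return glm::distance(a, b) < SIMILAR_DISTANCE;
}

static inline bool isSimilarOrientationSketch(const glm::quat& a, const glm::quat& b) {
    const float SIMILAR_DOT = 0.9999f; // dot of two nearly equal unit quaternions is near 1
    return fabsf(glm::dot(a, b)) > SIMILAR_DOT;
}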

void AudioReflector::drawRays() {
    const glm::vec3 RED(1,0,0);
    const glm::vec3 GREEN(0,1,0);
    const glm::vec3 BLUE(0,0,1);
    const glm::vec3 CYAN(0,1,1);

    int diffusionNumber = 0;

    QMutexLocker locker(&_mutex);

    // draw the paths for inbound audio
    foreach(AudioPath* const& path, _inboundAudioPaths) {
        // diffusion paths are drawn in GREEN; original reflections in RED
        if (path->isDiffusion) {
            diffusionNumber++;
            drawPath(path, GREEN);
        } else {
            drawPath(path, RED);
        }
    }

    bool processLocalAudio = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)
    if (processLocalAudio) {
        // draw the paths for local audio
        foreach(AudioPath* const& path, _localAudioPaths) {
            // diffusion paths are drawn in CYAN; original reflections in BLUE
            if (path->isDiffusion) {
                diffusionNumber++;
                drawPath(path, CYAN);
            } else {
                drawPath(path, BLUE);
            }
        }
    }
}

void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) {
    glm::vec3 start = path->startPoint;
    glm::vec3 color = originalColor;
    const float COLOR_ADJUST_PER_BOUNCE = 0.75f;

    foreach (glm::vec3 end, path->reflections) {
        drawVector(start, end, color);
        start = end;
        color = color * COLOR_ADJUST_PER_BOUNCE;
    }
}
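
// Editor's note: the per-bounce color falloff above is a simple geometric decay, so after n
// bounces a segment is drawn at 0.75^n of the original brightness. A quick check as a sketch
// (not in the original file):

static float brightnessAfterBounces(int bounces) {
    const float COLOR_ADJUST_PER_BOUNCE = 0.75f;
    float brightness = 1.0f;
    for (int i = 0; i < bounces; i++) {
        brightness *= COLOR_ADJUST_PER_BOUNCE; // 1 bounce -> 0.75, 4 bounces -> ~0.32
    }
    return brightness;
}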

void AudioReflector::clearPaths() {
    // clear our inbound audio paths
    foreach(AudioPath* const& path, _inboundAudioPaths) {
        delete path;
    }
    _inboundAudioPaths.clear();
    _inboundAudiblePoints.clear(); // clear our inbound audible points

    // clear our local audio paths
    foreach(AudioPath* const& path, _localAudioPaths) {
        delete path;
    }
    _localAudioPaths.clear();
    _localAudiblePoints.clear(); // clear our local audible points
}

// Here's how this works: we have an array of AudioPaths. We loop over all of the audio paths we're
// currently calculating, and cast one ray per path. If that ray doesn't reflect, or reaches a maximum
// distance/attenuation, then the path is considered finalized.
// If the ray hits a surface, then, based on the characteristics of that surface, we calculate the new
// attenuation, path length, and delay for the primary path. For surfaces that have diffusion, we also
// create "fanout" number of new paths; those new paths have an origin of the reflection point, and an
// initial attenuation of their diffusion ratio. Those new paths are added to the active audio paths,
// and are analyzed on the next loop.
void AudioReflector::analyzePaths() {
    clearPaths();

    // add our initial paths
    glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT);
    glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP);
    glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT);
    glm::vec3 left = -right;
    glm::vec3 down = -up;
    glm::vec3 back = -front;
    glm::vec3 frontRightUp = glm::normalize(front + right + up);
    glm::vec3 frontLeftUp = glm::normalize(front + left + up);
    glm::vec3 backRightUp = glm::normalize(back + right + up);
    glm::vec3 backLeftUp = glm::normalize(back + left + up);
    glm::vec3 frontRightDown = glm::normalize(front + right + down);
    glm::vec3 frontLeftDown = glm::normalize(front + left + down);
    glm::vec3 backRightDown = glm::normalize(back + right + down);
    glm::vec3 backLeftDown = glm::normalize(back + left + down);

    float initialAttenuation = 1.0f;

    bool wantPreDelay = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)
    float preDelay = wantPreDelay ? _preDelay : 0.0f;

    // NOTE: we're still calculating our initial paths based on the listener's position. But since the
    // analysis code has been updated to support individual sound sources (which is how we support
    // diffusion), we can use this new paradigm to add support for individual, more directional
    // sound sources.

    addAudioPath(INBOUND_AUDIO, _origin, front, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, right, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, up, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, down, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, back, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, left, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, frontRightUp, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, frontLeftUp, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, backRightUp, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, backLeftUp, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, frontRightDown, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, frontLeftDown, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, backRightDown, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, backLeftDown, initialAttenuation, preDelay);

    // the original paths for the local audio are directional to the front of the origin
    addAudioPath(LOCAL_AUDIO, _origin, front, initialAttenuation, preDelay);
    addAudioPath(LOCAL_AUDIO, _origin, frontRightUp, initialAttenuation, preDelay);
    addAudioPath(LOCAL_AUDIO, _origin, frontLeftUp, initialAttenuation, preDelay);
    addAudioPath(LOCAL_AUDIO, _origin, frontRightDown, initialAttenuation, preDelay);
    addAudioPath(LOCAL_AUDIO, _origin, frontLeftDown, initialAttenuation, preDelay);

    // loop through all our audio paths and keep analyzing them until they complete
    int steps = 0;
    int activePaths = _inboundAudioPaths.size() + _localAudioPaths.size(); // when we start, all paths are active
    while (activePaths > 0) {
        activePaths = analyzePathsSingleStep();
        steps++;
    }
    _reflections = _inboundAudiblePoints.size() + _localAudiblePoints.size();
    _diffusionPathCount = countDiffusionPaths();
}
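
// Editor's note: as the comment above analyzePaths() describes, each reflection point on a
// diffusive surface can spawn "fanout" new paths, so the path count can grow geometrically with
// bounce depth. A sketch of the worst-case bound (for illustration only; in practice
// MINIMUM_ATTENUATION_TO_REFLECT and MAXIMUM_DELAY_MS prune most of these):

static int worstCaseDiffusionPaths(int initialPaths, int fanout, int maxBounces) {
    int total = 0;
    int frontier = initialPaths; // 14 inbound + 5 local = 19 initial paths above
    for (int bounce = 0; bounce < maxBounces; bounce++) {
        frontier *= fanout;  // every surviving path can spawn fanout diffusion paths
        total += frontier;
    }
    return total;
}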

int AudioReflector::countDiffusionPaths() {
    int diffusionCount = 0;

    foreach(AudioPath* const& path, _inboundAudioPaths) {
        if (path->isDiffusion) {
            diffusionCount++;
        }
    }
    foreach(AudioPath* const& path, _localAudioPaths) {
        if (path->isDiffusion) {
            diffusionCount++;
        }
    }
    return diffusionCount;
}

int AudioReflector::analyzePathsSingleStep() {
    // iterate all the active sound paths, calculate one step per active path
    int activePaths = 0;

    QVector<AudioPath*>* pathsLists[] = { &_inboundAudioPaths, &_localAudioPaths };

    for (unsigned int i = 0; i < sizeof(pathsLists) / sizeof(pathsLists[0]); i++) {

        QVector<AudioPath*>& pathList = *pathsLists[i];

        foreach(AudioPath* const& path, pathList) {

            glm::vec3 start = path->lastPoint;
            glm::vec3 direction = path->lastDirection;
            OctreeElement* elementHit; // output from findRayIntersection
            float distance; // output from findRayIntersection
            BoxFace face; // output from findRayIntersection

            if (!path->finalized) {
                activePaths++;

                if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) {
                    path->finalized = true;
                } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
                    // TODO: we need to decide how we want to handle locking on the ray intersection. If we force lock,
                    // we get an accurate picture, but it could prevent rendering of the voxels. If we trylock (default),
                    // we might not get ray intersections where they may exist, but we can't really detect that case...
                    // add last parameter of Octree::Lock to force locking
                    handlePathPoint(path, distance, elementHit, face);

                } else {
                    // If we didn't intersect, but this was a diffusion ray, then we will go ahead and cast a short ray out
                    // from our last known point, in the last known direction, and leave that sound source hanging there
                    if (path->isDiffusion) {
                        const float MINIMUM_RANDOM_DISTANCE = 0.25f;
                        const float MAXIMUM_RANDOM_DISTANCE = 0.5f;
                        float distance = randFloatInRange(MINIMUM_RANDOM_DISTANCE, MAXIMUM_RANDOM_DISTANCE);
                        handlePathPoint(path, distance, NULL, UNKNOWN_FACE);
                    } else {
                        path->finalized = true; // if it doesn't intersect, then it is finished
                    }
                }
            }
        }
    }
    return activePaths;
}

void AudioReflector::handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face) {
    glm::vec3 start = path->lastPoint;
    glm::vec3 direction = path->lastDirection;
    glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));

    float currentReflectiveAttenuation = path->lastAttenuation; // only the reflective components
    float currentDelay = path->lastDelay; // start with our delay so far
    float pathDistance = path->lastDistance;

    pathDistance += glm::distance(start, end);

    float toListenerDistance = glm::distance(end, _listenerPosition);

    // adjust our current delay by just the delay from the most recent ray
    currentDelay += getDelayFromDistance(distance);

    // now we know the current attenuation for the "perfect" reflection case, but we now incorporate
    // our surface materials to determine how much of this ray is absorbed, reflected, and diffused
    SurfaceCharacteristics material = getSurfaceCharacteristics(elementHit);

    float reflectiveAttenuation = currentReflectiveAttenuation * material.reflectiveRatio;
    float totalDiffusionAttenuation = currentReflectiveAttenuation * material.diffusionRatio;

    bool wantDiffusions = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)
    int fanout = wantDiffusions ? _diffusionFanout : 0;

    float partialDiffusionAttenuation = fanout < 1 ? 0.0f : totalDiffusionAttenuation / (float)fanout;

    // total delay includes the bounce back to the listener
    float totalDelay = currentDelay + getDelayFromDistance(toListenerDistance);
    float toListenerAttenuation = getDistanceAttenuationCoefficient(toListenerDistance + pathDistance);

    // if our resulting partial diffusion attenuation is still above our minimum attenuation,
    // then we add new paths for each diffusion point
    if ((partialDiffusionAttenuation * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT
            && totalDelay < MAXIMUM_DELAY_MS) {

        // diffusions fan out from random places on the hemisphere around the collision point
        for (int i = 0; i < fanout; i++) {
            glm::vec3 diffusion;

            // We're creating a random normal here. But we want it to be relatively dramatic compared to how we handle
            // our slightly random surface normals.
            const float MINIMUM_RANDOM_LENGTH = 0.5f;
            const float MAXIMUM_RANDOM_LENGTH = 1.0f;
            float randomness = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
            float remainder = (1.0f - randomness) / 2.0f;
            float remainderSignA = randomSign();
            float remainderSignB = randomSign();

            if (face == MIN_X_FACE) {
                diffusion = glm::vec3(-randomness, remainder * remainderSignA, remainder * remainderSignB);
            } else if (face == MAX_X_FACE) {
                diffusion = glm::vec3(randomness, remainder * remainderSignA, remainder * remainderSignB);
            } else if (face == MIN_Y_FACE) {
                diffusion = glm::vec3(remainder * remainderSignA, -randomness, remainder * remainderSignB);
            } else if (face == MAX_Y_FACE) {
                diffusion = glm::vec3(remainder * remainderSignA, randomness, remainder * remainderSignB);
            } else if (face == MIN_Z_FACE) {
                diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -randomness);
            } else if (face == MAX_Z_FACE) {
                diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, randomness);
            } else if (face == UNKNOWN_FACE) {
                float randomnessX = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
                float randomnessY = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
                float randomnessZ = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
                diffusion = glm::vec3(direction.x * randomnessX, direction.y * randomnessY, direction.z * randomnessZ);
            }

            diffusion = glm::normalize(diffusion);

            // add a new audio path for this diffusion; the new path's source is the same as the original source
            addAudioPath(path->source, end, diffusion, partialDiffusionAttenuation, currentDelay, pathDistance, true);
        }
    } else {
        const bool wantDebugging = false;
        if (wantDebugging) {
            if ((partialDiffusionAttenuation * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) {
                qDebug() << "too quiet to diffuse";
                qDebug() << "    partialDiffusionAttenuation=" << partialDiffusionAttenuation;
                qDebug() << "    toListenerAttenuation=" << toListenerAttenuation;
                qDebug() << "    result=" << (partialDiffusionAttenuation * toListenerAttenuation);
                qDebug() << "    MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT;
            }
            if (totalDelay > MAXIMUM_DELAY_MS) {
                qDebug() << "too delayed to diffuse";
                qDebug() << "    totalDelay=" << totalDelay;
                qDebug() << "    MAXIMUM_DELAY_MS=" << MAXIMUM_DELAY_MS;
            }
        }
    }

    // if our reflective attenuation is above our minimum, then add our reflection point and
    // allow our path to continue
    if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT
            && totalDelay < MAXIMUM_DELAY_MS) {

        // add this location, with the reflective attenuation as well as the total diffusion attenuation
        // NOTE: we add the delay to the audible point, not back to the listener. The additional delay
        // and attenuation to the listener is recalculated at the point where we actually inject the
        // audio so that it can be adjusted to ear position
        AudiblePoint point = { end, currentDelay, (reflectiveAttenuation + totalDiffusionAttenuation), pathDistance };

        QVector<AudiblePoint>& audiblePoints = path->source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;

        audiblePoints.push_back(point);

        // add this location to the path points, so we can visualize it
        path->reflections.push_back(end);

        // now, if our reflective attenuation is over our minimum, then keep going...
        if (reflectiveAttenuation * toListenerAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) {
            glm::vec3 faceNormal = getFaceNormal(face);
            path->lastDirection = glm::normalize(glm::reflect(direction, faceNormal));
            path->lastPoint = end;
            path->lastAttenuation = reflectiveAttenuation;
            path->lastDelay = currentDelay;
            path->lastDistance = pathDistance;
            path->bounceCount++;
        } else {
            path->finalized = true; // if we're too quiet, then we're done
        }
    } else {
        const bool wantDebugging = false;
        if (wantDebugging) {
            if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) {
                qDebug() << "too quiet to add audible point";
                qDebug() << "    reflectiveAttenuation + totalDiffusionAttenuation=" << (reflectiveAttenuation + totalDiffusionAttenuation);
                qDebug() << "    toListenerAttenuation=" << toListenerAttenuation;
                qDebug() << "    result=" << ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation);
                qDebug() << "    MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT;
            }
            if (totalDelay > MAXIMUM_DELAY_MS) {
                qDebug() << "too delayed to add audible point";
                qDebug() << "    totalDelay=" << totalDelay;
                qDebug() << "    MAXIMUM_DELAY_MS=" << MAXIMUM_DELAY_MS;
            }
        }
        path->finalized = true; // if we're too quiet, then we're done
    }
}
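
// Editor's sketch (not in the original file): the face-based diffusion vector above always leans
// out of the hit surface. For MIN_X_FACE, with randomness r in [0.5, 1.0] and remainder
// (1 - r) / 2 on the other two axes, the x component dominates; e.g. r = 0.8 gives
// (-0.8, ±0.1, ±0.1) before normalization:

static glm::vec3 sampleDiffusionForMinXFace(float randomness, float signA, float signB) {
    // signA and signB are ±1.0, as produced by randomSign() above
    float remainder = (1.0f - randomness) / 2.0f; // what's left, split across the other two axes
    glm::vec3 diffusion(-randomness, remainder * signA, remainder * signB);
    return glm::normalize(diffusion);             // unit direction biased along the -x surface normal
}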

// TODO: eventually we will add support for different surface characteristics based on the element
// that is hit, which is why we pass in the elementHit to this helper function. But for now, all
// surfaces have the same characteristics.
SurfaceCharacteristics AudioReflector::getSurfaceCharacteristics(OctreeElement* elementHit) {
    SurfaceCharacteristics result = { getReflectiveRatio(), _absorptionRatio, _diffusionRatio };
    return result;
}

void AudioReflector::setReflectiveRatio(float ratio) {
    float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
    float currentReflectiveRatio = (1.0f - (_absorptionRatio + _diffusionRatio));
    float halfDifference = (safeRatio - currentReflectiveRatio) / 2.0f;

    // evenly distribute the difference between the two other ratios
    _absorptionRatio -= halfDifference;
    _diffusionRatio -= halfDifference;
}

void AudioReflector::setAbsorptionRatio(float ratio) {
    float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
    _absorptionRatio = safeRatio;
    const float MAX_COMBINED_RATIO = 1.0f;
    if (_absorptionRatio + _diffusionRatio > MAX_COMBINED_RATIO) {
        _diffusionRatio = MAX_COMBINED_RATIO - _absorptionRatio;
    }
}

void AudioReflector::setDiffusionRatio(float ratio) {
    float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
    _diffusionRatio = safeRatio;
    const float MAX_COMBINED_RATIO = 1.0f;
    if (_absorptionRatio + _diffusionRatio > MAX_COMBINED_RATIO) {
        _absorptionRatio = MAX_COMBINED_RATIO - _diffusionRatio;
    }
}
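
// Editor's note: the three setters above maintain the invariant that absorption + diffusion +
// reflection sum to 1.0, with the reflective ratio derived rather than stored for this purpose
// (see getReflectiveRatio() in the header below). A worked example as a sketch (the starting
// values are assumed, for illustration only):

static void ratioInvariantExample() {
    float absorption = 0.5f, diffusion = 0.3f;                     // derived reflective ratio = 0.2
    float requestedReflective = 0.4f;
    float current = 1.0f - (absorption + diffusion);               // 0.2
    float halfDifference = (requestedReflective - current) / 2.0f; // 0.1
    absorption -= halfDifference;                                  // 0.4
    diffusion -= halfDifference;                                   // 0.2
    // derived reflective is now 1.0 - (0.4 + 0.2) = 0.4, and the three still sum to 1.0
}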
@ -1,254 +0,0 @@

//
//  AudioReflector.h
//  interface
//
//  Created by Brad Hefta-Gaub on 4/2/2014
//  Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef interface_AudioReflector_h
#define interface_AudioReflector_h

#include <QMutex>

#include <VoxelTree.h>

#include "Audio.h"
#include "avatar/MyAvatar.h"
#include "avatar/AvatarManager.h"

enum AudioSource {
    LOCAL_AUDIO,
    INBOUND_AUDIO
};

class AudioPath {
public:
    AudioPath(AudioSource source = INBOUND_AUDIO, const glm::vec3& origin = glm::vec3(0.0f),
              const glm::vec3& direction = glm::vec3(0.0f), float attenuation = 1.0f,
              float delay = 0.0f, float distance = 0.0f, bool isDiffusion = false, int bounceCount = 0);

    AudioSource source;
    bool isDiffusion;
    glm::vec3 startPoint;
    glm::vec3 startDirection;
    float startDelay;
    float startAttenuation;

    glm::vec3 lastPoint;
    glm::vec3 lastDirection;
    float lastDistance;
    float lastDelay;
    float lastAttenuation;
    unsigned int bounceCount;

    bool finalized;
    QVector<glm::vec3> reflections;
};

class AudiblePoint {
public:
    glm::vec3 location; /// location of the audible point
    float delay; /// total delay, including pre-delay, to the audible location, not to the listener's ears
    float attenuation; /// only the reflective & diffusive portion of attenuation; doesn't include distance attenuation
    float distance; /// total distance to the audible location, not to the listener's ears
};

class SurfaceCharacteristics {
public:
    float reflectiveRatio;
    float absorptionRatio;
    float diffusionRatio;
};

class AudioReflector : public QObject {
    Q_OBJECT
public:
    AudioReflector(QObject* parent = NULL);

    // setup functions to configure the resources used by the AudioReflector
    void setVoxels(VoxelTree* voxels) { _voxels = voxels; }
    void setMyAvatar(MyAvatar* myAvatar) { _myAvatar = myAvatar; }
    void setAudio(Audio* audio) { _audio = audio; }
    void setAvatarManager(AvatarManager* avatarManager) { _avatarManager = avatarManager; }

    void render(); /// must be called in the application render loop

    void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format);
    void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
    void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);

public slots:
    // statistics
    int getReflections() const { return _reflections; }
    float getAverageDelayMsecs() const { return _officialAverageDelay; }
    float getAverageAttenuation() const { return _officialAverageAttenuation; }
    float getMaxDelayMsecs() const { return _officialMaxDelay; }
    float getMaxAttenuation() const { return _officialMaxAttenuation; }
    float getMinDelayMsecs() const { return _officialMinDelay; }
    float getMinAttenuation() const { return _officialMinAttenuation; }
    float getDelayFromDistance(float distance);
    int getDiffusionPathCount() const { return _diffusionPathCount; }
    int getEchoesInjected() const { return _inboundEchoesCount + _localEchoesCount; }
    int getEchoesSuppressed() const { return _inboundEchoesSuppressedCount + _localEchoesSuppressedCount; }

    /// ms of delay added to all echoes
    float getPreDelay() const { return _preDelay; }
    void setPreDelay(float preDelay) { _preDelay = preDelay; }

    /// ms per meter that sound travels; larger means slower, which sounds bigger
    float getSoundMsPerMeter() const { return _soundMsPerMeter; }
    void setSoundMsPerMeter(float soundMsPerMeter) { _soundMsPerMeter = soundMsPerMeter; }

    /// scales attenuation to be louder or softer than the default distance attenuation
    float getDistanceAttenuationScalingFactor() const { return _distanceAttenuationScalingFactor; }
    void setDistanceAttenuationScalingFactor(float factor) { _distanceAttenuationScalingFactor = factor; }

    /// scales attenuation of local audio to be louder or softer than the default attenuation
    float getLocalAudioAttenuationFactor() const { return _localAudioAttenuationFactor; }
    void setLocalAudioAttenuationFactor(float factor) { _localAudioAttenuationFactor = factor; }

    /// ms window in which we will suppress echoes to reduce comb filter effects
    float getCombFilterWindow() const { return _combFilterWindow; }
    void setCombFilterWindow(float value) { _combFilterWindow = value; }

    /// number of points of diffusion from each reflection point; as fanout increases there are more chances for secondary
    /// echoes, but each diffusion ray is quieter and therefore more likely to be below the sound floor
    int getDiffusionFanout() const { return _diffusionFanout; }
    void setDiffusionFanout(int fanout) { _diffusionFanout = fanout; }

    /// ratio 0.0 - 1.0 of the amount of each ray that is absorbed upon hitting a surface
    float getAbsorptionRatio() const { return _absorptionRatio; }
    void setAbsorptionRatio(float ratio);

    /// ratio 0.0 - 1.0 of the amount of each ray that is diffused upon hitting a surface
    float getDiffusionRatio() const { return _diffusionRatio; }
    void setDiffusionRatio(float ratio);

    /// remaining ratio 0.0 - 1.0 of the amount of each ray that is cleanly reflected upon hitting a surface
    float getReflectiveRatio() const { return (1.0f - (_absorptionRatio + _diffusionRatio)); }
    void setReflectiveRatio(float ratio);

    // wet/dry mix - these don't affect any reflection calculations, only the final mix volumes
    float getOriginalSourceAttenuation() const { return _originalSourceAttenuation; }
    void setOriginalSourceAttenuation(float value) { _originalSourceAttenuation = value; }
    float getEchoesAttenuation() const { return _allEchoesAttenuation; }
    void setEchoesAttenuation(float value) { _allEchoesAttenuation = value; }

signals:

private:
    VoxelTree* _voxels; // used to access voxel scene
    MyAvatar* _myAvatar; // access to listener
    Audio* _audio; // access to audio API
    AvatarManager* _avatarManager; // access to avatar manager API

    // helpers for drawing
    void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);

    // helper for generically calculating attenuation based on distance
    float getDistanceAttenuationCoefficient(float distance);

    // statistics
    int _reflections;
    int _diffusionPathCount;
    int _delayCount;
    float _totalDelay;
    float _averageDelay;
    float _maxDelay;
    float _minDelay;
    float _officialAverageDelay;
    float _officialMaxDelay;
    float _officialMinDelay;
    int _attenuationCount;
    float _totalAttenuation;
    float _averageAttenuation;
    float _maxAttenuation;
    float _minAttenuation;
    float _officialAverageAttenuation;
    float _officialMaxAttenuation;
    float _officialMinAttenuation;

    glm::vec3 _listenerPosition;
    glm::vec3 _origin;
    glm::quat _orientation;

    QVector<AudioPath*> _inboundAudioPaths; /// audio paths we're processing for inbound audio
    QVector<AudiblePoint> _inboundAudiblePoints; /// the audible points that have been calculated from the inbound audio paths
    QMap<float, float> _inboundAudioDelays; /// delay times for currently injected audio points
    QVector<float> _inboundEchoesSuppressed; /// delay times for suppressed inbound echoes
    int _inboundEchoesCount;
    int _inboundEchoesSuppressedCount;

    QVector<AudioPath*> _localAudioPaths; /// audio paths we're processing for local audio
    QVector<AudiblePoint> _localAudiblePoints; /// the audible points that have been calculated from the local audio paths
    QMap<float, float> _localAudioDelays; /// delay times for currently injected audio points
    QVector<float> _localEchoesSuppressed; /// delay times for suppressed local echoes
    int _localEchoesCount;
    int _localEchoesSuppressedCount;

    // adds a sound source to begin an audio path trace; these can be the initial sound sources with their directional
    // properties, as well as diffusion sound sources
    void addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation,
                      float initialDelay, float initialDistance = 0.0f, bool isDiffusion = false);

    // helpers that handle audio path analysis
    int analyzePathsSingleStep();
    void handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face);
    void clearPaths();
    void analyzePaths();
    void drawRays();
    void drawPath(AudioPath* path, const glm::vec3& originalColor);
    void calculateAllReflections();
    int countDiffusionPaths();
    glm::vec3 getFaceNormal(BoxFace face);
    void identifyAudioSources();

    void injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
    void echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);

    // return the surface characteristics of the element we hit
    SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit = NULL);

    QMutex _mutex;

    float _preDelay;
    float _soundMsPerMeter;
    float _distanceAttenuationScalingFactor;
    float _localAudioAttenuationFactor;
    float _combFilterWindow;
    int _diffusionFanout; // number of points of diffusion from each reflection point

    // all elements have the same material for now...
    float _absorptionRatio;
    float _diffusionRatio;
    float _reflectiveRatio;

    // wet/dry mix - these don't affect any reflection calculations, only the final mix volumes
    float _originalSourceAttenuation; /// each sample of the original signal will be multiplied by this
    float _allEchoesAttenuation; /// each sample of all echo signals will be multiplied by this

    // remember the last known values at calculation
    bool haveAttributesChanged();

    bool _withDiffusion;
    float _lastPreDelay;
    float _lastSoundMsPerMeter;
    float _lastDistanceAttenuationScalingFactor;
    float _lastLocalAudioAttenuationFactor;
    int _lastDiffusionFanout;
    float _lastAbsorptionRatio;
    float _lastDiffusionRatio;
    bool _lastDontDistanceAttenuate;
    bool _lastAlternateDistanceAttenuate;

    int _injectedEchoes;
};


#endif // interface_AudioReflector_h
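
// Editor's sketch (not in the original files): how the pieces above were wired together at
// runtime, reconstructed from the setup functions and process*() entry points in this header.
// The exact call sites lived in Application/Audio code that isn't part of this diff, so treat
// the function and parameter names here as illustrative assumptions:

void exampleWiring(AudioReflector& reflector, VoxelTree* voxels, MyAvatar* avatar,
                   Audio* audio, AvatarManager* avatarManager,
                   unsigned int sampleTime, const QByteArray& networkSamples,
                   const QByteArray& micSamples, const QAudioFormat& format) {
    // one-time setup: hand the reflector the scene, the listener, and the audio sink
    reflector.setVoxels(voxels);
    reflector.setMyAvatar(avatar);
    reflector.setAudio(audio);
    reflector.setAvatarManager(avatarManager);

    // per audio frame: echo both the mixer stream and the local microphone
    reflector.processInboundAudio(sampleTime, networkSamples, format);
    reflector.processLocalAudio(sampleTime, micSamples, format);
    // and call reflector.render() from the render loop to visualize the rays
}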

@ -22,6 +22,7 @@
#include <MetavoxelClientManager.h>

#include "renderer/ProgramObject.h"
#include "renderer/TextureCache.h"

class HeightfieldBaseLayerBatch;
class HeightfieldSplatBatch;
@ -669,104 +669,6 @@ void Stats::display(
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, (char*)voxelStats.str().c_str(), color);
    }

    if (_expanded && wantSpatialProcessing) {
        verticalOffset += STATS_PELS_PER_LINE; // space one line...

        const AudioReflector* audioReflector = Application::getInstance()->getAudioReflector();

        // add some reflection stats
        char reflectionsStatus[128];

        bool includeOriginal = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)
        bool separateEars = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars)
        bool stereoSource = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource)
        bool randomSurfaces = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces)

        sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s, Normals: %s",
                audioReflector->getReflections(),
                (includeOriginal ? "included" : "silent"),
                (separateEars ? "two" : "one"),
                (stereoSource ? "stereo" : "mono"),
                (randomSurfaces ? "random" : "regular"));

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);

        bool wantPreDelay = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)
        float preDelay = wantPreDelay ? audioReflector->getPreDelay() : 0.0f;

        sprintf(reflectionsStatus, "Delay: pre: %6.3f, average %6.3f, max %6.3f, min %6.3f, speed: %6.3f",
                preDelay,
                audioReflector->getAverageDelayMsecs(),
                audioReflector->getMaxDelayMsecs(),
                audioReflector->getMinDelayMsecs(),
                audioReflector->getSoundMsPerMeter());

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);

        // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingDontDistanceAttenuate)
        bool distanceAttenuationDisabled = false;

        // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate)
        bool alternateDistanceAttenuationEnabled = false;

        sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, %s: %5.3f",
                audioReflector->getAverageAttenuation(),
                audioReflector->getMaxAttenuation(),
                audioReflector->getMinAttenuation(),
                (distanceAttenuationDisabled ? "Distance Factor [DISABLED]" :
                    alternateDistanceAttenuationEnabled ? "Distance Factor [ALTERNATE]" : "Distance Factor [STANDARD]"),
                audioReflector->getDistanceAttenuationScalingFactor());

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);

        bool localAudio = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)
        sprintf(reflectionsStatus, "Local Audio: %s Attenuation: %5.3f", (localAudio ? "yes" : "no"),
                audioReflector->getLocalAudioAttenuationFactor());

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);

        bool diffusionEnabled = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions)
        int fanout = diffusionEnabled ? audioReflector->getDiffusionFanout() : 0;
        int diffusionPaths = diffusionEnabled ? audioReflector->getDiffusionPathCount() : 0;
        sprintf(reflectionsStatus, "Diffusion: %s, Fanout: %d, Paths: %d",
                (diffusionEnabled ? "yes" : "no"), fanout, diffusionPaths);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);

        const float AS_PERCENT = 100.0f;
        float reflectiveRatio = audioReflector->getReflectiveRatio() * AS_PERCENT;
        float diffusionRatio = audioReflector->getDiffusionRatio() * AS_PERCENT;
        float absorptionRatio = audioReflector->getAbsorptionRatio() * AS_PERCENT;
        sprintf(reflectionsStatus, "Ratios: Reflective: %5.3f, Diffusion: %5.3f, Absorption: %5.3f",
                reflectiveRatio, diffusionRatio, absorptionRatio);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);

        sprintf(reflectionsStatus, "Comb Filter Window: %5.3f ms, Allowed: %d, Suppressed: %d",
                audioReflector->getCombFilterWindow(),
                audioReflector->getEchoesInjected(),
                audioReflector->getEchoesSuppressed());

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);

        sprintf(reflectionsStatus, "Wet/Dry Mix: Original: %5.3f Echoes: %5.3f",
                audioReflector->getOriginalSourceAttenuation(),
                audioReflector->getEchoesAttenuation());

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);
    }
}
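
// Editor's sketch (not in the original file): the block above formats into a fixed 128-byte
// reflectionsStatus buffer with sprintf. The strings shown stay well under that size, but a
// bounds-checked variant of the same pattern would be:

#include <cstdio>

static void formatReflectionStats(char* buffer, size_t bufferSize, int reflections) {
    // snprintf never writes past bufferSize and always NUL-terminates (for bufferSize > 0)
    snprintf(buffer, bufferSize, "Reflections: %d", reflections);
}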

void Stats::setMetavoxelStats(int internal, int leaves, int sendProgress,