diff --git a/examples/audioReflectorTools.js b/examples/audioReflectorTools.js new file mode 100644 index 0000000000..3cc6a1a21e --- /dev/null +++ b/examples/audioReflectorTools.js @@ -0,0 +1,711 @@ +// +// audioReflectorTools.js +// hifi +// +// Created by Brad Hefta-Gaub on 2/14/14. +// Copyright (c) 2014 HighFidelity, Inc. All rights reserved. +// +// Tools for manipulating the attributes of the AudioReflector behavior +// +// + + +var delayScale = 100.0; +var fanoutScale = 10.0; +var speedScale = 20; +var factorScale = 5.0; +var localFactorScale = 1.0; +var reflectiveScale = 100.0; +var diffusionScale = 100.0; +var absorptionScale = 100.0; +var combFilterScale = 50.0; + +// these three properties are bound together, if you change one, the others will also change +var reflectiveRatio = AudioReflector.getReflectiveRatio(); +var diffusionRatio = AudioReflector.getDiffusionRatio(); +var absorptionRatio = AudioReflector.getAbsorptionRatio(); + +var reflectiveThumbX; +var diffusionThumbX; +var absorptionThumbX; + +function setReflectiveRatio(reflective) { + var total = diffusionRatio + absorptionRatio + (reflective / reflectiveScale); + diffusionRatio = diffusionRatio / total; + absorptionRatio = absorptionRatio / total; + reflectiveRatio = (reflective / reflectiveScale) / total; + updateRatioValues(); +} + +function setDiffusionRatio(diffusion) { + var total = (diffusion / diffusionScale) + absorptionRatio + reflectiveRatio; + diffusionRatio = (diffusion / diffusionScale) / total; + absorptionRatio = absorptionRatio / total; + reflectiveRatio = reflectiveRatio / total; + updateRatioValues(); +} + +function setAbsorptionRatio(absorption) { + var total = diffusionRatio + (absorption / absorptionScale) + reflectiveRatio; + diffusionRatio = diffusionRatio / total; + absorptionRatio = (absorption / absorptionScale) / total; + reflectiveRatio = reflectiveRatio / total; + updateRatioValues(); +} + +function updateRatioSliders() { + reflectiveThumbX = reflectiveMinThumbX + ((reflectiveMaxThumbX - reflectiveMinThumbX) * reflectiveRatio); + diffusionThumbX = diffusionMinThumbX + ((diffusionMaxThumbX - diffusionMinThumbX) * diffusionRatio); + absorptionThumbX = absorptionMinThumbX + ((absorptionMaxThumbX - absorptionMinThumbX) * absorptionRatio); + + Overlays.editOverlay(reflectiveThumb, { x: reflectiveThumbX } ); + Overlays.editOverlay(diffusionThumb, { x: diffusionThumbX } ); + Overlays.editOverlay(absorptionThumb, { x: absorptionThumbX } ); +} + +function updateRatioValues() { + AudioReflector.setReflectiveRatio(reflectiveRatio); + AudioReflector.setDiffusionRatio(diffusionRatio); + AudioReflector.setAbsorptionRatio(absorptionRatio); +} + +var topY = 250; +var sliderHeight = 35; + +var delayY = topY; +topY += sliderHeight; +var delayLabel = Overlays.addOverlay("text", { + x: 40, + y: delayY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 12, + leftMargin: 5, + text: "Delay:" + }); + +var delaySlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: delayY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var delayMinThumbX = 110; +var delayMaxThumbX = delayMinThumbX + 110; +var delayThumbX = delayMinThumbX + ((delayMaxThumbX - delayMinThumbX) * 
(AudioReflector.getPreDelay() / delayScale)); +var delayThumb = Overlays.addOverlay("image", { + x: delayThumbX, + y: delayY + 9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 255, green: 0, blue: 0}, + alpha: 1 + }); + +var fanoutY = topY; +topY += sliderHeight; + +var fanoutLabel = Overlays.addOverlay("text", { + x: 40, + y: fanoutY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 12, + leftMargin: 5, + text: "Fanout:" + }); + +var fanoutSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: fanoutY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var fanoutMinThumbX = 110; +var fanoutMaxThumbX = fanoutMinThumbX + 110; +var fanoutThumbX = fanoutMinThumbX + ((fanoutMaxThumbX - fanoutMinThumbX) * (AudioReflector.getDiffusionFanout() / fanoutScale)); +var fanoutThumb = Overlays.addOverlay("image", { + x: fanoutThumbX, + y: fanoutY + 9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 255, green: 255, blue: 0}, + alpha: 1 + }); + + +var speedY = topY; +topY += sliderHeight; + +var speedLabel = Overlays.addOverlay("text", { + x: 40, + y: speedY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Speed\nin ms/m:" + }); + +var speedSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: speedY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var speedMinThumbX = 110; +var speedMaxThumbX = speedMinThumbX + 110; +var speedThumbX = speedMinThumbX + ((speedMaxThumbX - speedMinThumbX) * (AudioReflector.getSoundMsPerMeter() / speedScale)); +var speedThumb = Overlays.addOverlay("image", { + x: speedThumbX, + y: speedY+9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 0, green: 255, blue: 0}, + alpha: 1 + }); + +var factorY = topY; +topY += sliderHeight; + +var factorLabel = Overlays.addOverlay("text", { + x: 40, + y: factorY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Attenuation\nFactor:" + }); + + +var factorSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: factorY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var factorMinThumbX = 110; +var factorMaxThumbX = factorMinThumbX + 110; +var factorThumbX = factorMinThumbX + ((factorMaxThumbX - factorMinThumbX) * (AudioReflector.getDistanceAttenuationScalingFactor() / factorScale)); +var factorThumb = Overlays.addOverlay("image", { + x: factorThumbX, + y: 
factorY+9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 0, green: 0, blue: 255}, + alpha: 1 + }); + +var localFactorY = topY; +topY += sliderHeight; + +var localFactorLabel = Overlays.addOverlay("text", { + x: 40, + y: localFactorY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Local\nFactor:" + }); + + +var localFactorSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: localFactorY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var localFactorMinThumbX = 110; +var localFactorMaxThumbX = localFactorMinThumbX + 110; +var localFactorThumbX = localFactorMinThumbX + ((localFactorMaxThumbX - localFactorMinThumbX) * (AudioReflector.getLocalAudioAttenuationFactor() / localFactorScale)); +var localFactorThumb = Overlays.addOverlay("image", { + x: localFactorThumbX, + y: localFactorY+9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 0, green: 128, blue: 128}, + alpha: 1 + }); + +var combFilterY = topY; +topY += sliderHeight; + +var combFilterLabel = Overlays.addOverlay("text", { + x: 40, + y: combFilterY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Comb Filter\nWindow:" + }); + + +var combFilterSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: combFilterY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var combFilterMinThumbX = 110; +var combFilterMaxThumbX = combFilterMinThumbX + 110; +var combFilterThumbX = combFilterMinThumbX + ((combFilterMaxThumbX - combFilterMinThumbX) * (AudioReflector.getCombFilterWindow() / combFilterScale)); +var combFilterThumb = Overlays.addOverlay("image", { + x: combFilterThumbX, + y: combFilterY+9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 128, green: 128, blue: 0}, + alpha: 1 + }); + + +var reflectiveY = topY; +topY += sliderHeight; + +var reflectiveLabel = Overlays.addOverlay("text", { + x: 40, + y: reflectiveY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Reflective\nRatio:" + }); + + +var reflectiveSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: reflectiveY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var reflectiveMinThumbX = 110; +var reflectiveMaxThumbX = reflectiveMinThumbX + 110; +reflectiveThumbX = reflectiveMinThumbX + ((reflectiveMaxThumbX - reflectiveMinThumbX) * AudioReflector.getReflectiveRatio()); +var 
reflectiveThumb = Overlays.addOverlay("image", { + x: reflectiveThumbX, + y: reflectiveY+9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + +var diffusionY = topY; +topY += sliderHeight; + +var diffusionLabel = Overlays.addOverlay("text", { + x: 40, + y: diffusionY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Diffusion\nRatio:" + }); + + +var diffusionSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: diffusionY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var diffusionMinThumbX = 110; +var diffusionMaxThumbX = diffusionMinThumbX + 110; +diffusionThumbX = diffusionMinThumbX + ((diffusionMaxThumbX - diffusionMinThumbX) * AudioReflector.getDiffusionRatio()); +var diffusionThumb = Overlays.addOverlay("image", { + x: diffusionThumbX, + y: diffusionY+9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 0, green: 255, blue: 255}, + alpha: 1 + }); + +var absorptionY = topY; +topY += sliderHeight; + +var absorptionLabel = Overlays.addOverlay("text", { + x: 40, + y: absorptionY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Absorption\nRatio:" + }); + + +var absorptionSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: absorptionY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var absorptionMinThumbX = 110; +var absorptionMaxThumbX = absorptionMinThumbX + 110; +absorptionThumbX = absorptionMinThumbX + ((absorptionMaxThumbX - absorptionMinThumbX) * AudioReflector.getAbsorptionRatio()); +var absorptionThumb = Overlays.addOverlay("image", { + x: absorptionThumbX, + y: absorptionY+9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 255, green: 0, blue: 255}, + alpha: 1 + }); + + +// When our script shuts down, we should clean up all of our overlays +function scriptEnding() { + Overlays.deleteOverlay(factorLabel); + Overlays.deleteOverlay(factorThumb); + Overlays.deleteOverlay(factorSlider); + + Overlays.deleteOverlay(combFilterLabel); + Overlays.deleteOverlay(combFilterThumb); + Overlays.deleteOverlay(combFilterSlider); + + Overlays.deleteOverlay(localFactorLabel); + Overlays.deleteOverlay(localFactorThumb); + Overlays.deleteOverlay(localFactorSlider); + + Overlays.deleteOverlay(speedLabel); + Overlays.deleteOverlay(speedThumb); + Overlays.deleteOverlay(speedSlider); + + Overlays.deleteOverlay(delayLabel); + Overlays.deleteOverlay(delayThumb); + Overlays.deleteOverlay(delaySlider); + + Overlays.deleteOverlay(fanoutLabel); + Overlays.deleteOverlay(fanoutThumb); + Overlays.deleteOverlay(fanoutSlider); + + Overlays.deleteOverlay(reflectiveLabel); + Overlays.deleteOverlay(reflectiveThumb); 
+    Overlays.deleteOverlay(reflectiveSlider);
+
+    Overlays.deleteOverlay(diffusionLabel);
+    Overlays.deleteOverlay(diffusionThumb);
+    Overlays.deleteOverlay(diffusionSlider);
+
+    Overlays.deleteOverlay(absorptionLabel);
+    Overlays.deleteOverlay(absorptionThumb);
+    Overlays.deleteOverlay(absorptionSlider);
+
+}
+Script.scriptEnding.connect(scriptEnding);
+
+
+var count = 0;
+
+// Our update() function is called at approximately 60fps; for now we just count the frames
+function update(deltaTime) {
+    count++;
+}
+Script.update.connect(update);
+
+
+// The sliders are handled in the mouse event callbacks.
+var movingSliderDelay = false;
+var movingSliderFanout = false;
+var movingSliderSpeed = false;
+var movingSliderFactor = false;
+var movingSliderCombFilter = false;
+var movingSliderLocalFactor = false;
+var movingSliderReflective = false;
+var movingSliderDiffusion = false;
+var movingSliderAbsorption = false;
+
+var thumbClickOffsetX = 0;
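+
+// Dragging a thumb maps its x position linearly onto the parameter's range. For example, the delay thumb
+// dragged to the midpoint of its track yields ((165 - 110) / (220 - 110)) * delayScale = 50 msecs of pre-delay.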
+function mouseMoveEvent(event) {
+    if (movingSliderDelay) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < delayMinThumbX) {
+            newThumbX = delayMinThumbX;
+        }
+        if (newThumbX > delayMaxThumbX) {
+            newThumbX = delayMaxThumbX;
+        }
+        Overlays.editOverlay(delayThumb, { x: newThumbX } );
+        var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
+        AudioReflector.setPreDelay(delay);
+    }
+    if (movingSliderFanout) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < fanoutMinThumbX) {
+            newThumbX = fanoutMinThumbX;
+        }
+        if (newThumbX > fanoutMaxThumbX) {
+            newThumbX = fanoutMaxThumbX;
+        }
+        Overlays.editOverlay(fanoutThumb, { x: newThumbX } );
+        var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale);
+        AudioReflector.setDiffusionFanout(fanout);
+    }
+    if (movingSliderSpeed) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < speedMinThumbX) {
+            newThumbX = speedMinThumbX;
+        }
+        if (newThumbX > speedMaxThumbX) {
+            newThumbX = speedMaxThumbX;
+        }
+        Overlays.editOverlay(speedThumb, { x: newThumbX } );
+        var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale;
+        AudioReflector.setSoundMsPerMeter(speed);
+    }
+    if (movingSliderFactor) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < factorMinThumbX) {
+            newThumbX = factorMinThumbX;
+        }
+        if (newThumbX > factorMaxThumbX) {
+            newThumbX = factorMaxThumbX;
+        }
+        Overlays.editOverlay(factorThumb, { x: newThumbX } );
+        var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
+        AudioReflector.setDistanceAttenuationScalingFactor(factor);
+    }
+    if (movingSliderCombFilter) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < combFilterMinThumbX) {
+            newThumbX = combFilterMinThumbX;
+        }
+        if (newThumbX > combFilterMaxThumbX) {
+            newThumbX = combFilterMaxThumbX;
+        }
+        Overlays.editOverlay(combFilterThumb, { x: newThumbX } );
+        var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
+        AudioReflector.setCombFilterWindow(combFilter);
+    }
+
+    if (movingSliderLocalFactor) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < localFactorMinThumbX) {
+            newThumbX = localFactorMinThumbX;
+        }
+        if (newThumbX > localFactorMaxThumbX) {
+            newThumbX = localFactorMaxThumbX;
+        }
+        Overlays.editOverlay(localFactorThumb, { x: newThumbX } );
+        var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale;
+        AudioReflector.setLocalAudioAttenuationFactor(localFactor);
+    }
+
+    if (movingSliderAbsorption) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < absorptionMinThumbX) {
+            newThumbX = absorptionMinThumbX;
+        }
+        if (newThumbX > absorptionMaxThumbX) {
+            newThumbX = absorptionMaxThumbX;
+        }
+        Overlays.editOverlay(absorptionThumb, { x: newThumbX } );
+        var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale;
+        setAbsorptionRatio(absorption);
+    }
+
+    if (movingSliderReflective) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < reflectiveMinThumbX) {
+            newThumbX = reflectiveMinThumbX;
+        }
+        if (newThumbX > reflectiveMaxThumbX) {
+            newThumbX = reflectiveMaxThumbX;
+        }
+        Overlays.editOverlay(reflectiveThumb, { x: newThumbX } );
+        var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale;
+        setReflectiveRatio(reflective);
+    }
+
+    if (movingSliderDiffusion) {
+        newThumbX = event.x - thumbClickOffsetX;
+        if (newThumbX < diffusionMinThumbX) {
+            newThumbX = diffusionMinThumbX;
+        }
+        if (newThumbX > diffusionMaxThumbX) {
+            newThumbX = diffusionMaxThumbX;
+        }
+        Overlays.editOverlay(diffusionThumb, { x: newThumbX } );
+        var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
+        setDiffusionRatio(diffusion);
+    }
+
+}
+
+// we also handle click detection in our mousePressEvent()
+function mousePressEvent(event) {
+    var clickedOverlay = Overlays.getOverlayAtPoint({x: event.x, y: event.y});
+    if (clickedOverlay == delayThumb) {
+        movingSliderDelay = true;
+        thumbClickOffsetX = event.x - delayThumbX;
+    }
+    if (clickedOverlay == fanoutThumb) {
+        movingSliderFanout = true;
+        thumbClickOffsetX = event.x - fanoutThumbX;
+    }
+    if (clickedOverlay == speedThumb) {
+        movingSliderSpeed = true;
+        thumbClickOffsetX = event.x - speedThumbX;
+    }
+    if (clickedOverlay == factorThumb) {
+        movingSliderFactor = true;
+        thumbClickOffsetX = event.x - factorThumbX;
+    }
+    if (clickedOverlay == localFactorThumb) {
+        movingSliderLocalFactor = true;
+        thumbClickOffsetX = event.x - localFactorThumbX;
+    }
+    if (clickedOverlay == combFilterThumb) {
+        movingSliderCombFilter = true;
+        thumbClickOffsetX = event.x - combFilterThumbX;
+    }
+    if (clickedOverlay == diffusionThumb) {
+        movingSliderDiffusion = true;
+        thumbClickOffsetX = event.x - diffusionThumbX;
+    }
+    if (clickedOverlay == absorptionThumb) {
+        movingSliderAbsorption = true;
+        thumbClickOffsetX = event.x - absorptionThumbX;
+    }
+    if (clickedOverlay == reflectiveThumb) {
+        movingSliderReflective = true;
+        thumbClickOffsetX = event.x - reflectiveThumbX;
+    }
+}
+function mouseReleaseEvent(event) {
+    if (movingSliderDelay) {
+        movingSliderDelay = false;
+        var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
+        AudioReflector.setPreDelay(delay);
+        delayThumbX = newThumbX;
+    }
+    if (movingSliderFanout) {
+        movingSliderFanout = false;
+        var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale);
+        AudioReflector.setDiffusionFanout(fanout);
+        fanoutThumbX = newThumbX;
+    }
+    if (movingSliderSpeed) {
+        movingSliderSpeed = false;
+        var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale;
+        AudioReflector.setSoundMsPerMeter(speed);
+        speedThumbX = newThumbX;
+    }
+    if (movingSliderFactor) {
+        movingSliderFactor = false;
+        var
factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale; + AudioReflector.setDistanceAttenuationScalingFactor(factor); + factorThumbX = newThumbX; + } + if (movingSliderCombFilter) { + movingSliderCombFilter = false; + var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale; + AudioReflector.setCombFilterWindow(combFilter); + combFilterThumbX = newThumbX; + } + + if (movingSliderLocalFactor) { + movingSliderLocalFactor = false; + var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale; + AudioReflector.setLocalAudioAttenuationFactor(localFactor); + localFactorThumbX = newThumbX; + } + + if (movingSliderReflective) { + movingSliderReflective = false; + var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale; + setReflectiveRatio(reflective); + reflectiveThumbX = newThumbX; + updateRatioSliders(); + } + + if (movingSliderDiffusion) { + movingSliderDiffusion = false; + var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale; + setDiffusionRatio(diffusion); + diffusionThumbX = newThumbX; + updateRatioSliders(); + } + + if (movingSliderAbsorption) { + movingSliderAbsorption = false; + var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale; + setAbsorptionRatio(absorption); + absorptionThumbX = newThumbX; + updateRatioSliders(); + } +} + +Controller.mouseMoveEvent.connect(mouseMoveEvent); +Controller.mousePressEvent.connect(mousePressEvent); +Controller.mouseReleaseEvent.connect(mouseReleaseEvent); + diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index b904dfc1c1..bd7a82b439 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -1674,6 +1674,15 @@ void Application::init() { connect(_rearMirrorTools, SIGNAL(restoreView()), SLOT(restoreMirrorView())); connect(_rearMirrorTools, SIGNAL(shrinkView()), SLOT(shrinkMirrorView())); connect(_rearMirrorTools, SIGNAL(resetView()), SLOT(resetSensors())); + + // set up our audio reflector + _audioReflector.setMyAvatar(getAvatar()); + _audioReflector.setVoxels(_voxels.getTree()); + _audioReflector.setAudio(getAudio()); + connect(getAudio(), &Audio::processInboundAudio, &_audioReflector, &AudioReflector::processInboundAudio,Qt::DirectConnection); + connect(getAudio(), &Audio::processLocalAudio, &_audioReflector, &AudioReflector::processLocalAudio,Qt::DirectConnection); + + // save settings when avatar changes connect(_myAvatar, &MyAvatar::transformChanged, this, &Application::bumpSettings); } @@ -2446,6 +2455,9 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) { // disable specular lighting for ground and voxels glMaterialfv(GL_FRONT, GL_SPECULAR, NO_SPECULAR_COLOR); + + // draw the audio reflector overlay + _audioReflector.render(); // Draw voxels if (Menu::getInstance()->isOptionChecked(MenuOption::Voxels)) { @@ -3385,6 +3397,7 @@ void Application::loadScript(const QString& scriptName) { scriptEngine->registerGlobalObject("Menu", MenuScriptingInterface::getInstance()); scriptEngine->registerGlobalObject("Settings", SettingsScriptingInterface::getInstance()); scriptEngine->registerGlobalObject("AudioDevice", AudioDeviceScriptingInterface::getInstance()); + scriptEngine->registerGlobalObject("AudioReflector", &_audioReflector); QThread* 
workerThread = new QThread(this);
diff --git a/interface/src/Application.h b/interface/src/Application.h
index 6a14788caa..2656ff47ce 100644
--- a/interface/src/Application.h
+++ b/interface/src/Application.h
@@ -38,6 +38,7 @@
 #include
 
 #include "Audio.h"
+#include "AudioReflector.h"
 #include "BuckyBalls.h"
 #include "Camera.h"
 #include "DatagramProcessor.h"
@@ -162,6 +163,7 @@ public:
     bool isThrottleRendering() const { return _glWidget->isThrottleRendering(); }
     MyAvatar* getAvatar() { return _myAvatar; }
     Audio* getAudio() { return &_audio; }
+    const AudioReflector* getAudioReflector() const { return &_audioReflector; }
     Camera* getCamera() { return &_myCamera; }
     ViewFrustum* getViewFrustum() { return &_viewFrustum; }
     ViewFrustum* getShadowViewFrustum() { return &_shadowViewFrustum; }
@@ -513,7 +515,7 @@ private:
     TouchEvent _lastTouchEvent;
 
     Overlays _overlays;
-
+    AudioReflector _audioReflector;
     RunningScriptsWidget* _runningScriptsWidget;
     QHash<QString, ScriptEngine*> _scriptEnginesHash;
 };
diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp
index 5dcd54050c..674eaa0d70 100644
--- a/interface/src/Audio.cpp
+++ b/interface/src/Audio.cpp
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include
 
 #include "Application.h"
 #include "Audio.h"
@@ -87,7 +88,11 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _collisionSoundDuration(0.0f),
     _proceduralEffectSample(0),
     _numFramesDisplayStarve(0),
-    _muted(false)
+    _muted(false),
+    _processSpatialAudio(false),
+    _spatialAudioStart(0),
+    _spatialAudioFinish(0),
+    _spatialAudioRingBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL, true) // random access mode
 {
     // clear the array of locally injected samples
     memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
@@ -398,7 +403,7 @@ void Audio::handleAudioInput() {
     unsigned int inputSamplesRequired = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio;
 
     QByteArray inputByteArray = _inputDevice->readAll();
-    
+
     if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput) {
         // if this person wants local loopback add that to the locally injected audio
 
         if (!_loopbackOutputDevice && _loopbackAudioOutput) {
             // we didn't have the loopback output device going so set that up now
             _loopbackOutputDevice = _loopbackAudioOutput->start();
         }
-        
+
         if (_inputFormat == _outputFormat) {
             if (_loopbackOutputDevice) {
                 _loopbackOutputDevice->write(inputByteArray);
@@ -559,6 +564,13 @@
             _lastInputLoudness = 0;
         }
 
+        // at this point we have clean monoAudioSamples, which match our target output...
+        // this is what we should send to our interested listeners
+        if (_processSpatialAudio && !_muted && _audioOutput) {
+            QByteArray monoInputData((char*)monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
+            emit processLocalAudio(_spatialAudioStart, monoInputData, _desiredInputFormat);
+        }
+
         if (_proceduralAudioOutput) {
             processProceduralAudio(monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
         }
@@ -622,7 +634,7 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
     _totalPacketsReceived++;
 
     double timeDiff = diffclock(&_lastReceiveTime, &currentReceiveTime);
-    
+
     // Discard first few received packets for computing jitter (often they pile up on start)
     if (_totalPacketsReceived > NUM_INITIAL_PACKETS_DISCARD) {
         _stdev.addValue(timeDiff);
@@ -650,6 +662,69 @@
     _lastReceiveTime = currentReceiveTime;
 }
 
+// NOTE: numSamples is the total number of single channel samples; since callers will always call this with
+// stereo data, we know that we will have 2x samples for each stereo time sample at the format's sample rate
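+// For example (an illustrative summary of the cases below): if the accumulation buffer is empty
+// (_spatialAudioStart == _spatialAudioFinish) the samples are copied straight in; if sampleTime begins past
+// _spatialAudioFinish, the gap is first padded with silence; and if sampleTime lands inside the interval
+// [_spatialAudioStart, _spatialAudioFinish), the overlapping samples are summed (clamped to the int16_t range)
+// and only the non-overlapping tail is appended.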
+void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) {
+    // Calculate the number of remaining samples available. The source spatial audio buffer will get
+    // clipped if there are insufficient samples available in the accumulation buffer.
+    unsigned int remaining = _spatialAudioRingBuffer.getSampleCapacity() - _spatialAudioRingBuffer.samplesAvailable();
+
+    // Locate where in the accumulation buffer the new samples need to go
+    if (sampleTime >= _spatialAudioFinish) {
+        if (_spatialAudioStart == _spatialAudioFinish) {
+            // Nothing in the spatial audio ring buffer yet. Just do a straight copy, clipping if necessary
+            unsigned int sampleCount = (remaining < numSamples) ? remaining : numSamples;
+            if (sampleCount) {
+                _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCount);
+            }
+            _spatialAudioFinish = _spatialAudioStart + sampleCount / _desiredOutputFormat.channelCount();
+        } else {
+            // Spatial audio ring buffer already has data, but there is no overlap with the new sample.
+            // Compute the appropriate time delay and pad with silence until the new start time.
+            unsigned int delay = sampleTime - _spatialAudioFinish;
+            unsigned int delayCount = delay * _desiredOutputFormat.channelCount();
+            unsigned int silentCount = (remaining < delayCount) ? remaining : delayCount;
+            if (silentCount) {
+                _spatialAudioRingBuffer.addSilentFrame(silentCount);
+            }
+
+            // Recalculate the number of remaining samples
+            remaining -= silentCount;
+            unsigned int sampleCount = (remaining < numSamples) ? remaining : numSamples;
+
+            // Copy the new spatial audio to the accumulation ring buffer
+            if (sampleCount) {
+                _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCount);
+            }
+            _spatialAudioFinish += (sampleCount + silentCount) / _desiredOutputFormat.channelCount();
+        }
+    } else {
+        // There is overlap between the spatial audio buffer and the new sample, mix the overlap
+        // Calculate the offset from the buffer's current read position, which should be located at _spatialAudioStart
+        unsigned int offset = (sampleTime - _spatialAudioStart) * _desiredOutputFormat.channelCount();
+        unsigned int mixedSamplesCount = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount();
+        mixedSamplesCount = (mixedSamplesCount < numSamples) ? mixedSamplesCount : numSamples;
+
+        const int16_t* spatial = reinterpret_cast<const int16_t*>(spatialAudio.data());
+        for (unsigned int i = 0; i < mixedSamplesCount; i++) {
+            int existingSample = _spatialAudioRingBuffer[i + offset];
+            int newSample = spatial[i];
+            int sumOfSamples = existingSample + newSample;
+            _spatialAudioRingBuffer[i + offset] = static_cast<int16_t>(glm::clamp(sumOfSamples,
+                (int)std::numeric_limits<int16_t>::min(), (int)std::numeric_limits<int16_t>::max()));
+        }
+
+        // Copy the remaining unoverlapped spatial audio to the spatial audio buffer, if any
+        unsigned int nonMixedSampleCount = numSamples - mixedSamplesCount;
+        nonMixedSampleCount = (remaining < nonMixedSampleCount) ? remaining : nonMixedSampleCount;
+        if (nonMixedSampleCount) {
+            _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data() + mixedSamplesCount, nonMixedSampleCount);
+            // Extend the finish time by the amount of unoverlapped samples
+            _spatialAudioFinish += nonMixedSampleCount / _desiredOutputFormat.channelCount();
+        }
+    }
+}
+
 bool Audio::mousePressEvent(int x, int y) {
     if (_iconBounds.contains(x, y)) {
         toggleMute();
@@ -669,7 +744,7 @@ void Audio::toggleAudioNoiseReduction() {
 
 void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
     _ringBuffer.parseData(audioByteArray);
-    
+
     float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate()) *
         (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
@@ -700,13 +775,32 @@
     //qDebug() << "pushing " << numNetworkOutputSamples;
     _ringBuffer.setIsStarved(false);
 
-    // copy the samples we'll resample from the ring buffer - this also
-    // pushes the read pointer of the ring buffer forwards
-    int16_t* ringBufferSamples= new int16_t[numNetworkOutputSamples];
-    _ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
-
-    // add the next numNetworkOutputSamples from each QByteArray
-    // in our _localInjectionByteArrays QVector to the localInjectedSamples
+    int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
+    if (_processSpatialAudio) {
+        unsigned int sampleTime = _spatialAudioStart;
+        QByteArray buffer;
+        buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
+
+        _ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
+        // Accumulate direct transmission of audio from sender to receiver
+        if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
+            addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
+        }
+
+        // Send audio off for spatial processing
+        emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
+
+        // copy the samples we'll resample from the spatial audio ring buffer - this also
+        // pushes the read pointer of the spatial audio ring buffer forwards
+        _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
+
+        // Advance the start point for the next packet of audio to arrive
+        _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
+    } else {
+        // copy the samples we'll resample from the ring buffer - this also
+        // pushes the read pointer of the ring buffer forwards
+        _ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
+    }
 
     // copy the packet from the RB to the output
     linearResampling(ringBufferSamples,
@@ -756,6 +850,15 @@ void Audio::toggleToneInjection() {
     _toneInjectionEnabled = !_toneInjectionEnabled;
 }
 
+void Audio::toggleAudioSpatialProcessing() {
+    _processSpatialAudio =
!_processSpatialAudio; + if (_processSpatialAudio) { + _spatialAudioStart = 0; + _spatialAudioFinish = 0; + _spatialAudioRingBuffer.reset(); + } +} + // Take a pointer to the acquired microphone input samples and add procedural sounds void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) { float sample; @@ -996,6 +1099,12 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo) _proceduralAudioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this); gettimeofday(&_lastReceiveTime, NULL); + + // setup spatial audio ringbuffer + int numFrameSamples = _outputFormat.sampleRate() * _desiredOutputFormat.channelCount(); + _spatialAudioRingBuffer.resizeForFrameSize(numFrameSamples); + _spatialAudioStart = _spatialAudioFinish = 0; + supportedFormat = true; } } diff --git a/interface/src/Audio.h b/interface/src/Audio.h index b78bcc661e..3b19d98146 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -54,8 +54,6 @@ public: void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; } int getJitterBufferSamples() { return _jitterBufferSamples; } - void lowPassFilter(int16_t* inputBuffer); - virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen); virtual void startDrumSound(float volume, float frequency, float duration, float decay); @@ -73,15 +71,19 @@ public: int getNetworkSampleRate() { return SAMPLE_RATE; } int getNetworkBufferLengthSamplesPerChannel() { return NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; } + bool getProcessSpatialAudio() const { return _processSpatialAudio; } + public slots: void start(); void stop(); void addReceivedAudioToBuffer(const QByteArray& audioByteArray); + void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples); void handleAudioInput(); void reset(); void toggleMute(); void toggleAudioNoiseReduction(); void toggleToneInjection(); + void toggleAudioSpatialProcessing(); virtual void handleAudioByteArray(const QByteArray& audioByteArray); @@ -97,6 +99,8 @@ public slots: signals: bool muteToggled(); + void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); + void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); private: @@ -162,9 +166,15 @@ private: GLuint _boxTextureId; QRect _iconBounds; - // Audio callback in class context. + /// Audio callback in class context. inline void performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight); + + bool _processSpatialAudio; /// Process received audio by spatial audio hooks + unsigned int _spatialAudioStart; /// Start of spatial audio interval (in sample rate time base) + unsigned int _spatialAudioFinish; /// End of spatial audio interval (in sample rate time base) + AudioRingBuffer _spatialAudioRingBuffer; /// Spatially processed audio + // Process procedural audio by // 1. Echo to the local procedural output device // 2. Mix with the audio input diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp new file mode 100644 index 0000000000..9e4b97bf46 --- /dev/null +++ b/interface/src/AudioReflector.cpp @@ -0,0 +1,762 @@ +// +// AudioReflector.cpp +// interface +// +// Created by Brad Hefta-Gaub on 4/2/2014 +// Copyright (c) 2014 High Fidelity, Inc. All rights reserved. 
+//
+
+#include
+
+#include "AudioReflector.h"
+#include "Menu.h"
+
+const float DEFAULT_PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections
+const float DEFAULT_MS_DELAY_PER_METER = 3.0f;
+const float MINIMUM_ATTENUATION_TO_REFLECT = 1.0f / 256.0f;
+const float DEFAULT_DISTANCE_SCALING_FACTOR = 2.0f;
+const float MAXIMUM_DELAY_MS = 1000.0f * 20.0f; // stop reflecting after path is this long
+const int DEFAULT_DIFFUSION_FANOUT = 5;
+const int ABSOLUTE_MAXIMUM_BOUNCE_COUNT = 10;
+const float DEFAULT_LOCAL_ATTENUATION_FACTOR = 0.125f;
+const float DEFAULT_COMB_FILTER_WINDOW = 0.05f; // ms delay differential to avoid
+
+const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
+
+const float DEFAULT_ABSORPTION_RATIO = 0.125f; // 12.5% is absorbed
+const float DEFAULT_DIFFUSION_RATIO = 0.125f; // 12.5% is diffused
+
+AudioReflector::AudioReflector(QObject* parent) :
+    QObject(parent),
+    _preDelay(DEFAULT_PRE_DELAY),
+    _soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
+    _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
+    _localAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
+    _combFilterWindow(DEFAULT_COMB_FILTER_WINDOW),
+    _diffusionFanout(DEFAULT_DIFFUSION_FANOUT),
+    _absorptionRatio(DEFAULT_ABSORPTION_RATIO),
+    _diffusionRatio(DEFAULT_DIFFUSION_RATIO),
+    _withDiffusion(false),
+    _lastPreDelay(DEFAULT_PRE_DELAY),
+    _lastSoundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
+    _lastDistanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
+    _lastLocalAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
+    _lastDiffusionFanout(DEFAULT_DIFFUSION_FANOUT),
+    _lastAbsorptionRatio(DEFAULT_ABSORPTION_RATIO),
+    _lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO)
+{
+    _reflections = 0;
+    _diffusionPathCount = 0;
+    _averageAttenuation = 0.0f;
+    _maxAttenuation = 0.0f;
+    _minAttenuation = 0.0f;
+    _averageDelay = 0;
+    _maxDelay = 0;
+    _minDelay = 0;
+}
+
+bool AudioReflector::haveAttributesChanged() {
+    bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
+
+    bool attributesChange = (_withDiffusion != withDiffusion
+        || _lastPreDelay != _preDelay
+        || _lastSoundMsPerMeter != _soundMsPerMeter
+        || _lastDistanceAttenuationScalingFactor != _distanceAttenuationScalingFactor
+        || _lastDiffusionFanout != _diffusionFanout
+        || _lastAbsorptionRatio != _absorptionRatio
+        || _lastDiffusionRatio != _diffusionRatio);
+
+    if (attributesChange) {
+        _withDiffusion = withDiffusion;
+        _lastPreDelay = _preDelay;
+        _lastSoundMsPerMeter = _soundMsPerMeter;
+        _lastDistanceAttenuationScalingFactor = _distanceAttenuationScalingFactor;
+        _lastDiffusionFanout = _diffusionFanout;
+        _lastAbsorptionRatio = _absorptionRatio;
+        _lastDiffusionRatio = _diffusionRatio;
+    }
+
+    return attributesChange;
+}
+
+void AudioReflector::render() {
+
+    // if we're not set up yet, or we're not processing spatial audio, then exit early
+    if (!_myAvatar || !_audio->getProcessSpatialAudio()) {
+        return;
+    }
+
+    // use this opportunity to calculate our reflections
+    calculateAllReflections();
+
+    // only render if we've been asked to do so
+    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingRenderPaths)) {
+        drawRays();
+    }
+}
+
+// delay = 1ms per foot
+//       = 3ms per meter
+float AudioReflector::getDelayFromDistance(float distance) {
+    float delay = (_soundMsPerMeter * distance);
+    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) {
delay += _preDelay; + } + return delay; +} + +// attenuation = from the Audio Mixer +float AudioReflector::getDistanceAttenuationCoefficient(float distance) { + const float DISTANCE_SCALE = 2.5f; + const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f; + const float DISTANCE_LOG_BASE = 2.5f; + const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE); + + float distanceSquareToSource = distance * distance; + + // calculate the distance coefficient using the distance to this node + float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR, + DISTANCE_SCALE_LOG + + (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1); + + distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor()); + + return distanceCoefficient; +} + +glm::vec3 AudioReflector::getFaceNormal(BoxFace face) { + bool wantSlightRandomness = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces); + glm::vec3 faceNormal; + const float MIN_RANDOM_LENGTH = 0.99f; + const float MAX_RANDOM_LENGTH = 1.0f; + const float NON_RANDOM_LENGTH = 1.0f; + float normalLength = wantSlightRandomness ? randFloatInRange(MIN_RANDOM_LENGTH, MAX_RANDOM_LENGTH) : NON_RANDOM_LENGTH; + float remainder = (1.0f - normalLength)/2.0f; + float remainderSignA = randomSign(); + float remainderSignB = randomSign(); + + if (face == MIN_X_FACE) { + faceNormal = glm::vec3(-normalLength, remainder * remainderSignA, remainder * remainderSignB); + } else if (face == MAX_X_FACE) { + faceNormal = glm::vec3(normalLength, remainder * remainderSignA, remainder * remainderSignB); + } else if (face == MIN_Y_FACE) { + faceNormal = glm::vec3(remainder * remainderSignA, -normalLength, remainder * remainderSignB); + } else if (face == MAX_Y_FACE) { + faceNormal = glm::vec3(remainder * remainderSignA, normalLength, remainder * remainderSignB); + } else if (face == MIN_Z_FACE) { + faceNormal = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -normalLength); + } else if (face == MAX_Z_FACE) { + faceNormal = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, normalLength); + } + return faceNormal; +} + +// set up our buffers for our attenuated and delayed samples +const int NUMBER_OF_CHANNELS = 2; + +void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint, + const QByteArray& samples, unsigned int sampleTime, int sampleRate) { + + bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars); + bool wantStereo = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource); + glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() : + _myAvatar->getHead()->getPosition(); + glm::vec3 leftEarPosition = wantEarSeparation ? 
_myAvatar->getHead()->getLeftEarPosition() :
+                                                  _myAvatar->getHead()->getPosition();
+
+    int totalNumberOfSamples = samples.size() / sizeof(int16_t);
+    int totalNumberOfStereoSamples = samples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS);
+
+    const int16_t* originalSamplesData = (const int16_t*)samples.constData();
+    QByteArray attenuatedLeftSamples;
+    QByteArray attenuatedRightSamples;
+    attenuatedLeftSamples.resize(samples.size());
+    attenuatedRightSamples.resize(samples.size());
+
+    int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data();
+    int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data();
+
+    // calculate the distance to the ears
+    float rightEarDistance = glm::distance(audiblePoint.location, rightEarPosition);
+    float leftEarDistance = glm::distance(audiblePoint.location, leftEarPosition);
+
+    float rightEarDelayMsecs = getDelayFromDistance(rightEarDistance) + audiblePoint.delay;
+    float leftEarDelayMsecs = getDelayFromDistance(leftEarDistance) + audiblePoint.delay;
+    float averageEarDelayMsecs = (leftEarDelayMsecs + rightEarDelayMsecs) / 2.0f;
+
+    bool safeToInject = true; // assume the best
+
+    // check to see if this new injection point would be within the comb filter
+    // suppression window for any of the existing known delays
+    QMap<float, float>& knownDelays = (source == INBOUND_AUDIO) ? _inboundAudioDelays : _localAudioDelays;
+    QMap<float, float>::const_iterator lowerBound = knownDelays.lowerBound(averageEarDelayMsecs - _combFilterWindow);
+    if (lowerBound != knownDelays.end()) {
+        float closestFound = lowerBound.value();
+        float deltaToClosest = (averageEarDelayMsecs - closestFound);
+        if (deltaToClosest > -_combFilterWindow && deltaToClosest < _combFilterWindow) {
+            safeToInject = false;
+        }
+    }
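+
+    // For example, with the default _combFilterWindow of 0.05 msecs, an echo that would arrive within
+    // 0.05 msecs of an already-injected echo is marked unsafe here, since two near-coincident copies of
+    // the same signal comb-filter each other rather than sounding like a distinct reflection.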
+
+    // keep track of any of our suppressed echoes so we can report them in our statistics
+    if (!safeToInject) {
+        QVector<float>& suppressedEchoes = (source == INBOUND_AUDIO) ? _inboundEchoesSuppressed : _localEchoesSuppressed;
+        suppressedEchoes << averageEarDelayMsecs;
+    } else {
+        knownDelays[averageEarDelayMsecs] = averageEarDelayMsecs;
+
+        _totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
+        _delayCount += 2;
+        _maxDelay = std::max(_maxDelay, rightEarDelayMsecs);
+        _maxDelay = std::max(_maxDelay, leftEarDelayMsecs);
+        _minDelay = std::min(_minDelay, rightEarDelayMsecs);
+        _minDelay = std::min(_minDelay, leftEarDelayMsecs);
+
+        int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
+        int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
+
+        float rightEarAttenuation = audiblePoint.attenuation *
+                                        getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance);
+
+        float leftEarAttenuation = audiblePoint.attenuation *
+                                        getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance);
+
+        _totalAttenuation += rightEarAttenuation + leftEarAttenuation;
+        _attenuationCount += 2;
+        _maxAttenuation = std::max(_maxAttenuation, rightEarAttenuation);
+        _maxAttenuation = std::max(_maxAttenuation, leftEarAttenuation);
+        _minAttenuation = std::min(_minAttenuation, rightEarAttenuation);
+        _minAttenuation = std::min(_minAttenuation, leftEarAttenuation);
+
+        // run through the samples, and attenuate them
+        for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
+            int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
+            int16_t rightSample = leftSample;
+            if (wantStereo) {
+                rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
+            }
+
+            attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
+            attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
+
+            attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
+            attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
+        }
+
+        // now inject the attenuated array with the appropriate delay
+        unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
+        unsigned int sampleTimeRight = sampleTime + rightEarDelay;
+
+        _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
+        _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
+    }
+}
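+
+// NOTE: local (microphone) audio is echoed through the same pipeline as inbound audio: the mono input is
+// upmixed to stereo and pre-attenuated by _localAudioAttenuationFactor (0.125 by default, so your own echoes
+// start at 1/8 of the input amplitude) before being handed to echoAudio().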
+void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
+    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) {
+        const int NUM_CHANNELS_INPUT = 1;
+        const int NUM_CHANNELS_OUTPUT = 2;
+        const int EXPECTED_SAMPLE_RATE = 24000;
+        if (format.channelCount() == NUM_CHANNELS_INPUT && format.sampleRate() == EXPECTED_SAMPLE_RATE) {
+            QAudioFormat outputFormat = format;
+            outputFormat.setChannelCount(NUM_CHANNELS_OUTPUT);
+            QByteArray stereoInputData(samples.size() * NUM_CHANNELS_OUTPUT, 0);
+            int numberOfSamples = (samples.size() / sizeof(int16_t));
+            int16_t* monoSamples = (int16_t*)samples.data();
+            int16_t* stereoSamples = (int16_t*)stereoInputData.data();
+
+            for (int i = 0; i < numberOfSamples; i++) {
+                stereoSamples[i * NUM_CHANNELS_OUTPUT] = monoSamples[i] * _localAudioAttenuationFactor;
+                stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * _localAudioAttenuationFactor;
+            }
+            _localAudioDelays.clear();
+            _localEchoesSuppressed.clear();
+            echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat);
+        }
+    }
+}
+
+void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
+    _inboundAudioDelays.clear();
+    _inboundEchoesSuppressed.clear();
+    echoAudio(INBOUND_AUDIO, sampleTime, samples, format);
+}
+
+void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
+    _maxDelay = 0;
+    _maxAttenuation = 0.0f;
+    _minDelay = std::numeric_limits<float>::max();
+    _minAttenuation = std::numeric_limits<float>::max();
+    _totalDelay = 0.0f;
+    _delayCount = 0;
+    _totalAttenuation = 0.0f;
+    _attenuationCount = 0;
+
+    QMutexLocker locker(&_mutex);
+
+    // depending on if we're processing local or external audio, pick the correct points vector
+    QVector<AudiblePoint>& audiblePoints = source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;
+
+    foreach(const AudiblePoint& audiblePoint, audiblePoints) {
+        injectAudiblePoint(source, audiblePoint, samples, sampleTime, format.sampleRate());
+    }
+
+    _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount;
+    _averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount;
+
+    if (_reflections == 0) {
+        _minDelay = 0.0f;
+        _minAttenuation = 0.0f;
+    }
+}
+
+void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) {
+    glDisable(GL_LIGHTING);
+    glLineWidth(2.0);
+
+    // Draw the vector itself
+    glBegin(GL_LINES);
+    glColor3f(color.x, color.y, color.z);
+    glVertex3f(start.x, start.y, start.z);
+    glVertex3f(end.x, end.y, end.z);
+    glEnd();
+
+    glEnable(GL_LIGHTING);
+}
+
+
+
+AudioPath::AudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& direction,
+        float attenuation, float delay, float distance, bool isDiffusion, int bounceCount) :
+
+    source(source),
+    isDiffusion(isDiffusion),
+    startPoint(origin),
+    startDirection(direction),
+    startDelay(delay),
+    startAttenuation(attenuation),
+
+    lastPoint(origin),
+    lastDirection(direction),
+    lastDistance(distance),
+    lastDelay(delay),
+    lastAttenuation(attenuation),
+    bounceCount(bounceCount),
+
+    finalized(false),
+    reflections()
+{
+}
+
+void AudioReflector::addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection,
+        float initialAttenuation, float initialDelay, float initialDistance, bool isDiffusion) {
+
+    AudioPath* path = new AudioPath(source, origin, initialDirection, initialAttenuation, initialDelay,
+                                    initialDistance, isDiffusion, 0);
+
+    QVector<AudioPath*>& audioPaths = source == INBOUND_AUDIO ? _inboundAudioPaths : _localAudioPaths;
+
+    audioPaths.push_back(path);
+}
+
+void AudioReflector::calculateAllReflections() {
+    // only recalculate when we've moved, or if the attributes have changed
+    // TODO: what about case where new voxels are added in front of us???
+    bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented);
+    glm::quat orientation = wantHeadOrientation ?
_myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation();
+    glm::vec3 origin = _myAvatar->getHead()->getPosition();
+    glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition();
+
+    bool shouldRecalc = _reflections == 0
+                            || !isSimilarPosition(origin, _origin)
+                            || !isSimilarOrientation(orientation, _orientation)
+                            || !isSimilarPosition(listenerPosition, _listenerPosition)
+                            || haveAttributesChanged();
+
+    if (shouldRecalc) {
+        QMutexLocker locker(&_mutex);
+        quint64 start = usecTimestampNow();
+        _origin = origin;
+        _orientation = orientation;
+        _listenerPosition = listenerPosition;
+        analyzePaths(); // actually does the work
+        quint64 end = usecTimestampNow();
+        const bool wantDebugging = false;
+        if (wantDebugging) {
+            qDebug() << "calculateAllReflections() elapsed=" << (end - start);
+        }
+    }
+}
+
+void AudioReflector::drawRays() {
+    const glm::vec3 RED(1,0,0);
+    const glm::vec3 GREEN(0,1,0);
+    const glm::vec3 BLUE(0,0,1);
+    const glm::vec3 CYAN(0,1,1);
+
+    int diffusionNumber = 0;
+
+    QMutexLocker locker(&_mutex);
+
+    // draw the paths for inbound audio: diffusions in GREEN, original reflections in RED
+    foreach(AudioPath* const& path, _inboundAudioPaths) {
+        if (path->isDiffusion) {
+            diffusionNumber++;
+            drawPath(path, GREEN);
+        } else {
+            drawPath(path, RED);
+        }
+    }
+
+    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) {
+        // draw the paths for local audio: diffusions in CYAN, original reflections in BLUE
+        foreach(AudioPath* const& path, _localAudioPaths) {
+            if (path->isDiffusion) {
+                diffusionNumber++;
+                drawPath(path, CYAN);
+            } else {
+                drawPath(path, BLUE);
+            }
+        }
+    }
+}
+
+void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) {
+    glm::vec3 start = path->startPoint;
+    glm::vec3 color = originalColor;
+    const float COLOR_ADJUST_PER_BOUNCE = 0.75f;
+
+    foreach (glm::vec3 end, path->reflections) {
+        drawVector(start, end, color);
+        start = end;
+        color = color * COLOR_ADJUST_PER_BOUNCE;
+    }
+}
+
+void AudioReflector::clearPaths() {
+    // clear our inbound audio paths
+    foreach(AudioPath* const& path, _inboundAudioPaths) {
+        delete path;
+    }
+    _inboundAudioPaths.clear();
+    _inboundAudiblePoints.clear(); // clear our inbound audible points
+
+    // clear our local audio paths
+    foreach(AudioPath* const& path, _localAudioPaths) {
+        delete path;
+    }
+    _localAudioPaths.clear();
+    _localAudiblePoints.clear(); // clear our local audible points
+}
+
+// Here's how this works: we have an array of AudioPaths, we loop on all of our currently calculating audio
+// paths, and calculate one ray per path. If that ray doesn't reflect, or reaches a max distance/attenuation, then it
+// is considered finalized.
+// If the ray hits a surface, then, based on the characteristics of that surface, it will calculate the new
+// attenuation, path length, and delay for the primary path. For surfaces that have diffusion, it will also create
+// fanout number of new paths; those new paths will have an origin at the reflection point, and an initial attenuation
+// of their diffusion ratio. Those new paths will be added to the active audio paths, and be analyzed for the next loop.
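+// For example, with the default ratios (absorption 0.125, diffusion 0.125) a surface reflects 75% of the
+// incident energy, and with the default _diffusionFanout of 5 each bounce also spawns five diffusion paths,
+// each starting with 0.125 / 5 = 0.025 of the incident attenuation.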
+void AudioReflector::analyzePaths() {
+    clearPaths();
+
+    // add our initial paths
+    glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT);
+    glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP);
+    glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT);
+    glm::vec3 left = -right;
+    glm::vec3 down = -up;
+    glm::vec3 back = -front;
+    glm::vec3 frontRightUp = glm::normalize(front + right + up);
+    glm::vec3 frontLeftUp = glm::normalize(front + left + up);
+    glm::vec3 backRightUp = glm::normalize(back + right + up);
+    glm::vec3 backLeftUp = glm::normalize(back + left + up);
+    glm::vec3 frontRightDown = glm::normalize(front + right + down);
+    glm::vec3 frontLeftDown = glm::normalize(front + left + down);
+    glm::vec3 backRightDown = glm::normalize(back + right + down);
+    glm::vec3 backLeftDown = glm::normalize(back + left + down);
+
+    float initialAttenuation = 1.0f;
+
+    float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? _preDelay : 0.0f;
+
+    // NOTE: we're still calculating our initial paths based on the listener's position. Since the analysis code
+    // has been updated to support individual sound sources (which is how we support diffusion), we could use that
+    // new paradigm to add support for individual, more directional sound sources.
+
+    addAudioPath(INBOUND_AUDIO, _origin, front, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, right, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, up, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, down, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, back, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, left, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, frontRightUp, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, frontLeftUp, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, backRightUp, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, backLeftUp, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, frontRightDown, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, frontLeftDown, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, backRightDown, initialAttenuation, preDelay);
+    addAudioPath(INBOUND_AUDIO, _origin, backLeftDown, initialAttenuation, preDelay);
+
+    // the original paths for the local audio are directional to the front of the origin
+    addAudioPath(LOCAL_AUDIO, _origin, front, initialAttenuation, preDelay);
+    addAudioPath(LOCAL_AUDIO, _origin, frontRightUp, initialAttenuation, preDelay);
+    addAudioPath(LOCAL_AUDIO, _origin, frontLeftUp, initialAttenuation, preDelay);
+    addAudioPath(LOCAL_AUDIO, _origin, frontRightDown, initialAttenuation, preDelay);
+    addAudioPath(LOCAL_AUDIO, _origin, frontLeftDown, initialAttenuation, preDelay);
+
+    // loop through all our audio paths and keep analyzing them until they complete
+    int steps = 0;
+    int activePaths = _inboundAudioPaths.size() + _localAudioPaths.size(); // when we start, all paths are active
+    while (activePaths > 0) {
+        activePaths = analyzePathsSingleStep();
+        steps++;
+    }
+    _reflections = _inboundAudiblePoints.size() + _localAudiblePoints.size();
+    _diffusionPathCount = countDiffusionPaths();
+}
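+
+// NOTE: the while loop above is guaranteed to terminate: each step either finalizes a path (no intersection,
+// or more than ABSOLUTE_MAXIMUM_BOUNCE_COUNT bounces) or extends/spawns paths whose attenuation only shrinks
+// and whose delay only grows, so every path eventually falls below MINIMUM_ATTENUATION_TO_REFLECT or exceeds
+// MAXIMUM_DELAY_MS.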
+
+int AudioReflector::countDiffusionPaths() {
+    int diffusionCount = 0;
+
+    foreach(AudioPath* const& path, _inboundAudioPaths) {
+        if (path->isDiffusion) {
+            diffusionCount++;
+        }
+    }
+    foreach(AudioPath* const& path, _localAudioPaths) {
+        if (path->isDiffusion) {
+            diffusionCount++;
+        }
+    }
+    return diffusionCount;
+}
+
+int AudioReflector::analyzePathsSingleStep() {
+    // iterate all the active sound paths, and calculate one step per active path
+    int activePaths = 0;
+
+    QVector<AudioPath*>* pathsLists[] = { &_inboundAudioPaths, &_localAudioPaths };
+
+    for (int i = 0; i < (int)(sizeof(pathsLists) / sizeof(pathsLists[0])); i++) {
+
+        QVector<AudioPath*>& pathList = *pathsLists[i];
+
+        foreach(AudioPath* const& path, pathList) {
+
+            glm::vec3 start = path->lastPoint;
+            glm::vec3 direction = path->lastDirection;
+            OctreeElement* elementHit; // output from findRayIntersection
+            float distance; // output from findRayIntersection
+            BoxFace face; // output from findRayIntersection
+
+            if (!path->finalized) {
+                activePaths++;
+
+                if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) {
+                    path->finalized = true;
+                } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
+                    // TODO: we need to decide how we want to handle locking on the ray intersection. If we force
+                    // locking, we get an accurate picture, but it could stall rendering of the voxels. If we
+                    // trylock (the default), we might miss ray intersections that do exist, and we can't really
+                    // detect that case... add Octree::Lock as the last parameter to force locking.
+                    handlePathPoint(path, distance, elementHit, face);
+
+                } else {
+                    // If we didn't intersect, but this was a diffusion ray, then we will go ahead and cast a
+                    // short ray out from our last known point, in the last known direction, and leave that
+                    // sound source hanging there
+                    if (path->isDiffusion) {
+                        const float MINIMUM_RANDOM_DISTANCE = 0.25f;
+                        const float MAXIMUM_RANDOM_DISTANCE = 0.5f;
+                        // use a distinct name so we don't shadow the intersection distance above
+                        float randomDistance = randFloatInRange(MINIMUM_RANDOM_DISTANCE, MAXIMUM_RANDOM_DISTANCE);
+                        handlePathPoint(path, randomDistance, NULL, UNKNOWN_FACE);
+                    } else {
+                        path->finalized = true; // if it doesn't intersect, then it is finished
+                    }
+                }
+            }
+        }
+    }
+    return activePaths;
+}
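+
+// NOTE: handlePathPoint() stops each ray slightly short of the true intersection point
+// (distance * SLIGHTLY_SHORT below; presumably a constant just under 1.0f defined elsewhere in this file)
+// so the reflected ray starts outside the surface it just hit and doesn't immediately re-intersect it.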
+
+void AudioReflector::handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face) {
+    glm::vec3 start = path->lastPoint;
+    glm::vec3 direction = path->lastDirection;
+    glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));
+
+    float currentReflectiveAttenuation = path->lastAttenuation; // only the reflective components
+    float currentDelay = path->lastDelay; // start with our delay so far
+    float pathDistance = path->lastDistance;
+
+    pathDistance += glm::distance(start, end);
+
+    float toListenerDistance = glm::distance(end, _listenerPosition);
+
+    // adjust our current delay by just the delay from the most recent ray
+    currentDelay += getDelayFromDistance(distance);
+
+    // now we know the current attenuation for the "perfect" reflection case, but we now incorporate
+    // our surface materials to determine how much of this ray is absorbed, reflected, and diffused
+    SurfaceCharacteristics material = getSurfaceCharacteristics(elementHit);
+
+    float reflectiveAttenuation = currentReflectiveAttenuation * material.reflectiveRatio;
+    float totalDiffusionAttenuation = currentReflectiveAttenuation * material.diffusionRatio;
+
+    bool wantDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
+    int fanout = wantDiffusions ? _diffusionFanout : 0;
+
+    float partialDiffusionAttenuation = fanout < 1 ? 0.0f : totalDiffusionAttenuation / (float)fanout;
+
+    // total delay includes the bounce back to the listener
+    float totalDelay = currentDelay + getDelayFromDistance(toListenerDistance);
+    float toListenerAttenuation = getDistanceAttenuationCoefficient(toListenerDistance + pathDistance);
+
+    // if our resulting partial diffusion attenuation is still above our minimum attenuation,
+    // then we add new paths for each diffusion point
+    if ((partialDiffusionAttenuation * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT
+            && totalDelay < MAXIMUM_DELAY_MS) {
+
+        // diffusions fan out from random places on the hemisphere around the collision point
+        for (int i = 0; i < fanout; i++) {
+            glm::vec3 diffusion;
+
+            // We're creating a random normal here. But we want it to be relatively dramatic compared to how
+            // we handle our slightly random surface normals.
+            const float MINIMUM_RANDOM_LENGTH = 0.5f;
+            const float MAXIMUM_RANDOM_LENGTH = 1.0f;
+            float randomness = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
+            float remainder = (1.0f - randomness) / 2.0f;
+            float remainderSignA = randomSign();
+            float remainderSignB = randomSign();
+
+            if (face == MIN_X_FACE) {
+                diffusion = glm::vec3(-randomness, remainder * remainderSignA, remainder * remainderSignB);
+            } else if (face == MAX_X_FACE) {
+                diffusion = glm::vec3(randomness, remainder * remainderSignA, remainder * remainderSignB);
+            } else if (face == MIN_Y_FACE) {
+                diffusion = glm::vec3(remainder * remainderSignA, -randomness, remainder * remainderSignB);
+            } else if (face == MAX_Y_FACE) {
+                diffusion = glm::vec3(remainder * remainderSignA, randomness, remainder * remainderSignB);
+            } else if (face == MIN_Z_FACE) {
+                diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -randomness);
+            } else if (face == MAX_Z_FACE) {
+                diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, randomness);
+            } else if (face == UNKNOWN_FACE) {
+                float randomnessX = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
+                float randomnessY = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
+                float randomnessZ = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
+                diffusion = glm::vec3(direction.x * randomnessX, direction.y * randomnessY, direction.z * randomnessZ);
+            }
+
+            diffusion = glm::normalize(diffusion);
+
+            // add a new audio path for each diffusion; the new path's source is the same as the original source
+            addAudioPath(path->source, end, diffusion, partialDiffusionAttenuation, currentDelay, pathDistance, true);
+        }
+    } else {
+        const bool wantDebugging = false;
+        if (wantDebugging) {
+            if ((partialDiffusionAttenuation * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) {
+                qDebug() << "too quiet to diffuse";
+                qDebug() << "    partialDiffusionAttenuation=" << partialDiffusionAttenuation;
+                qDebug() << "    toListenerAttenuation=" << toListenerAttenuation;
+                qDebug() << "    result=" << (partialDiffusionAttenuation * toListenerAttenuation);
+                qDebug() << "    MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT;
+            }
+            if (totalDelay > MAXIMUM_DELAY_MS) {
+                qDebug() << "too delayed to diffuse";
+                qDebug() << "    totalDelay=" << totalDelay;
+                qDebug() << "    MAXIMUM_DELAY_MS=" << MAXIMUM_DELAY_MS;
+            }
+        }
+    }
+
+    // if our reflective attenuation is above our minimum, then add our reflection point and
+    // allow our path to continue
+    if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT
+            && totalDelay < MAXIMUM_DELAY_MS) {
+
+        // add this location, with the reflective attenuation as well as the total diffusion attenuation
+        // NOTE: we add the delay to the audible point, not back to the listener. The additional delay
+        // and attenuation to the listener is recalculated at the point where we actually inject the
+        // audio, so that it can be adjusted to the ear positions
+        AudiblePoint point = {end, currentDelay, (reflectiveAttenuation + totalDiffusionAttenuation), pathDistance};
+
+        QVector<AudiblePoint>& audiblePoints = path->source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;
+
+        audiblePoints.push_back(point);
+
+        // add this location to the path points, so we can visualize it
+        path->reflections.push_back(end);
+
+        // now, if our reflective attenuation is over our minimum, then keep going...
+        if (reflectiveAttenuation * toListenerAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) {
+            glm::vec3 faceNormal = getFaceNormal(face);
+            path->lastDirection = glm::normalize(glm::reflect(direction, faceNormal));
+            path->lastPoint = end;
+            path->lastAttenuation = reflectiveAttenuation;
+            path->lastDelay = currentDelay;
+            path->lastDistance = pathDistance;
+            path->bounceCount++;
+        } else {
+            path->finalized = true; // if we're too quiet, then we're done
+        }
+    } else {
+        const bool wantDebugging = false;
+        if (wantDebugging) {
+            if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) {
+                qDebug() << "too quiet to add audible point";
+                qDebug() << "    reflectiveAttenuation + totalDiffusionAttenuation=" << (reflectiveAttenuation + totalDiffusionAttenuation);
+                qDebug() << "    toListenerAttenuation=" << toListenerAttenuation;
+                qDebug() << "    result=" << ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation);
+                qDebug() << "    MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT;
+            }
+            if (totalDelay > MAXIMUM_DELAY_MS) {
+                qDebug() << "too delayed to add audible point";
+                qDebug() << "    totalDelay=" << totalDelay;
+                qDebug() << "    MAXIMUM_DELAY_MS=" << MAXIMUM_DELAY_MS;
+            }
+        }
+        path->finalized = true; // if we're too quiet or too delayed, then we're done
+    }
+}
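+
+// Delay bookkeeping example (assuming getDelayFromDistance() simply scales distance by _soundMsPerMeter,
+// plus any configured pre-delay): a path that has traveled 5 m at 3 ms/m carries roughly 15 ms of delay
+// to its audible point; the remaining delay from that point to each ear is computed later, at injection
+// time, so it can track the listener's ear positions.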
+
+// TODO: eventually we will add support for different surface characteristics based on the element
+// that is hit, which is why we pass the elementHit to this helper function. But for now, all
+// surfaces have the same characteristics.
+SurfaceCharacteristics AudioReflector::getSurfaceCharacteristics(OctreeElement* elementHit) {
+    SurfaceCharacteristics result = { getReflectiveRatio(), _absorptionRatio, _diffusionRatio };
+    return result;
+}
+
+void AudioReflector::setReflectiveRatio(float ratio) {
+    float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
+    float currentReflectiveRatio = (1.0f - (_absorptionRatio + _diffusionRatio));
+    float halfDifference = (safeRatio - currentReflectiveRatio) / 2.0f;
+
+    // evenly distribute the difference between the two other ratios
+    _absorptionRatio -= halfDifference;
+    _diffusionRatio -= halfDifference;
+}
+
+void AudioReflector::setAbsorptionRatio(float ratio) {
+    float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
+    _absorptionRatio = safeRatio;
+    const float MAX_COMBINED_RATIO = 1.0f;
+    if (_absorptionRatio + _diffusionRatio > MAX_COMBINED_RATIO) {
+        _diffusionRatio = MAX_COMBINED_RATIO - _absorptionRatio;
+    }
+}
+
+void AudioReflector::setDiffusionRatio(float ratio) {
+    float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
+    _diffusionRatio = safeRatio;
+    const float MAX_COMBINED_RATIO = 1.0f;
+    if (_absorptionRatio + _diffusionRatio > MAX_COMBINED_RATIO) {
+        _absorptionRatio = MAX_COMBINED_RATIO - _diffusionRatio;
+    }
+}
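+
+// A worked example of the clamping above (hypothetical values): starting from absorption 0.2 and
+// diffusion 0.3, calling setAbsorptionRatio(0.9f) forces the diffusion ratio down to 1.0 - 0.9 = 0.1 so
+// the ratios never sum past 1.0, and getReflectiveRatio() then reports 1.0 - (0.9 + 0.1) = 0.0.
+// setReflectiveRatio() instead splits the requested change evenly between absorption and diffusion.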
+
diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h
new file mode 100644
index 0000000000..2408b70a96
--- /dev/null
+++ b/interface/src/AudioReflector.h
@@ -0,0 +1,222 @@
+//
+//  AudioReflector.h
+//  interface
+//
+//  Created by Brad Hefta-Gaub on 4/2/2014
+//  Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
+//
+
+#ifndef interface_AudioReflector_h
+#define interface_AudioReflector_h
+
+#include <QMutex>
+
+#include <VoxelTree.h>
+
+#include "Audio.h"
+#include "avatar/MyAvatar.h"
+
+enum AudioSource {
+    LOCAL_AUDIO,
+    INBOUND_AUDIO
+};
+
+class AudioPath {
+public:
+    AudioPath(AudioSource source = INBOUND_AUDIO, const glm::vec3& origin = glm::vec3(0.0f),
+              const glm::vec3& direction = glm::vec3(0.0f), float attenuation = 1.0f,
+              float delay = 0.0f, float distance = 0.0f, bool isDiffusion = false, int bounceCount = 0);
+
+    AudioSource source;
+    bool isDiffusion;
+    glm::vec3 startPoint;
+    glm::vec3 startDirection;
+    float startDelay;
+    float startAttenuation;
+
+    glm::vec3 lastPoint;
+    glm::vec3 lastDirection;
+    float lastDistance;
+    float lastDelay;
+    float lastAttenuation;
+    unsigned int bounceCount;
+
+    bool finalized;
+    QVector<glm::vec3> reflections;
+};
+
+class AudiblePoint {
+public:
+    glm::vec3 location; /// location of the audible point
+    float delay; /// total delay, including pre-delay, to the audible point, not to the listener's ears
+    float attenuation; /// only the reflective & diffusive portion of attenuation; doesn't include distance attenuation
+    float distance; /// total distance to the audible point, not to the listener's ears
+};
+
+class SurfaceCharacteristics {
+public:
+    float reflectiveRatio;
+    float absorptionRatio;
+    float diffusionRatio;
+};
+
+class AudioReflector : public QObject {
+    Q_OBJECT
+public:
+    AudioReflector(QObject* parent = NULL);
+
+    // setup functions to configure the resources used by the AudioReflector
+    void setVoxels(VoxelTree* voxels) { _voxels = voxels; }
+    void setMyAvatar(MyAvatar* myAvatar) { _myAvatar = myAvatar; }
+    void setAudio(Audio* audio) { _audio = audio; }
+
+    void render(); /// must be called in the application render loop
+
+    void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
+    void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
+
+public slots:
+    // statistics
+    int getReflections() const { return _reflections; }
+    float getAverageDelayMsecs() const { return _averageDelay; }
+    float getAverageAttenuation() const { return _averageAttenuation; }
+    float getMaxDelayMsecs() const { return _maxDelay; }
+    float getMaxAttenuation() const { return _maxAttenuation; }
+    float getMinDelayMsecs() const { return _minDelay; }
+    float getMinAttenuation() const { return _minAttenuation; }
+    float getDelayFromDistance(float distance);
+    int getDiffusionPathCount() const { return _diffusionPathCount; }
+    int getEchoesInjected() const { return _inboundAudioDelays.size() + _localAudioDelays.size(); }
+    int getEchoesSuppressed() const { return _inboundEchoesSuppressed.size() + _localEchoesSuppressed.size(); }
+
+    /// ms of delay added to all echoes
+    float getPreDelay() const { return _preDelay; }
+    void setPreDelay(float preDelay) { _preDelay = preDelay; }
+
+    /// ms per meter that sound travels; larger means slower, which sounds bigger
+    float getSoundMsPerMeter() const { return _soundMsPerMeter; }
+    void setSoundMsPerMeter(float soundMsPerMeter) { _soundMsPerMeter = soundMsPerMeter; }
+
+    /// scales attenuation to be louder or softer than the default distance attenuation
+    float getDistanceAttenuationScalingFactor() const { return _distanceAttenuationScalingFactor; }
+    void setDistanceAttenuationScalingFactor(float factor) { _distanceAttenuationScalingFactor = factor; }
+
+    /// scales attenuation of local audio to be louder or softer than the default attenuation
+    float getLocalAudioAttenuationFactor() const { return _localAudioAttenuationFactor; }
+    void setLocalAudioAttenuationFactor(float factor) { _localAudioAttenuationFactor = factor; }
+
+    /// ms window in which we will suppress echoes to reduce comb filter effects
+    float getCombFilterWindow() const { return _combFilterWindow; }
+    void setCombFilterWindow(float value) { _combFilterWindow = value; }
+
+    /// number of points of diffusion from each reflection point; as fanout increases there are more chances
+    /// for secondary echoes, but each diffusion ray is quieter and therefore more likely to be below the
+    /// sound floor
+    int getDiffusionFanout() const { return _diffusionFanout; }
+    void setDiffusionFanout(int fanout) { _diffusionFanout = fanout; }
+
+    /// ratio 0.0 - 1.0 of the amount of each ray that is absorbed upon hitting a surface
+    float getAbsorptionRatio() const { return _absorptionRatio; }
+    void setAbsorptionRatio(float ratio);
+
+    /// ratio 0.0 - 1.0 of the amount of each ray that is diffused upon hitting a surface
+    float getDiffusionRatio() const { return _diffusionRatio; }
+    void setDiffusionRatio(float ratio);
+
+    /// remaining ratio 0.0 - 1.0 of the amount of each ray that is cleanly reflected upon hitting a surface
+    float getReflectiveRatio() const { return (1.0f - (_absorptionRatio + _diffusionRatio)); }
+    void setReflectiveRatio(float ratio);
+
+signals:
+
+private:
+    VoxelTree* _voxels; // used to access the voxel scene
+    MyAvatar* _myAvatar; // access to the listener
+    Audio* _audio; // access to the audio API
+
+    // helpers for drawing
+    void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);
+
+    // helper for generically calculating attenuation based on distance
+    float getDistanceAttenuationCoefficient(float distance);
+
+    // statistics
+    int _reflections;
+    int _diffusionPathCount;
+    int _delayCount;
+    float _totalDelay;
+    float _averageDelay;
+    float _maxDelay;
+    float _minDelay;
+    int _attenuationCount;
+    float _totalAttenuation;
+    float _averageAttenuation;
+    float _maxAttenuation;
+    float _minAttenuation;
+
+    glm::vec3 _listenerPosition;
+    glm::vec3 _origin;
+    glm::quat _orientation;
+
+    QVector<AudioPath*> _inboundAudioPaths; /// audio paths we're processing for inbound audio
+    QVector<AudiblePoint> _inboundAudiblePoints; /// the audible points calculated from the inbound audio paths
+    QMap<float, float> _inboundAudioDelays; /// delay times for currently injected audio points
+    QVector<float> _inboundEchoesSuppressed; /// delay times for inbound echoes we suppressed
+
+    QVector<AudioPath*> _localAudioPaths; /// audio paths we're processing for local audio
+    QVector<AudiblePoint> _localAudiblePoints; /// the audible points calculated from the local audio paths
+    QMap<float, float> _localAudioDelays; /// delay times for currently injected audio points
+    QVector<float> _localEchoesSuppressed; /// delay times for local echoes we suppressed
+
+    // adds a sound source to begin an audio path trace; these can be the initial sound sources with their
+    // directional properties, as well as diffusion sound sources
+    void addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation,
+                      float initialDelay, float initialDistance = 0.0f, bool isDiffusion = false);
+
+    // helpers that handle audioPath analysis
+    int analyzePathsSingleStep();
+    void handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face);
+    void clearPaths();
+    void analyzePaths();
+    void drawRays();
+    void drawPath(AudioPath* path, const glm::vec3& originalColor);
+    void calculateAllReflections();
+    int countDiffusionPaths();
+    glm::vec3 getFaceNormal(BoxFace face);
+
+    void injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
+    void echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
+
+    // return the surface characteristics of the element we hit
+    SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit = NULL);
+
+    QMutex _mutex;
+
+    float _preDelay;
+    float _soundMsPerMeter;
+    float _distanceAttenuationScalingFactor;
+    float _localAudioAttenuationFactor;
+    float _combFilterWindow;
+
+    int _diffusionFanout; // number of points of diffusion from each reflection point
+
+    // all elements have the same material for now...
+ float _absorptionRatio; + float _diffusionRatio; + float _reflectiveRatio; + + // remember the last known values at calculation + bool haveAttributesChanged(); + + bool _withDiffusion; + float _lastPreDelay; + float _lastSoundMsPerMeter; + float _lastDistanceAttenuationScalingFactor; + float _lastLocalAudioAttenuationFactor; + int _lastDiffusionFanout; + float _lastAbsorptionRatio; + float _lastDiffusionRatio; +}; + + +#endif // interface_AudioReflector_h diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index 9aa1b9b9a3..85c3d5b0c3 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -349,8 +349,8 @@ Menu::Menu() : QMenu* renderDebugMenu = developerMenu->addMenu("Render Debugging Tools"); - addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::PipelineWarnings, Qt::CTRL | Qt::SHIFT | Qt::Key_P); - addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::SuppressShortTimings, Qt::CTRL | Qt::SHIFT | Qt::Key_S); + addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::PipelineWarnings); + addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::SuppressShortTimings); addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::CullSharedFaces, @@ -361,7 +361,7 @@ Menu::Menu() : addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::ShowCulledSharedFaces, - Qt::CTRL | Qt::SHIFT | Qt::Key_X, + 0, false, appInstance->getVoxels(), SLOT(showCulledSharedFaces())); @@ -385,6 +385,50 @@ Menu::Menu() : appInstance->getAudio(), SLOT(toggleToneInjection())); + QMenu* spatialAudioMenu = audioDebugMenu->addMenu("Spatial Audio"); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessing, + Qt::CTRL | Qt::SHIFT | Qt::Key_M, + false, + appInstance->getAudio(), + SLOT(toggleAudioSpatialProcessing())); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingIncludeOriginal, + Qt::CTRL | Qt::SHIFT | Qt::Key_O, + true); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingSeparateEars, + Qt::CTRL | Qt::SHIFT | Qt::Key_E, + true); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingPreDelay, + Qt::CTRL | Qt::SHIFT | Qt::Key_D, + true); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingStereoSource, + Qt::CTRL | Qt::SHIFT | Qt::Key_S, + true); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingHeadOriented, + Qt::CTRL | Qt::SHIFT | Qt::Key_H, + true); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingWithDiffusions, + Qt::CTRL | Qt::SHIFT | Qt::Key_W, + true); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingRenderPaths, + Qt::CTRL | Qt::SHIFT | Qt::Key_R, + true); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces, + Qt::CTRL | Qt::SHIFT | Qt::Key_X, + true); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingProcessLocalAudio, + Qt::CTRL | Qt::SHIFT | Qt::Key_A, + true); + addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel, Qt::CTRL | Qt::SHIFT | Qt::Key_V, this, diff --git a/interface/src/Menu.h b/interface/src/Menu.h index 40ed8efdc7..a62f54b0c6 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -257,6 +257,18 @@ namespace MenuOption { 
const QString Atmosphere = "Atmosphere"; const QString AudioNoiseReduction = "Audio Noise Reduction"; const QString AudioToneInjection = "Inject Test Tone"; + + const QString AudioSpatialProcessing = "Audio Spatial Processing"; + const QString AudioSpatialProcessingHeadOriented = "Head Oriented"; + const QString AudioSpatialProcessingIncludeOriginal = "Includes Network Original"; + const QString AudioSpatialProcessingPreDelay = "Add Pre-Delay"; + const QString AudioSpatialProcessingProcessLocalAudio = "Process Local Audio"; + const QString AudioSpatialProcessingRenderPaths = "Render Paths"; + const QString AudioSpatialProcessingSeparateEars = "Separate Ears"; + const QString AudioSpatialProcessingSlightlyRandomSurfaces = "Slightly Random Surfaces"; + const QString AudioSpatialProcessingStereoSource = "Stereo Source"; + const QString AudioSpatialProcessingWithDiffusions = "With Diffusions"; + const QString Avatars = "Avatars"; const QString Bandwidth = "Bandwidth Display"; const QString BandwidthDetails = "Bandwidth Details"; diff --git a/interface/src/avatar/Head.h b/interface/src/avatar/Head.h index 5f0c6519ef..8c58b73ebd 100644 --- a/interface/src/avatar/Head.h +++ b/interface/src/avatar/Head.h @@ -30,6 +30,8 @@ enum eyeContactTargets { MOUTH }; +const float EYE_EAR_GAP = 0.08f; + class Avatar; class ProgramObject; @@ -73,6 +75,11 @@ public: glm::quat getEyeRotation(const glm::vec3& eyePosition) const; + const glm::vec3& getRightEyePosition() const { return _rightEyePosition; } + const glm::vec3& getLeftEyePosition() const { return _leftEyePosition; } + glm::vec3 getRightEarPosition() const { return _rightEyePosition + (getRightDirection() * EYE_EAR_GAP) + (getFrontDirection() * -EYE_EAR_GAP); } + glm::vec3 getLeftEarPosition() const { return _leftEyePosition + (getRightDirection() * -EYE_EAR_GAP) + (getFrontDirection() * -EYE_EAR_GAP); } + FaceModel& getFaceModel() { return _faceModel; } const FaceModel& getFaceModel() const { return _faceModel; } diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp index 0c4d8a35fd..64616cbdf8 100644 --- a/interface/src/ui/Stats.cpp +++ b/interface/src/ui/Stats.cpp @@ -293,6 +293,7 @@ void Stats::display( glm::vec3 avatarPos = myAvatar->getPosition(); lines = _expanded ? 5 : 3; + drawBackground(backgroundColor, horizontalOffset, 0, _geoStatsWidth, lines * STATS_PELS_PER_LINE + 10); horizontalOffset += 5; @@ -341,6 +342,10 @@ void Stats::display( VoxelSystem* voxels = Application::getInstance()->getVoxels(); lines = _expanded ? 12 : 3; + if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) { + lines += 8; // spatial audio processing adds 1 spacing line and 7 extra lines of info + } + drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10); horizontalOffset += 5; @@ -497,5 +502,89 @@ void Stats::display( voxelStats << "LOD: You can see " << qPrintable(displayLODDetails.trimmed()); verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, (char*)voxelStats.str().c_str(), color); - } + } + + + if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) { + verticalOffset += STATS_PELS_PER_LINE; // space one line... 
+ + const AudioReflector* audioReflector = Application::getInstance()->getAudioReflector(); + + // add some reflection stats + char reflectionsStatus[128]; + + sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s, Normals: %s", + audioReflector->getReflections(), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal) + ? "included" : "silent"), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars) + ? "two" : "one"), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource) + ? "stereo" : "mono"), + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces) + ? "random" : "regular") + ); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? + audioReflector->getPreDelay() : 0.0f; + + sprintf(reflectionsStatus, "Delay: pre: %6.3f, average %6.3f, max %6.3f, min %6.3f, speed: %6.3f", + preDelay, + audioReflector->getAverageDelayMsecs(), + audioReflector->getMaxDelayMsecs(), + audioReflector->getMinDelayMsecs(), + audioReflector->getSoundMsPerMeter()); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, Factor: %5.3f", + audioReflector->getAverageAttenuation(), + audioReflector->getMaxAttenuation(), + audioReflector->getMinAttenuation(), + audioReflector->getDistanceAttenuationScalingFactor()); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + sprintf(reflectionsStatus, "Local Audio: %s Attenuation: %5.3f", + (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio) + ? "yes" : "no"), + audioReflector->getLocalAudioAttenuationFactor()); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + bool diffusionEnabled = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); + int fanout = diffusionEnabled ? audioReflector->getDiffusionFanout() : 0; + int diffusionPaths = diffusionEnabled ? audioReflector->getDiffusionPathCount() : 0; + sprintf(reflectionsStatus, "Diffusion: %s, Fanout: %d, Paths: %d", + (diffusionEnabled ? 
"yes" : "no"), fanout, diffusionPaths); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + const float AS_PERCENT = 100.0f; + float reflectiveRatio = audioReflector->getReflectiveRatio() * AS_PERCENT; + float diffusionRatio = audioReflector->getDiffusionRatio() * AS_PERCENT; + float absorptionRatio = audioReflector->getAbsorptionRatio() * AS_PERCENT; + sprintf(reflectionsStatus, "Ratios: Reflective: %5.3f, Diffusion: %5.3f, Absorption: %5.3f", + reflectiveRatio, diffusionRatio, absorptionRatio); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + sprintf(reflectionsStatus, "Comb Filter Window: %5.3f ms, Allowed: %d, Suppressed: %d", + audioReflector->getCombFilterWindow(), + audioReflector->getEchoesInjected(), + audioReflector->getEchoesSuppressed()); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + } + } diff --git a/libraries/audio/src/AudioRingBuffer.cpp b/libraries/audio/src/AudioRingBuffer.cpp index 603a8c89ac..9b50ed0bcb 100644 --- a/libraries/audio/src/AudioRingBuffer.cpp +++ b/libraries/audio/src/AudioRingBuffer.cpp @@ -18,15 +18,19 @@ #include "AudioRingBuffer.h" -AudioRingBuffer::AudioRingBuffer(int numFrameSamples) : +AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode) : NodeData(), _sampleCapacity(numFrameSamples * RING_BUFFER_LENGTH_FRAMES), _numFrameSamples(numFrameSamples), _isStarved(true), - _hasStarted(false) + _hasStarted(false), + _randomAccessMode(randomAccessMode) { if (numFrameSamples) { _buffer = new int16_t[_sampleCapacity]; + if (_randomAccessMode) { + memset(_buffer, 0, _sampleCapacity * sizeof(int16_t)); + } _nextOutput = _buffer; _endOfLastWrite = _buffer; } else { @@ -50,6 +54,9 @@ void AudioRingBuffer::resizeForFrameSize(qint64 numFrameSamples) { delete[] _buffer; _sampleCapacity = numFrameSamples * RING_BUFFER_LENGTH_FRAMES; _buffer = new int16_t[_sampleCapacity]; + if (_randomAccessMode) { + memset(_buffer, 0, _sampleCapacity * sizeof(int16_t)); + } _nextOutput = _buffer; _endOfLastWrite = _buffer; } @@ -68,18 +75,34 @@ qint64 AudioRingBuffer::readData(char *data, qint64 maxSize) { // only copy up to the number of samples we have available int numReadSamples = std::min((unsigned) (maxSize / sizeof(int16_t)), samplesAvailable()); + // If we're in random access mode, then we consider our number of available read samples slightly + // differently. Namely, if anything has been written, we say we have as many samples as they ask for + // otherwise we say we have nothing available + if (_randomAccessMode) { + numReadSamples = _endOfLastWrite ? 
(maxSize / sizeof(int16_t)) : 0;
+    }
+
     if (_nextOutput + numReadSamples > _buffer + _sampleCapacity) {
         // we're going to need to do two reads to get this data, it wraps around the edge
 
         // read to the end of the buffer
         int numSamplesToEnd = (_buffer + _sampleCapacity) - _nextOutput;
         memcpy(data, _nextOutput, numSamplesToEnd * sizeof(int16_t));
+        if (_randomAccessMode) {
+            memset(_nextOutput, 0, numSamplesToEnd * sizeof(int16_t)); // clear it
+        }
 
         // read the rest from the beginning of the buffer
         memcpy(data + (numSamplesToEnd * sizeof(int16_t)), _buffer,
                (numReadSamples - numSamplesToEnd) * sizeof(int16_t));
+        if (_randomAccessMode) {
+            memset(_buffer, 0, (numReadSamples - numSamplesToEnd) * sizeof(int16_t)); // clear it
+        }
     } else {
         // read the data
         memcpy(data, _nextOutput, numReadSamples * sizeof(int16_t));
+        if (_randomAccessMode) {
+            memset(_nextOutput, 0, numReadSamples * sizeof(int16_t)); // clear it
+        }
     }
 
     // push the position of _nextOutput by the number of samples read
@@ -128,6 +151,10 @@ int16_t& AudioRingBuffer::operator[](const int index) {
     return *shiftedPositionAccomodatingWrap(_nextOutput, index);
 }
 
+const int16_t& AudioRingBuffer::operator[](const int index) const {
+    return *shiftedPositionAccomodatingWrap(_nextOutput, index);
+}
+
 void AudioRingBuffer::shiftReadPosition(unsigned int numSamples) {
     _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
 }
diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h
index ef4427d633..04cc67c8ac 100644
--- a/libraries/audio/src/AudioRingBuffer.h
+++ b/libraries/audio/src/AudioRingBuffer.h
@@ -39,7 +39,7 @@ const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();
 
 class AudioRingBuffer : public NodeData {
     Q_OBJECT
 public:
-    AudioRingBuffer(int numFrameSamples);
+    AudioRingBuffer(int numFrameSamples, bool randomAccessMode = false);
     ~AudioRingBuffer();
 
     void reset();
@@ -50,8 +50,8 @@ public:
     int parseData(const QByteArray& packet);
 
     // assume callers using this will never wrap around the end
-    const int16_t* getNextOutput() { return _nextOutput; }
-    const int16_t* getBuffer() { return _buffer; }
+    const int16_t* getNextOutput() const { return _nextOutput; }
+    const int16_t* getBuffer() const { return _buffer; }
 
     qint64 readSamples(int16_t* destination, qint64 maxSamples);
     qint64 writeSamples(const int16_t* source, qint64 maxSamples);
@@ -60,6 +60,7 @@ public:
     qint64 writeData(const char* data, qint64 maxSize);
 
     int16_t& operator[](const int index);
+    const int16_t& operator[](const int index) const;
 
     void shiftReadPosition(unsigned int numSamples);
 
@@ -87,6 +88,7 @@ protected:
     int16_t* _buffer;
     bool _isStarved;
     bool _hasStarted;
+    bool _randomAccessMode; /// will this ring buffer be used for random access? If so, samples are zeroed after they are read
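+    // For example (illustrative): the audio reflector mixes echoes ahead of the read position through the
+    // non-const operator[] above; because readData() zeroes samples as it consumes them, those mixed-ahead
+    // echoes play exactly once and are not replayed when the ring wraps around.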
 };
 
 #endif // hifi_AudioRingBuffer_h
diff --git a/libraries/octree/src/AABox.h b/libraries/octree/src/AABox.h
index 093a111a69..1aa0849b70 100644
--- a/libraries/octree/src/AABox.h
+++ b/libraries/octree/src/AABox.h
@@ -23,7 +23,8 @@ enum BoxFace {
     MIN_Y_FACE,
     MAX_Y_FACE,
     MIN_Z_FACE,
-    MAX_Z_FACE
+    MAX_Z_FACE,
+    UNKNOWN_FACE
 };
 
 enum BoxVertex {
diff --git a/libraries/octree/src/ViewFrustum.cpp b/libraries/octree/src/ViewFrustum.cpp
index 9b77d39fdc..f2d19a66ea 100644
--- a/libraries/octree/src/ViewFrustum.cpp
+++ b/libraries/octree/src/ViewFrustum.cpp
@@ -425,10 +425,6 @@ bool ViewFrustum::matches(const ViewFrustum& compareTo, bool debug) const {
     return result;
 }
 
-bool isNaN(float f) {
-    return f != f;
-}
-
 bool ViewFrustum::isVerySimilar(const ViewFrustum& compareTo, bool debug) const {
 
     // Compute distance between the two positions
@@ -450,7 +446,7 @@ bool ViewFrustum::isVerySimilar(const ViewFrustum& compareTo, bool debug) const
     float angleEyeOffsetOrientation = compareTo._eyeOffsetOrientation == _eyeOffsetOrientation
                                             ? 0.0f : glm::degrees(glm::angle(dQEyeOffsetOrientation));
     if (isNaN(angleEyeOffsetOrientation)) {
-        angleOrientation = 0.0f;
+        angleEyeOffsetOrientation = 0.0f;
     }
 
     bool result =
diff --git a/libraries/shared/src/SharedUtil.cpp b/libraries/shared/src/SharedUtil.cpp
index cd98fbdbd2..956f78204f 100644
--- a/libraries/shared/src/SharedUtil.cpp
+++ b/libraries/shared/src/SharedUtil.cpp
@@ -51,6 +51,10 @@ float randFloatInRange (float min,float max) {
     return min + ((rand() % 10000)/10000.f * (max-min));
 }
 
+float randomSign() {
+    return randomBoolean() ? -1.0f : 1.0f;
+}
+
 unsigned char randomColorValue(int miniumum) {
     return miniumum + (rand() % (256 - miniumum));
 }
@@ -659,3 +663,21 @@ glm::vec3 safeEulerAngles(const glm::quat& q) {
     }
 }
 
+bool isNaN(float value) {
+    return value != value;
+}
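+
+// NOTE: the isNaN() check below matters because glm::angle() can return NaN for two nearly identical
+// quaternions (an acos of a dot product that rounds slightly above 1.0); treating that case as a
+// zero-degree difference keeps the similarity test stable.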
+bool isSimilarOrientation(const glm::quat& orientationA, const glm::quat& orientationB, float similarEnough) {
+    // Compute the angular distance between the two orientations
+    float angleOrientation = orientationA == orientationB ? 0.0f : glm::degrees(glm::angle(orientationA * glm::inverse(orientationB)));
+    if (isNaN(angleOrientation)) {
+        angleOrientation = 0.0f;
+    }
+    return (angleOrientation <= similarEnough);
+}
+
+bool isSimilarPosition(const glm::vec3& positionA, const glm::vec3& positionB, float similarEnough) {
+    // Compute the distance between the two points
+    float positionDistance = glm::distance(positionA, positionB);
+    return (positionDistance <= similarEnough);
+}
diff --git a/libraries/shared/src/SharedUtil.h b/libraries/shared/src/SharedUtil.h
index f41c5b8aa2..a2f98549ad 100644
--- a/libraries/shared/src/SharedUtil.h
+++ b/libraries/shared/src/SharedUtil.h
@@ -73,6 +73,7 @@ void usecTimestampNowForceClockSkew(int clockSkew);
 float randFloat();
 int randIntInRange (int min, int max);
 float randFloatInRange (float min,float max);
+float randomSign(); /// \return -1.0 or 1.0
 unsigned char randomColorValue(int minimum);
 bool randomBoolean();
@@ -166,4 +167,14 @@ int unpackFloatVec3FromSignedTwoByteFixed(const unsigned char* sourceBuffer, glm
 
 /// \return vec3 with euler angles in radians
 glm::vec3 safeEulerAngles(const glm::quat& q);
 
+/// \return bool are two orientations similar to each other
+const float ORIENTATION_SIMILAR_ENOUGH = 5.0f; // 5 degrees in any direction
+bool isSimilarOrientation(const glm::quat& orientationA, const glm::quat& orientationB,
+                          float similarEnough = ORIENTATION_SIMILAR_ENOUGH);
+const float POSITION_SIMILAR_ENOUGH = 0.1f; // 0.1 meter
+bool isSimilarPosition(const glm::vec3& positionA, const glm::vec3& positionB, float similarEnough = POSITION_SIMILAR_ENOUGH);
+
+/// \return bool is the float NaN
+bool isNaN(float value);
+
 #endif // hifi_SharedUtil_h
diff --git a/libraries/voxels/src/VoxelDetail.cpp b/libraries/voxels/src/VoxelDetail.cpp
index 6c385c9387..f1855f5f81 100644
--- a/libraries/voxels/src/VoxelDetail.cpp
+++ b/libraries/voxels/src/VoxelDetail.cpp
@@ -75,6 +75,9 @@ QScriptValue rayToVoxelIntersectionResultToScriptValue(QScriptEngine* engine, co
         case MAX_Z_FACE:
             faceName = "MAX_Z_FACE";
             break;
+        case UNKNOWN_FACE:
+            faceName = "UNKNOWN_FACE";
+            break;
     }
     obj.setProperty("face", faceName);