Mirror of https://github.com/overte-org/overte.git (synced 2025-08-06 18:50:00 +02:00)

Merge pull request #3424 from PhilipRosedale/master

Fixes to audio attenuation jumps and better sample loop script

Commit 896cb70ff6
2 changed files with 152 additions and 143 deletions
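For orientation before the diff: the audio mixer reduces each source's gain by a fixed fraction for every doubling of distance beyond a one-meter threshold, clamped at zero. The sketch below restates that calculation using the constants visible in this commit; the standalone helper and its name are illustrative only and are not part of the change.

// Illustrative sketch of the distance attenuation used in the diff below.
// The two constants are taken from this commit; computeDistanceCoefficient()
// is a hypothetical helper for illustration, not a function in the codebase.
#include <algorithm>
#include <cmath>

const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;

float computeDistanceCoefficient(float distanceBetween) {
    if (distanceBetween < ATTENUATION_BEGINS_AT_DISTANCE) {
        return 1.0f;  // no distance attenuation within the first meter
    }
    // lose 18% of the gain for each doubling of distance beyond one meter
    float coefficient = 1.0f - (std::log(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE) / std::log(2.0f))
                               * ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE;
    return std::max(coefficient, 0.0f);
}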
First changed file: the audio mixer (AudioMixer).

@@ -102,7 +102,7 @@ AudioMixer::~AudioMixer() {
 
 const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
 const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
-const float ATTENUATION_EPSILON_DISTANCE = 0.1f;
+const float RADIUS_OF_HEAD = 0.076f;
 
 int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
                                                           AvatarAudioStream* listeningNodeStream) {
@@ -112,6 +112,8 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
     // Basically, we'll repeat that last frame until it has a frame to mix. Depending on how many times
     // we've repeated that frame in a row, we'll gradually fade that repeated frame into silence.
     // This improves the perceived quality of the audio slightly.
 
+    bool showDebug = false;  // (randFloat() < 0.05f);
+
     float repeatedFrameFadeFactor = 1.0f;
 
@@ -140,112 +142,117 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
     int numSamplesDelay = 0;
     float weakChannelAmplitudeRatio = 1.0f;
 
-    bool shouldAttenuate = (streamToAdd != listeningNodeStream);
+    bool shouldDistanceAttenuate = true;
 
-    if (shouldAttenuate) {
-        // if the two stream pointers do not match then these are different streams
+    // Is the source that I am mixing my own?
+    bool sourceIsSelf = (streamToAdd == listeningNodeStream);
 
     glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream->getPosition();
 
     float distanceBetween = glm::length(relativePosition);
 
     if (distanceBetween < EPSILON) {
         distanceBetween = EPSILON;
     }
 
     if (streamToAdd->getLastPopOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
         // according to mixer performance we have decided this does not get to be mixed in
         // bail out
         return 0;
     }
 
     ++_sumMixes;
 
     if (streamToAdd->getListenerUnattenuatedZone()) {
-        shouldAttenuate = !streamToAdd->getListenerUnattenuatedZone()->contains(listeningNodeStream->getPosition());
+        shouldDistanceAttenuate = !streamToAdd->getListenerUnattenuatedZone()->contains(listeningNodeStream->getPosition());
     }
 
     if (streamToAdd->getType() == PositionalAudioStream::Injector) {
         attenuationCoefficient *= reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getAttenuationRatio();
+        if (showDebug) {
+            qDebug() << "AttenuationRatio: " << reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getAttenuationRatio();
+        }
     }
 
-    shouldAttenuate = shouldAttenuate && distanceBetween > ATTENUATION_EPSILON_DISTANCE;
-
-    if (shouldAttenuate) {
-        glm::quat inverseOrientation = glm::inverse(listeningNodeStream->getOrientation());
-
-        float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
-        float radius = 0.0f;
-
-        if (streamToAdd->getType() == PositionalAudioStream::Injector) {
-            radius = reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getRadius();
-        }
-
-        if (radius == 0 || (distanceSquareToSource > radius * radius)) {
-            // this is either not a spherical source, or the listener is outside the sphere
-
-            if (radius > 0) {
-                // this is a spherical source - the distance used for the coefficient
-                // needs to be the closest point on the boundary to the source
-
-                // ovveride the distance to the node with the distance to the point on the
-                // boundary of the sphere
-                distanceSquareToSource -= (radius * radius);
-
-            } else {
-                // calculate the angle delivery for off-axis attenuation
-                glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd->getOrientation()) * relativePosition;
-
-                float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
-                                                   glm::normalize(rotatedListenerPosition));
-
-                const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
-                const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
-
-                float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
-                    (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));
-
-                // multiply the current attenuation coefficient by the calculated off axis coefficient
-                attenuationCoefficient *= offAxisCoefficient;
-            }
-
-            glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
-
-            if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {
-                // calculate the distance coefficient using the distance to this node
-                float distanceCoefficient = 1 - (logf(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE) / logf(2.0f)
-                                                 * ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE);
-
-                if (distanceCoefficient < 0) {
-                    distanceCoefficient = 0;
-                }
-
-                // multiply the current attenuation coefficient by the distance coefficient
-                attenuationCoefficient *= distanceCoefficient;
-            }
-
-            // project the rotated source position vector onto the XZ plane
-            rotatedSourcePosition.y = 0.0f;
-
-            // produce an oriented angle about the y-axis
-            bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
-                                                              glm::normalize(rotatedSourcePosition),
-                                                              glm::vec3(0.0f, 1.0f, 0.0f));
-
-            const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;
-
-            // figure out the number of samples of delay and the ratio of the amplitude
-            // in the weak channel for audio spatialization
-            float sinRatio = fabsf(sinf(bearingRelativeAngleToSource));
-            numSamplesDelay = SAMPLE_PHASE_DELAY_AT_90 * sinRatio;
-            weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
-        }
-    }
-    }
+    if (showDebug) {
+        qDebug() << "distance: " << distanceBetween;
+    }
+
+    glm::quat inverseOrientation = glm::inverse(listeningNodeStream->getOrientation());
+
+    if (!sourceIsSelf && (streamToAdd->getType() == PositionalAudioStream::Microphone)) {
+        // source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
+        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd->getOrientation()) * relativePosition;
+
+        float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
+                                           glm::normalize(rotatedListenerPosition));
+
+        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
+        const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
+
+        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
+            (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));
+
+        if (showDebug) {
+            qDebug() << "angleOfDelivery" << angleOfDelivery << "offAxisCoefficient: " << offAxisCoefficient;
+        }
+
+        // multiply the current attenuation coefficient by the calculated off axis coefficient
+        attenuationCoefficient *= offAxisCoefficient;
+    }
+
+    if (shouldDistanceAttenuate && (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE)) {
+        // calculate the distance coefficient using the distance to this node
+        float distanceCoefficient = 1 - (logf(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE) / logf(2.0f)
+                                         * ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE);
+
+        if (distanceCoefficient < 0) {
+            distanceCoefficient = 0;
+        }
+
+        // multiply the current attenuation coefficient by the distance coefficient
+        attenuationCoefficient *= distanceCoefficient;
+        if (showDebug) {
+            qDebug() << "distanceCoefficient: " << distanceCoefficient;
+        }
+    }
+
+    if (!sourceIsSelf) {
+        // Compute sample delay for the two ears to create phase panning
+        glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
+
+        // project the rotated source position vector onto the XZ plane
+        rotatedSourcePosition.y = 0.0f;
+
+        // produce an oriented angle about the y-axis
+        bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
+                                                          glm::normalize(rotatedSourcePosition),
+                                                          glm::vec3(0.0f, 1.0f, 0.0f));
+
+        const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;
+
+        // figure out the number of samples of delay and the ratio of the amplitude
+        // in the weak channel for audio spatialization
+        float sinRatio = fabsf(sinf(bearingRelativeAngleToSource));
+        numSamplesDelay = SAMPLE_PHASE_DELAY_AT_90 * sinRatio;
+        weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
+
+        if (distanceBetween < RADIUS_OF_HEAD) {
+            // Diminish phase panning if source would be inside head
+            numSamplesDelay *= distanceBetween / RADIUS_OF_HEAD;
+            weakChannelAmplitudeRatio += (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio) * distanceBetween / RADIUS_OF_HEAD;
+        }
+    }
+
+    if (showDebug) {
+        qDebug() << "attenuation: " << attenuationCoefficient;
+        qDebug() << "bearingRelativeAngleToSource: " << bearingRelativeAngleToSource << " numSamplesDelay: " << numSamplesDelay;
+    }
 
     AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput();
 
-    if (!streamToAdd->isStereo() && shouldAttenuate) {
+    if (!streamToAdd->isStereo()) {
         // this is a mono stream, which means it gets full attenuation and spatialization
 
         // if the bearing relative angle to source is > 0 then the delayed channel is the right one
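The hunk above contains the change behind the "attenuation jumps" part of the commit title: once a source comes closer than RADIUS_OF_HEAD, the interaural sample delay and the weak-channel attenuation are scaled back in proportion to distance, so the spatialization fades smoothly instead of switching abruptly. Below is a standalone restatement with one explicit assumption: the value of SAMPLE_PHASE_DELAY_AT_90 is not shown in this excerpt, so the 12 used here is a placeholder, and the helper function itself is illustrative, not from the commit.

// Illustrative restatement of the phase-panning smoothing added above.
// RADIUS_OF_HEAD and PHASE_AMPLITUDE_RATIO_AT_90 are the values from this diff;
// SAMPLE_PHASE_DELAY_AT_90 = 12 is an assumed placeholder (its value is not shown here).
#include <cmath>

const float RADIUS_OF_HEAD = 0.076f;
const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5f;
const int SAMPLE_PHASE_DELAY_AT_90 = 12;  // assumption, for illustration only

void computePhasePanning(float bearingRelativeAngleToSource, float distanceBetween,
                         int& numSamplesDelay, float& weakChannelAmplitudeRatio) {
    float sinRatio = std::fabs(std::sin(bearingRelativeAngleToSource));
    numSamplesDelay = (int)(SAMPLE_PHASE_DELAY_AT_90 * sinRatio);
    weakChannelAmplitudeRatio = 1.0f - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);

    if (distanceBetween < RADIUS_OF_HEAD) {
        // shrink the inter-ear delay and give back part of the weak-channel
        // attenuation in proportion to distance, as in the diff above
        numSamplesDelay *= distanceBetween / RADIUS_OF_HEAD;
        weakChannelAmplitudeRatio += (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio) * distanceBetween / RADIUS_OF_HEAD;
    }
}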
@@ -293,11 +300,7 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
     } else {
         int stereoDivider = streamToAdd->isStereo() ? 1 : 2;
 
-        if (!shouldAttenuate) {
-            attenuationCoefficient = 1.0f;
-        }
-
-        float attenuationAndFade = attenuationCoefficient * repeatedFrameFadeFactor;
+        float attenuationAndFade = attenuationCoefficient * repeatedFrameFadeFactor;
 
         for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) {
             _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationAndFade),
@@ -305,19 +308,8 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
         }
     }
 
-    if (_enableFilter && shouldAttenuate) {
-
-        glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream->getPosition();
-        glm::quat inverseOrientation = glm::inverse(listeningNodeStream->getOrientation());
-        glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
-
-        // project the rotated source position vector onto the XZ plane
-        rotatedSourcePosition.y = 0.0f;
-
-        // produce an oriented angle about the y-axis
-        float bearingAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
-                                                        glm::normalize(rotatedSourcePosition),
-                                                        glm::vec3(0.0f, -1.0f, 0.0f));
+    if (!sourceIsSelf && _enableFilter) {
 
         const float TWO_OVER_PI = 2.0f / PI;
 
         const float ZERO_DB = 1.0f;
@@ -337,36 +329,41 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
             float penumbraFilterGainL;
             float penumbraFilterGainR;
 
-            // variable gain calculation broken down by quadrent
-            if (bearingAngleToSource < -PI_OVER_TWO && bearingAngleToSource > -PI) {
+            // variable gain calculation broken down by quadrant
+            if (-bearingRelativeAngleToSource < -PI_OVER_TWO && -bearingRelativeAngleToSource > -PI) {
                 penumbraFilterGainL = TWO_OVER_PI *
-                    (FILTER_GAIN_AT_0 - FILTER_GAIN_AT_180) * (bearingAngleToSource + PI_OVER_TWO) + FILTER_GAIN_AT_0;
+                    (FILTER_GAIN_AT_0 - FILTER_GAIN_AT_180) * (-bearingRelativeAngleToSource + PI_OVER_TWO) + FILTER_GAIN_AT_0;
                 penumbraFilterGainR = TWO_OVER_PI *
-                    (FILTER_GAIN_AT_90 - FILTER_GAIN_AT_180) * (bearingAngleToSource + PI_OVER_TWO) + FILTER_GAIN_AT_90;
-            } else if (bearingAngleToSource <= PI && bearingAngleToSource > PI_OVER_TWO) {
+                    (FILTER_GAIN_AT_90 - FILTER_GAIN_AT_180) * (-bearingRelativeAngleToSource + PI_OVER_TWO) + FILTER_GAIN_AT_90;
+            } else if (-bearingRelativeAngleToSource <= PI && -bearingRelativeAngleToSource > PI_OVER_TWO) {
                 penumbraFilterGainL = TWO_OVER_PI *
-                    (FILTER_GAIN_AT_180 - FILTER_GAIN_AT_90) * (bearingAngleToSource - PI) + FILTER_GAIN_AT_180;
+                    (FILTER_GAIN_AT_180 - FILTER_GAIN_AT_90) * (-bearingRelativeAngleToSource - PI) + FILTER_GAIN_AT_180;
                 penumbraFilterGainR = TWO_OVER_PI *
-                    (FILTER_GAIN_AT_180 - FILTER_GAIN_AT_0) * (bearingAngleToSource - PI) + FILTER_GAIN_AT_180;
-            } else if (bearingAngleToSource <= PI_OVER_TWO && bearingAngleToSource > 0) {
+                    (FILTER_GAIN_AT_180 - FILTER_GAIN_AT_0) * (-bearingRelativeAngleToSource - PI) + FILTER_GAIN_AT_180;
+            } else if (-bearingRelativeAngleToSource <= PI_OVER_TWO && -bearingRelativeAngleToSource > 0) {
                 penumbraFilterGainL = TWO_OVER_PI *
-                    (FILTER_GAIN_AT_90 - FILTER_GAIN_AT_0) * (bearingAngleToSource - PI_OVER_TWO) + FILTER_GAIN_AT_90;
+                    (FILTER_GAIN_AT_90 - FILTER_GAIN_AT_0) * (-bearingRelativeAngleToSource - PI_OVER_TWO) + FILTER_GAIN_AT_90;
                 penumbraFilterGainR = FILTER_GAIN_AT_0;
             } else {
                 penumbraFilterGainL = FILTER_GAIN_AT_0;
                 penumbraFilterGainR = TWO_OVER_PI *
-                    (FILTER_GAIN_AT_0 - FILTER_GAIN_AT_90) * (bearingAngleToSource) + FILTER_GAIN_AT_0;
+                    (FILTER_GAIN_AT_0 - FILTER_GAIN_AT_90) * (-bearingRelativeAngleToSource) + FILTER_GAIN_AT_0;
+            }
+
+            if (distanceBetween < RADIUS_OF_HEAD) {
+                // Diminish effect if source would be inside head
+                penumbraFilterGainL += (1.f - penumbraFilterGainL) * (1.f - distanceBetween / RADIUS_OF_HEAD);
+                penumbraFilterGainR += (1.f - penumbraFilterGainR) * (1.f - distanceBetween / RADIUS_OF_HEAD);
             }
 
 #if 0
-            qDebug() << "avatar="
-                     << listeningNodeStream
-                     << "gainL="
+            qDebug() << "gainL="
                      << penumbraFilterGainL
                      << "gainR="
                      << penumbraFilterGainR
                      << "angle="
-                     << bearingAngleToSource;
+                     << -bearingRelativeAngleToSource;
 #endif
 
             // set the gain on both filter channels
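The last hunk above applies the same inside-the-head treatment to the penumbra filter: each channel's filter gain is blended toward 1.0 (unity gain) as the source moves inside RADIUS_OF_HEAD, so the filtering fades out rather than cutting off at the boundary. A minimal sketch of that blend follows; the helper name is illustrative and not part of the commit.

// Illustrative sketch of the penumbra-gain smoothing added above.
// RADIUS_OF_HEAD is the value from this diff; blendGainTowardUnity() is a
// hypothetical helper used only to restate the formula.
const float RADIUS_OF_HEAD = 0.076f;

float blendGainTowardUnity(float gain, float distanceBetween) {
    if (distanceBetween < RADIUS_OF_HEAD) {
        // adds nothing at the head boundary, returns 1.0 (no filtering) at the centre
        gain += (1.0f - gain) * (1.0f - distanceBetween / RADIUS_OF_HEAD);
    }
    return gain;
}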
Second changed file: the sample sound loop script.

@@ -5,39 +5,51 @@
 //  Created by David Rowe on 5/29/14.
 //  Copyright 2014 High Fidelity, Inc.
 //
-//  This example script plays a sound in a continuous loop.
+//  This example script plays a sound in a continuous loop, and creates a red sphere in front of you at the origin of the sound.
 //
 //  Distributed under the Apache License, Version 2.0.
 //  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
 
+// A few sample files you may want to try:
 var sound = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Guitars/Guitar+-+Nylon+A.raw");
+//var sound = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/220Sine.wav");
+//var sound = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Cocktail+Party+Snippets/Bandcamp.wav");
 
 var soundPlaying = false;
+var options = new AudioInjectionOptions();
+options.position = Vec3.sum(Camera.getPosition(), Quat.getFront(MyAvatar.orientation));
+options.volume = 0.5;
+options.loop = true;
+var playing = false;
+var ball = false;
 
-function keyPressEvent(event) {
-    if (event.text === "1") {
-        if (!Audio.isInjectorPlaying(soundPlaying)) {
-            var options = new AudioInjectionOptions();
-            options.position = MyAvatar.position;
-            options.volume = 0.5;
-            options.loop = true;
-            soundPlaying = Audio.playSound(sound, options);
-            print("Started sound loop");
-        } else {
-            Audio.stopInjector(soundPlaying);
-            print("Stopped sound loop");
-        }
+function maybePlaySound(deltaTime) {
+    if (sound.downloaded) {
+        var properties = {
+            type: "Sphere",
+            position: options.position,
+            dimensions: { x: 0.2, y: 0.2, z: 0.2 },
+            color: { red: 200, green: 0, blue: 0 }
+        };
+        ball = Entities.addEntity(properties);
+        soundPlaying = Audio.playSound(sound, options);
+        print("Started sound looping.");
+        Script.update.disconnect(maybePlaySound);
     }
 }
 
 function scriptEnding() {
     if (Audio.isInjectorPlaying(soundPlaying)) {
         Audio.stopInjector(soundPlaying);
-        print("Stopped sound loop");
+        Entities.deleteEntity(ball);
+        print("Stopped sound.");
     }
 }
 
 // Connect a call back that happens every frame
 Script.scriptEnding.connect(scriptEnding);
-Controller.keyPressEvent.connect(keyPressEvent);
+Script.update.connect(maybePlaySound);