Merge branch 'master' of github.com:highfidelity/hifi into tablet-ui
This commit is contained in: commit d97be52542
89 changed files with 23627 additions and 1002 deletions
.gitignore (vendored): 4 changes

@@ -4,6 +4,10 @@ CMakeFiles/
CMakeScripts/
cmake_install.cmake
build*/
release*/
debug*/
gprof*/
valgrind*/
ext/
Makefile
*.user
assignment-client/src/Agent.cpp

@@ -351,7 +351,9 @@ void Agent::executeScript() {
    Transform audioTransform;
    audioTransform.setTranslation(scriptedAvatar->getPosition());
    audioTransform.setRotation(scriptedAvatar->getOrientation());
-   AbstractAudioInterface::emitAudioPacket(audio.data(), audio.size(), audioSequenceNumber, audioTransform, PacketType::MicrophoneAudioNoEcho);
+   AbstractAudioInterface::emitAudioPacket(audio.data(), audio.size(), audioSequenceNumber,
+       audioTransform, scriptedAvatar->getPosition(), glm::vec3(0),
+       PacketType::MicrophoneAudioNoEcho);
});

auto avatarHashMap = DependencyManager::set<AvatarHashMap>();
@@ -424,8 +426,9 @@ void Agent::setIsListeningToAudioStream(bool isListeningToAudioStream) {
},
[&](const SharedNodePointer& node) {
    qDebug() << "sending KillAvatar message to Audio Mixers";
-   auto packet = NLPacket::create(PacketType::KillAvatar, NUM_BYTES_RFC4122_UUID, true);
+   auto packet = NLPacket::create(PacketType::KillAvatar, NUM_BYTES_RFC4122_UUID + sizeof(KillAvatarReason), true);
    packet->write(getSessionUUID().toRfc4122());
+   packet->writePrimitive(KillAvatarReason::NoReason);
    nodeList->sendPacket(std::move(packet), *node);
});
@@ -475,8 +478,9 @@ void Agent::setIsAvatar(bool isAvatar) {
},
[&](const SharedNodePointer& node) {
    qDebug() << "sending KillAvatar message to Avatar and Audio Mixers";
-   auto packet = NLPacket::create(PacketType::KillAvatar, NUM_BYTES_RFC4122_UUID, true);
+   auto packet = NLPacket::create(PacketType::KillAvatar, NUM_BYTES_RFC4122_UUID + sizeof(KillAvatarReason), true);
    packet->write(getSessionUUID().toRfc4122());
+   packet->writePrimitive(KillAvatarReason::NoReason);
    nodeList->sendPacket(std::move(packet), *node);
});
}
@@ -580,6 +584,8 @@ void Agent::processAgentAvatarAudio() {
    audioPacket->writePrimitive(scriptedAvatar->getPosition());
    glm::quat headOrientation = scriptedAvatar->getHeadOrientation();
    audioPacket->writePrimitive(headOrientation);
+   audioPacket->writePrimitive(scriptedAvatar->getPosition());
+   audioPacket->writePrimitive(glm::vec3(0));
} else if (nextSoundOutput) {

    // write the codec

@@ -592,6 +598,8 @@ void Agent::processAgentAvatarAudio() {
    audioPacket->writePrimitive(scriptedAvatar->getPosition());
    glm::quat headOrientation = scriptedAvatar->getHeadOrientation();
    audioPacket->writePrimitive(headOrientation);
+   audioPacket->writePrimitive(scriptedAvatar->getPosition());
+   audioPacket->writePrimitive(glm::vec3(0));

    QByteArray encodedBuffer;
    if (_flushEncoder) {
assignment-client/src/audio/AudioMixer.cpp

@@ -9,38 +9,12 @@
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <errno.h>
#include <fcntl.h>
#include <fstream>
#include <iostream>
#include <math.h>
#include <memory>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread>

#ifdef _WIN32
#include <math.h>
#else
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#endif //_WIN32

#include <glm/glm.hpp>
#include <glm/gtx/norm.hpp>
#include <glm/gtx/vector_angle.hpp>

#include <QtCore/QCoreApplication>
#include <QtCore/QJsonArray>
#include <QtCore/QJsonDocument>
#include <QtCore/QJsonObject>
#include <QtCore/QJsonValue>
#include <QtCore/QThread>
#include <QtNetwork/QNetworkRequest>
#include <QtNetwork/QNetworkReply>

#include <LogHandler.h>
#include <NetworkAccessManager.h>
@@ -67,22 +41,20 @@ static const float DEFAULT_NOISE_MUTING_THRESHOLD = 0.003f;
static const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer";
static const QString AUDIO_ENV_GROUP_KEY = "audio_env";
static const QString AUDIO_BUFFER_GROUP_KEY = "audio_buffer";
static const QString AUDIO_THREADING_GROUP_KEY = "audio_threading";

int AudioMixer::_numStaticJitterFrames{ -1 };
bool AudioMixer::_enableFilter = true;

- bool AudioMixer::shouldMute(float quietestFrame) {
-     return (quietestFrame > _noiseMutingThreshold);
- }
+ float AudioMixer::_noiseMutingThreshold{ DEFAULT_NOISE_MUTING_THRESHOLD };
+ float AudioMixer::_attenuationPerDoublingInDistance{ DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE };
+ float AudioMixer::_trailingSleepRatio{ 1.0f };
+ float AudioMixer::_performanceThrottlingRatio{ 0.0f };
+ float AudioMixer::_minAudibilityThreshold{ LOUDNESS_TO_DISTANCE_RATIO / 2.0f };
+ QHash<QString, AABox> AudioMixer::_audioZones;
+ QVector<AudioMixer::ZoneSettings> AudioMixer::_zoneSettings;
+ QVector<AudioMixer::ReverbSettings> AudioMixer::_zoneReverbSettings;

AudioMixer::AudioMixer(ReceivedMessage& message) :
-     ThreadedAssignment(message),
-     _trailingSleepRatio(1.0f),
-     _minAudibilityThreshold(LOUDNESS_TO_DISTANCE_RATIO / 2.0f),
-     _performanceThrottlingRatio(0.0f),
-     _attenuationPerDoublingInDistance(DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE),
-     _noiseMutingThreshold(DEFAULT_NOISE_MUTING_THRESHOLD)
- {
+     ThreadedAssignment(message) {
    auto nodeList = DependencyManager::get<NodeList>();
    auto& packetReceiver = nodeList->getPacketReceiver();
@@ -96,405 +68,10 @@ AudioMixer::AudioMixer(ReceivedMessage& message) :
packetReceiver.registerListener(PacketType::KillAvatar, this, "handleKillAvatarPacket");
packetReceiver.registerListener(PacketType::NodeMuteRequest, this, "handleNodeMuteRequestPacket");
packetReceiver.registerListener(PacketType::RadiusIgnoreRequest, this, "handleRadiusIgnoreRequestPacket");

connect(nodeList.data(), &NodeList::nodeKilled, this, &AudioMixer::handleNodeKilled);
}

- const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
-
- const int IEEE754_MANT_BITS = 23;
- const int IEEE754_EXPN_BIAS = 127;
-
- //
- // for x > 0.0f, returns log2(x)
- // for x <= 0.0f, returns large negative value
- //
- // abs |error| < 8e-3, smooth (exact for x=2^N) for NPOLY=3
- // abs |error| < 2e-4, smooth (exact for x=2^N) for NPOLY=5
- // rel |error| < 0.4 from precision loss very close to 1.0f
- //
- static inline float fastlog2(float x) {
-
-     union { float f; int32_t i; } mant, bits = { x };
-
-     // split into mantissa and exponent
-     mant.i = (bits.i & ((1 << IEEE754_MANT_BITS) - 1)) | (IEEE754_EXPN_BIAS << IEEE754_MANT_BITS);
-     int32_t expn = (bits.i >> IEEE754_MANT_BITS) - IEEE754_EXPN_BIAS;
-
-     mant.f -= 1.0f;
-
-     // polynomial for log2(1+x) over x=[0,1]
-     //x = (-0.346555386f * mant.f + 1.346555386f) * mant.f;
-     x = (((-0.0821307180f * mant.f + 0.321188984f) * mant.f - 0.677784014f) * mant.f + 1.43872575f) * mant.f;
-
-     return x + expn;
- }
-
- //
- // for -126 <= x < 128, returns exp2(x)
- //
- // rel |error| < 3e-3, smooth (exact for x=N) for NPOLY=3
- // rel |error| < 9e-6, smooth (exact for x=N) for NPOLY=5
- //
- static inline float fastexp2(float x) {
-
-     union { float f; int32_t i; } xi;
-
-     // bias such that x > 0
-     x += IEEE754_EXPN_BIAS;
-     //x = MAX(x, 1.0f);
-     //x = MIN(x, 254.9999f);
-
-     // split into integer and fraction
-     xi.i = (int32_t)x;
-     x -= xi.i;
-
-     // construct exp2(xi) as a float
-     xi.i <<= IEEE754_MANT_BITS;
-
-     // polynomial for exp2(x) over x=[0,1]
-     //x = (0.339766028f * x + 0.660233972f) * x + 1.0f;
-     x = (((0.0135557472f * x + 0.0520323690f) * x + 0.241379763f) * x + 0.693032121f) * x + 1.0f;
-
-     return x * xi.f;
- }
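
These two helpers approximate log2/exp2 by splitting the IEEE-754 bit pattern into exponent and mantissa and fitting a degree-5 polynomial to the fractional part; gainForSource() below composes them to evaluate g^log2(d/d0) cheaply. A minimal standalone sketch (illustrative, not part of this commit; assumes fastlog2/fastexp2 are in scope):

#include <cmath>
#include <cstdio>

int main() {
    const float g = 0.5f; // 1 - attenuationPerDoublingInDistance: halve the gain per doubling
    for (float d = 1.0f; d <= 64.0f; d *= 2.0f) {
        float fast  = fastexp2(fastlog2(g) * fastlog2(d));    // approximated attenuation
        float exact = std::exp2(std::log2(g) * std::log2(d)); // reference: g^log2(d)
        std::printf("d = %5.1f   fast = %.4f   exact = %.4f\n", d, fast, exact);
    }
    return 0;
}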

- float AudioMixer::gainForSource(const PositionalAudioStream& streamToAdd,
-         const AvatarAudioStream& listeningNodeStream, const glm::vec3& relativePosition, bool isEcho) {
-     float gain = 1.0f;
-
-     float distanceBetween = glm::length(relativePosition);
-
-     if (distanceBetween < EPSILON) {
-         distanceBetween = EPSILON;
-     }
-
-     if (streamToAdd.getType() == PositionalAudioStream::Injector) {
-         gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
-     }
-
-     if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
-         // source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
-         glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;
-
-         float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
-                                            glm::normalize(rotatedListenerPosition));
-
-         const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
-         const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
-
-         float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
-             (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));
-
-         // multiply the current attenuation coefficient by the calculated off axis coefficient
-         gain *= offAxisCoefficient;
-     }
-
-     float attenuationPerDoublingInDistance = _attenuationPerDoublingInDistance;
-     for (int i = 0; i < _zonesSettings.length(); ++i) {
-         if (_audioZones[_zonesSettings[i].source].contains(streamToAdd.getPosition()) &&
-             _audioZones[_zonesSettings[i].listener].contains(listeningNodeStream.getPosition())) {
-             attenuationPerDoublingInDistance = _zonesSettings[i].coefficient;
-             break;
-         }
-     }
-
-     if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {
-
-         // translate the zone setting to gain per log2(distance)
-         float g = 1.0f - attenuationPerDoublingInDistance;
-         g = (g < EPSILON) ? EPSILON : g;
-         g = (g > 1.0f) ? 1.0f : g;
-
-         // calculate the distance coefficient using the distance to this node
-         float distanceCoefficient = fastexp2(fastlog2(g) * fastlog2(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE));
-
-         // multiply the current attenuation coefficient by the distance coefficient
-         gain *= distanceCoefficient;
-     }
-
-     return gain;
- }
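
To make the attenuation concrete, a worked example under the defaults used above (illustrative numbers, not part of the commit):

// Listener 8 m from a microphone source, attenuationPerDoublingInDistance = 0.5,
// source avatar facing directly away from the listener (so the listener-to-source
// vector aligns with the source's forward axis, angleOfDelivery = 0):
//
//   off-axis:  offAxisCoefficient = 0.2 + 0.4 * (0 / (pi/2)) = 0.2
//   distance:  g = 1 - 0.5 = 0.5;  distanceCoefficient = 0.5 ^ log2(8 / 1) = 0.5^3 = 0.125
//   total:     gain = 0.2 * 0.125 = 0.025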

- float AudioMixer::azimuthForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
-         const glm::vec3& relativePosition) {
-     glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation());
-
-     // Compute sample delay for the two ears to create phase panning
-     glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
-
-     // project the rotated source position vector onto the XZ plane
-     rotatedSourcePosition.y = 0.0f;
-
-     static const float SOURCE_DISTANCE_THRESHOLD = 1e-30f;
-
-     if (glm::length2(rotatedSourcePosition) > SOURCE_DISTANCE_THRESHOLD) {
-         // produce an oriented angle about the y-axis
-         return glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedSourcePosition), glm::vec3(0.0f, -1.0f, 0.0f));
-     } else {
-         // there is no distance between listener and source - return no azimuth
-         return 0;
-     }
- }
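
The sign convention falls out of the y-down reference axis passed to glm::orientedAngle; a quick illustrative check (assumes the glm headers included above):

// With listener forward = -Z and reference axis (0, -1, 0), a source directly to
// the listener's right comes out at +pi/2, and one to the left at -pi/2:
glm::vec3 toRight(1.0f, 0.0f, 0.0f);
float az = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
                              glm::normalize(toRight),
                              glm::vec3(0.0f, -1.0f, 0.0f));
// az == +pi/2; negating toRight.x yields -pi/2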

- void AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
-         const PositionalAudioStream& streamToAdd,
-         const QUuid& sourceNodeID,
-         const AvatarAudioStream& listeningNodeStream) {
-
-     // to reduce artifacts we calculate the gain and azimuth for every source for this listener
-     // even if we are not going to end up mixing in this source
-
-     ++_totalMixes;
-
-     // this ensures that the tail of any previously mixed audio or the first block of new audio sounds correct
-
-     // check if this is a server echo of a source back to itself
-     bool isEcho = (&streamToAdd == &listeningNodeStream);
-
-     glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();
-
-     // figure out the distance between source and listener
-     float distance = glm::max(glm::length(relativePosition), EPSILON);
-
-     // figure out the gain for this source at the listener
-     float gain = gainForSource(streamToAdd, listeningNodeStream, relativePosition, isEcho);
-
-     // figure out the azimuth to this source at the listener
-     float azimuth = isEcho ? 0.0f : azimuthForSource(streamToAdd, listeningNodeStream, relativePosition);
-
-     float repeatedFrameFadeFactor = 1.0f;
-
-     static const int HRTF_DATASET_INDEX = 1;
-
-     if (!streamToAdd.lastPopSucceeded()) {
-         bool forceSilentBlock = true;
-
-         if (!streamToAdd.getLastPopOutput().isNull()) {
-             bool isInjector = dynamic_cast<const InjectedAudioStream*>(&streamToAdd);
-
-             // in an injector, just go silent - the injector has likely ended
-             // in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence
-
-             // we'll repeat the last block until it has a block to mix
-             // and we'll gradually fade that repeated block into silence.
-
-             // calculate its fade factor, which depends on how many times it's already been repeated.
-             repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
-             if (!isInjector && repeatedFrameFadeFactor > 0.0f) {
-                 // apply the repeatedFrameFadeFactor to the gain
-                 gain *= repeatedFrameFadeFactor;
-
-                 forceSilentBlock = false;
-             }
-         }
-
-         if (forceSilentBlock) {
-             // we're deciding not to repeat either since we've already done it enough times or repetition with fade is disabled
-             // in this case we will call renderSilent with a forced silent block
-             // this ensures the correct tail from the previously mixed block and the correct spatialization of first block
-             // of any upcoming audio
-
-             if (!streamToAdd.isStereo() && !isEcho) {
-                 // get the existing listener-source HRTF object, or create a new one
-                 auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());
-
-                 // this is not done for stereo streams since they do not go through the HRTF
-                 static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
-                 hrtf.renderSilent(silentMonoBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
-                                   AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
-
-                 ++_hrtfSilentRenders;
-             }
-
-             return;
-         }
-     }
-
-     // grab the stream from the ring buffer
-     AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();
-
-     if (streamToAdd.isStereo() || isEcho) {
-         // this is a stereo source or server echo so we do not pass it through the HRTF
-         // simply apply our calculated gain to each sample
-         if (streamToAdd.isStereo()) {
-             for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
-                 _mixedSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
-             }
-
-             ++_manualStereoMixes;
-         } else {
-             for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
-                 auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
-                 _mixedSamples[i] += monoSample;
-                 _mixedSamples[i + 1] += monoSample;
-             }
-
-             ++_manualEchoMixes;
-         }
-
-         return;
-     }
-
-     // get the existing listener-source HRTF object, or create a new one
-     auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());
-
-     static int16_t streamBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL];
-
-     streamPopOutput.readSamples(streamBlock, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
-
-     // if the frame we're about to mix is silent, simply call render silent and move on
-     if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
-         // silent frame from source
-
-         // we still need to call renderSilent via the HRTF for mono source
-         hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
-                           AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
-
-         ++_hrtfSilentRenders;
-
-         return;
-     }
-
-     if (_performanceThrottlingRatio > 0.0f
-             && streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= _minAudibilityThreshold) {
-         // the mixer is struggling so we're going to drop off some streams
-
-         // we call renderSilent via the HRTF with the actual frame data and a gain of 0.0
-         hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
-                           AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
-
-         ++_hrtfStruggleRenders;
-
-         return;
-     }
-
-     ++_hrtfRenders;
-
-     // mono stream, call the HRTF with our block and calculated azimuth and gain
-     hrtf.render(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
-                 AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
- }
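
The manual echo path above upmixes a mono block into the stereo mix by writing each gain-scaled sample to both channels; a self-contained sketch of that inner loop (illustrative names and buffer sizes, not part of the commit):

#include <cstdint>

// Mirror of the mono -> stereo accumulate loop above; maxSample stands in for
// AudioConstants::MAX_SAMPLE_VALUE (32767 for int16_t samples).
void upmixMonoWithGain(const int16_t* mono, float* stereoMix,
                       int stereoSamples, float gain, float maxSample) {
    for (int i = 0; i < stereoSamples; i += 2) {
        float monoSample = mono[i / 2] * gain / maxSample; // normalize to [-1, 1]
        stereoMix[i] += monoSample;     // left channel
        stereoMix[i + 1] += monoSample; // right channel
    }
}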

- bool AudioMixer::prepareMixForListeningNode(Node* node) {
-     AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
-     AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
-
-     // zero out the client mix for this node
-     memset(_mixedSamples, 0, sizeof(_mixedSamples));
-
-     // loop through all other nodes that have sufficient audio to mix
-
-     DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& otherNode){
-         // make sure that we have audio data for this other node
-         // and that it isn't being ignored by our listening node
-         // and that it isn't ignoring our listening node
-         if (otherNode->getLinkedData()
-             && !node->isIgnoringNodeWithID(otherNode->getUUID()) && !otherNode->isIgnoringNodeWithID(node->getUUID())) {
-             AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
-
-             // check to see if we're ignoring in radius
-             bool insideIgnoreRadius = false;
-             if (node->isIgnoreRadiusEnabled() || otherNode->isIgnoreRadiusEnabled()) {
-                 AudioMixerClientData* otherData = reinterpret_cast<AudioMixerClientData*>(otherNode->getLinkedData());
-                 AudioMixerClientData* nodeData = reinterpret_cast<AudioMixerClientData*>(node->getLinkedData());
-                 float ignoreRadius = glm::min(node->getIgnoreRadius(), otherNode->getIgnoreRadius());
-                 if (glm::distance(nodeData->getPosition(), otherData->getPosition()) < ignoreRadius) {
-                     insideIgnoreRadius = true;
-                 }
-             }
-
-             if (!insideIgnoreRadius) {
-                 // enumerate the ARBs attached to the otherNode and add all that should be added to mix
-                 auto streamsCopy = otherNodeClientData->getAudioStreams();
-                 for (auto& streamPair : streamsCopy) {
-                     auto otherNodeStream = streamPair.second;
-                     if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
-                         addStreamToMixForListeningNodeWithStream(*listenerNodeData, *otherNodeStream, otherNode->getUUID(),
-                                                                  *nodeAudioStream);
-                     }
-                 }
-             }
-         }
-     });
-
-     // use the per listener AudioLimiter to render the mixed data...
-     listenerNodeData->audioLimiter.render(_mixedSamples, _clampedSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
-
-     // check for silent audio after the peak limiter has converted the samples
-     bool hasAudio = false;
-     for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
-         if (_clampedSamples[i] != 0) {
-             hasAudio = true;
-             break;
-         }
-     }
-     return hasAudio;
- }
-
- void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
-     // Send stream properties
-     bool hasReverb = false;
-     float reverbTime, wetLevel;
-     // find reverb properties
-     for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
-         AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
-         glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition();
-         AABox box = _audioZones[_zoneReverbSettings[i].zone];
-         if (box.contains(streamPosition)) {
-             hasReverb = true;
-             reverbTime = _zoneReverbSettings[i].reverbTime;
-             wetLevel = _zoneReverbSettings[i].wetLevel;
-
-             break;
-         }
-     }
-
-     AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
-     AvatarAudioStream* stream = nodeData->getAvatarAudioStream();
-     bool dataChanged = (stream->hasReverb() != hasReverb) ||
-         (stream->hasReverb() && (stream->getRevebTime() != reverbTime ||
-                                  stream->getWetLevel() != wetLevel));
-     if (dataChanged) {
-         // Update stream
-         if (hasReverb) {
-             stream->setReverb(reverbTime, wetLevel);
-         } else {
-             stream->clearReverb();
-         }
-     }
-
-     // Send at change or every so often
-     float CHANCE_OF_SEND = 0.01f;
-     bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);
-
-     if (sendData) {
-         auto nodeList = DependencyManager::get<NodeList>();
-
-         unsigned char bitset = 0;
-
-         int packetSize = sizeof(bitset);
-
-         if (hasReverb) {
-             packetSize += sizeof(reverbTime) + sizeof(wetLevel);
-         }
-
-         auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
-
-         if (hasReverb) {
-             setAtBit(bitset, HAS_REVERB_BIT);
-         }
-
-         envPacket->writePrimitive(bitset);
-
-         if (hasReverb) {
-             envPacket->writePrimitive(reverbTime);
-             envPacket->writePrimitive(wetLevel);
-         }
-         nodeList->sendPacket(std::move(envPacket), *node);
-     }
- }

void AudioMixer::handleNodeAudioPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
+     getOrCreateClientData(sendingNode.data());
    DependencyManager::get<NodeList>()->updateNodeWithDataFromPacket(message, sendingNode);
@@ -668,8 +245,8 @@ void AudioMixer::removeHRTFsForFinishedInjector(const QUuid& streamID) {
}

QString AudioMixer::percentageForMixStats(int counter) {
-     if (_totalMixes > 0) {
-         float mixPercentage = (float(counter) / _totalMixes) * 100.0f;
+     if (_stats.totalMixes > 0) {
+         float mixPercentage = (float(counter) / _stats.totalMixes) * 100.0f;
        return QString::number(mixPercentage, 'f', 2);
    } else {
        return QString("0.0");
@@ -683,34 +260,57 @@ void AudioMixer::sendStatsPacket() {
        return;
    }

    // general stats
    statsObject["useDynamicJitterBuffers"] = _numStaticJitterFrames == -1;
    statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
    statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;

-     statsObject["avg_streams_per_frame"] = (float)_sumStreams / (float)_numStatFrames;
-     statsObject["avg_listeners_per_frame"] = (float)_sumListeners / (float)_numStatFrames;
+     statsObject["avg_streams_per_frame"] = (float)_stats.sumStreams / (float)_numStatFrames;
+     statsObject["avg_listeners_per_frame"] = (float)_stats.sumListeners / (float)_numStatFrames;

+     // timing stats
+     QJsonObject timingStats;
+     uint64_t timing, trailing;
+
+     _sleepTiming.get(timing, trailing);
+     timingStats["us_per_sleep"] = (qint64)(timing / _numStatFrames);
+     timingStats["us_per_sleep_trailing"] = (qint64)(trailing / _numStatFrames);
+
+     _frameTiming.get(timing, trailing);
+     timingStats["us_per_frame"] = (qint64)(timing / _numStatFrames);
+     timingStats["us_per_frame_trailing"] = (qint64)(trailing / _numStatFrames);
+
+     _prepareTiming.get(timing, trailing);
+     timingStats["us_per_prepare"] = (qint64)(timing / _numStatFrames);
+     timingStats["us_per_prepare_trailing"] = (qint64)(trailing / _numStatFrames);
+
+     _mixTiming.get(timing, trailing);
+     timingStats["us_per_mix"] = (qint64)(timing / _numStatFrames);
+     timingStats["us_per_mix_trailing"] = (qint64)(trailing / _numStatFrames);
+
+     _eventsTiming.get(timing, trailing);
+     timingStats["us_per_events"] = (qint64)(timing / _numStatFrames);
+     timingStats["us_per_events_trailing"] = (qint64)(trailing / _numStatFrames);
+
+     // call it "avg_..." to keep it higher in the display, sorted alphabetically
+     statsObject["avg_timing_stats"] = timingStats;

    // mix stats
    QJsonObject mixStats;
-     mixStats["%_hrtf_mixes"] = percentageForMixStats(_hrtfRenders);
-     mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_hrtfSilentRenders);
-     mixStats["%_hrtf_struggle_mixes"] = percentageForMixStats(_hrtfStruggleRenders);
-     mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_manualStereoMixes);
-     mixStats["%_manual_echo_mixes"] = percentageForMixStats(_manualEchoMixes);
-
-     mixStats["total_mixes"] = _totalMixes;
-     mixStats["avg_mixes_per_block"] = _totalMixes / _numStatFrames;
+     mixStats["%_hrtf_mixes"] = percentageForMixStats(_stats.hrtfRenders);
+     mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_stats.hrtfSilentRenders);
+     mixStats["%_hrtf_struggle_mixes"] = percentageForMixStats(_stats.hrtfStruggleRenders);
+     mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_stats.manualStereoMixes);
+     mixStats["%_manual_echo_mixes"] = percentageForMixStats(_stats.manualEchoMixes);
+
+     mixStats["total_mixes"] = _stats.totalMixes;
+     mixStats["avg_mixes_per_block"] = _stats.totalMixes / _numStatFrames;

    statsObject["mix_stats"] = mixStats;

-     _sumStreams = 0;
-     _sumListeners = 0;
-     _hrtfRenders = 0;
-     _hrtfSilentRenders = 0;
-     _hrtfStruggleRenders = 0;
-     _manualStereoMixes = 0;
-     _manualEchoMixes = 0;
-     _totalMixes = 0;
    _numStatFrames = 0;
+     _stats.reset();

    // add stats for each listener
    auto nodeList = DependencyManager::get<NodeList>();
@@ -744,7 +344,7 @@ void AudioMixer::run() {

    // wait until we have the domain-server settings, otherwise we bail
    DomainHandler& domainHandler = DependencyManager::get<NodeList>()->getDomainHandler();
-     connect(&domainHandler, &DomainHandler::settingsReceived, this, &AudioMixer::domainSettingsRequestComplete);
+     connect(&domainHandler, &DomainHandler::settingsReceived, this, &AudioMixer::start);
    connect(&domainHandler, &DomainHandler::settingsReceiveFail, this, &AudioMixer::domainSettingsRequestFailed);

    ThreadedAssignment::commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);
@@ -762,202 +362,165 @@ AudioMixerClientData* AudioMixer::getOrCreateClientData(Node* node) {
    return clientData;
}

- void AudioMixer::domainSettingsRequestComplete() {
+ void AudioMixer::start() {
    auto nodeList = DependencyManager::get<NodeList>();

    // prepare the NodeList
    nodeList->addNodeTypeToInterestSet(NodeType::Agent);
    nodeList->linkedDataCreateCallback = [&](Node* node) { getOrCreateClientData(node); };

-     DomainHandler& domainHandler = nodeList->getDomainHandler();
-     const QJsonObject& settingsObject = domainHandler.getSettingsObject();
    // parse out any AudioMixer settings
+     {
+         DomainHandler& domainHandler = nodeList->getDomainHandler();
+         const QJsonObject& settingsObject = domainHandler.getSettingsObject();
+         parseSettingsObject(settingsObject);
+     }

-     // check the settings object to see if we have anything we can parse out
-     parseSettingsObject(settingsObject);
+     // manageLoad state
+     auto frameTimestamp = p_high_resolution_clock::time_point::min();
+     unsigned int framesSinceManagement = std::numeric_limits<int>::max();

-     // queue up a connection to start broadcasting mixes now that we're ready to go
-     QMetaObject::invokeMethod(this, "broadcastMixes", Qt::QueuedConnection);
- }
-
- void AudioMixer::broadcastMixes() {
-     const int TRAILING_AVERAGE_FRAMES = 100;
-     const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
-     const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
-
-     const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
-     const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;
-
-     const float RATIO_BACK_OFF = 0.02f;
-
-     auto nodeList = DependencyManager::get<NodeList>();
-
-     auto nextFrameTimestamp = p_high_resolution_clock::now();
-     auto timeToSleep = std::chrono::microseconds(0);
-
-     int currentFrame = 1;
-     int numFramesPerSecond = (int) ceil(AudioConstants::NETWORK_FRAMES_PER_SEC);
-     int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;
+     // mix state
+     unsigned int frame = 1;

    while (!_isFinished) {
        // manage mixer load
        {
-             _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio) +
-                 // ratio of frame spent sleeping / total frame time
-                 ((CURRENT_FRAME_RATIO * timeToSleep.count()) / (float) AudioConstants::NETWORK_FRAME_USECS);
-
-             bool hasRatioChanged = false;
-
-             if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
-                 if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
-                     qDebug() << "Mixer is struggling";
-                     // change our min required loudness to reduce some load
-                     _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));
-                     hasRatioChanged = true;
-                 } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
-                     qDebug() << "Mixer is recovering";
-                     // back off the required loudness
-                     _performanceThrottlingRatio = std::max(0.0f, _performanceThrottlingRatio - RATIO_BACK_OFF);
-                     hasRatioChanged = true;
-                 }
-
-                 if (hasRatioChanged) {
-                     // set our min audibility threshold from the new ratio
-                     _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
-                     framesSinceCutoffEvent = 0;
-
-                     qDebug() << "Sleeping" << _trailingSleepRatio << "of frame";
-                     qDebug() << "Cutoff is" << _performanceThrottlingRatio;
-                     qDebug() << "Minimum audibility to be mixed is" << _minAudibilityThreshold;
-                 }
-             }
-
-             if (!hasRatioChanged) {
-                 ++framesSinceCutoffEvent;
-             }
+             auto timer = _sleepTiming.timer();
+             manageLoad(frameTimestamp, framesSinceManagement);
        }

+         // mix
-         nodeList->eachNode([&](const SharedNodePointer& node) {
-             if (node->getLinkedData()) {
-                 AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
+         {
+             auto timer = _frameTiming.timer();

-                 // this function will attempt to pop a frame from each audio stream.
-                 // a pointer to the popped data is stored as a member in InboundAudioStream.
-                 // That's how the popped audio data will be read for mixing (but only if the pop was successful)
-                 _sumStreams += nodeData->checkBuffersBeforeFrameSend();
+             nodeList->nestedEach([&](NodeList::const_iterator cbegin, NodeList::const_iterator cend) {
+                 // prepare frames; pop off any new audio from their streams
+                 {
+                     auto timer = _prepareTiming.timer();
+                     std::for_each(cbegin, cend, [&](const SharedNodePointer& node) {
+                         _stats.sumStreams += prepareFrame(node, frame);
+                     });
+                 }

-                 // if the stream should be muted, send mute packet
-                 if (nodeData->getAvatarAudioStream()
-                     && (shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())
-                         || nodeData->shouldMuteClient())) {
-                     auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
-                     nodeList->sendPacket(std::move(mutePacket), *node);
-
-                     // probably now we just reset the flag, once should do it (?)
-                     nodeData->setShouldMuteClient(false);
-                 }
-
-                 if (node->getType() == NodeType::Agent && node->getActiveSocket()
-                     && nodeData->getAvatarAudioStream()) {
-
-                     bool mixHasAudio = prepareMixForListeningNode(node.data());
-
-                     std::unique_ptr<NLPacket> mixPacket;
-
-                     if (mixHasAudio || nodeData->shouldFlushEncoder()) {
-
-                         int mixPacketBytes = sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE
-                             + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
-                         mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);
-
-                         // pack sequence number
-                         quint16 sequence = nodeData->getOutgoingSequenceNumber();
-                         mixPacket->writePrimitive(sequence);
-
-                         // write the codec
-                         QString codecInPacket = nodeData->getCodecName();
-                         mixPacket->writeString(codecInPacket);
-
-                         QByteArray encodedBuffer;
-                         if (mixHasAudio) {
-                             QByteArray decodedBuffer(reinterpret_cast<char*>(_clampedSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
-                             nodeData->encode(decodedBuffer, encodedBuffer);
-                         } else {
-                             // time to flush, which resets the shouldFlush until next time we encode something
-                             nodeData->encodeFrameOfZeros(encodedBuffer);
-                         }
-                         // pack mixed audio samples
-                         mixPacket->write(encodedBuffer.constData(), encodedBuffer.size());
-
-                     } else {
-                         int silentPacketBytes = sizeof(quint16) + sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE;
-                         mixPacket = NLPacket::create(PacketType::SilentAudioFrame, silentPacketBytes);
-
-                         // pack sequence number
-                         quint16 sequence = nodeData->getOutgoingSequenceNumber();
-                         mixPacket->writePrimitive(sequence);
-
-                         // write the codec
-                         QString codecInPacket = nodeData->getCodecName();
-                         mixPacket->writeString(codecInPacket);
-
-                         // pack number of silent audio samples
-                         quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
-                         mixPacket->writePrimitive(numSilentSamples);
-                     }
-
-                     // Send audio environment
-                     sendAudioEnvironmentPacket(node);
-
-                     // send mixed audio packet
-                     nodeList->sendPacket(std::move(mixPacket), *node);
-                     nodeData->incrementOutgoingMixedAudioSequenceNumber();
-
-                     // send an audio stream stats packet to the client approximately every second
-                     ++currentFrame;
-                     currentFrame %= numFramesPerSecond;
-
-                     if (nodeData->shouldSendStats(currentFrame)) {
-                         nodeData->sendAudioStreamStatsPackets(node);
-                     }
-
-                     ++_sumListeners;
-                 }
-             }
+                 // mix across slave threads
+                 {
+                     auto timer = _mixTiming.timer();
+                     _slavePool.mix(cbegin, cend, frame);
+                 }
            });

+             // gather stats
+             _slavePool.each([&](AudioMixerSlave& slave) {
+                 _stats.accumulate(slave.stats);
+                 slave.stats.reset();
+             });

+             ++frame;
            ++_numStatFrames;
+         }

        // play nice with qt event-looping
        {
-             // since we're a while loop we need to help qt's event processing
+             auto timer = _eventsTiming.timer();

+             // since we're a while loop we need to yield to qt's event processing
            QCoreApplication::processEvents();

            if (_isFinished) {
-                 // alert qt that this is finished
+                 // alert qt eventing that this is finished
                QCoreApplication::sendPostedEvents(this, QEvent::DeferredDelete);
                break;
            }
        }
    }
}

-     // sleep until the next frame, if necessary
-     {
-         nextFrameTimestamp += std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS);
+ void AudioMixer::manageLoad(p_high_resolution_clock::time_point& frameTimestamp, unsigned int& framesSinceCutoffEvent) {
+     auto timeToSleep = std::chrono::microseconds(0);

-         auto now = p_high_resolution_clock::now();
-         timeToSleep = std::chrono::duration_cast<std::chrono::microseconds>(nextFrameTimestamp - now);
+     // sleep until the next frame, if necessary
+     {
+         // advance the next frame
+         frameTimestamp += std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS);
+         auto now = p_high_resolution_clock::now();

-         if (timeToSleep.count() < 0) {
-             nextFrameTimestamp = now;
-             timeToSleep = std::chrono::microseconds(0);
+         // calculate sleep
+         if (frameTimestamp < now) {
+             frameTimestamp = now;
+         } else {
+             timeToSleep = std::chrono::duration_cast<std::chrono::microseconds>(frameTimestamp - now);
+             std::this_thread::sleep_for(timeToSleep);
        }
    }

+     // manage mixer load
+     {
+         const int TRAILING_AVERAGE_FRAMES = 100;
+         const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
+         const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
+
+         const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
+         const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;
+
+         const float RATIO_BACK_OFF = 0.02f;
+
+         _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio) +
+             // ratio of frame spent sleeping / total frame time
+             ((CURRENT_FRAME_RATIO * timeToSleep.count()) / (float) AudioConstants::NETWORK_FRAME_USECS);
+
+         bool hasRatioChanged = false;
+
+         if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
+             if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
+                 qDebug() << "Mixer is struggling";
+                 // change our min required loudness to reduce some load
+                 _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));
+                 hasRatioChanged = true;
+             } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
+                 qDebug() << "Mixer is recovering";
+                 // back off the required loudness
+                 _performanceThrottlingRatio = std::max(0.0f, _performanceThrottlingRatio - RATIO_BACK_OFF);
+                 hasRatioChanged = true;
+             }
+
-         std::this_thread::sleep_for(timeToSleep);
+             if (hasRatioChanged) {
+                 // set our min audibility threshold from the new ratio
+                 _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
+                 framesSinceCutoffEvent = 0;
+
+                 qDebug() << "Sleeping" << _trailingSleepRatio << "of frame";
+                 qDebug() << "Cutoff is" << _performanceThrottlingRatio;
+                 qDebug() << "Minimum audibility to be mixed is" << _minAudibilityThreshold;
+             }
+         }
+
+         if (!hasRatioChanged) {
+             ++framesSinceCutoffEvent;
+         }
+     }
}
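
The throttling decision above hinges on one number: an exponential moving average of the fraction of each frame spent sleeping. Isolated as a pure function (illustrative sketch, not part of the commit):

// EMA over roughly the last TRAILING_AVERAGE_FRAMES frames: the newest sample
// carries weight 1/N, accumulated history carries weight 1 - 1/N.
float updateTrailingSleepRatio(float trailingRatio, float sleptUsecs, float frameUsecs) {
    const int TRAILING_AVERAGE_FRAMES = 100;
    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
    const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
    return PREVIOUS_FRAMES_RATIO * trailingRatio
         + CURRENT_FRAME_RATIO * (sleptUsecs / frameUsecs);
}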

+ int AudioMixer::prepareFrame(const SharedNodePointer& node, unsigned int frame) {
+     AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
+     if (data == nullptr) {
+         return 0;
+     }
+
+     return data->checkBuffersBeforeFrameSend();
+ }

void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
+     if (settingsObject.contains(AUDIO_THREADING_GROUP_KEY)) {
+         QJsonObject audioThreadingGroupObject = settingsObject[AUDIO_THREADING_GROUP_KEY].toObject();
+         const QString AUTO_THREADS = "auto_threads";
+         bool autoThreads = audioThreadingGroupObject[AUTO_THREADS].toBool();
+         if (!autoThreads) {
+             bool ok;
+             const QString NUM_THREADS = "num_threads";
+             int numThreads = audioThreadingGroupObject[NUM_THREADS].toString().toInt(&ok);
+             if (ok) {
+                 _slavePool.setNumThreads(numThreads);
+             }
+         }
+     }

    if (settingsObject.contains(AUDIO_BUFFER_GROUP_KEY)) {
        QJsonObject audioBufferGroupObject = settingsObject[AUDIO_BUFFER_GROUP_KEY].toObject();

@@ -1051,14 +614,6 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
        }
    }

-     const QString FILTER_KEY = "enable_filter";
-     if (audioEnvGroupObject[FILTER_KEY].isBool()) {
-         _enableFilter = audioEnvGroupObject[FILTER_KEY].toBool();
-     }
-     if (_enableFilter) {
-         qDebug() << "Filter enabled";
-     }

    const QString AUDIO_ZONES = "zones";
    if (audioEnvGroupObject[AUDIO_ZONES].isObject()) {
        const QJsonObject& zones = audioEnvGroupObject[AUDIO_ZONES].toObject();

@@ -1116,7 +671,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
            coefficientObject.contains(LISTENER) &&
            coefficientObject.contains(COEFFICIENT)) {

-             ZonesSettings settings;
+             ZoneSettings settings;

            bool ok;
            settings.source = coefficientObject.value(SOURCE).toString();

@@ -1126,7 +681,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
            if (ok && settings.coefficient >= 0.0f && settings.coefficient <= 1.0f &&
                _audioZones.contains(settings.source) && _audioZones.contains(settings.listener)) {

-                 _zonesSettings.push_back(settings);
+                 _zoneSettings.push_back(settings);
                qDebug() << "Added Coefficient:" << settings.source << settings.listener << settings.coefficient;
            }
        }

@@ -1159,6 +714,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
            settings.wetLevel = wetLevel;

            _zoneReverbSettings.push_back(settings);

            qDebug() << "Added Reverb:" << zone << reverbTime << wetLevel;
        }
    }

@@ -1166,3 +722,28 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
        }
    }
}

+ AudioMixer::Timer::Timing::Timing(uint64_t& sum) : _sum(sum) {
+     _timing = p_high_resolution_clock::now();
+ }
+
+ AudioMixer::Timer::Timing::~Timing() {
+     _sum += std::chrono::duration_cast<std::chrono::microseconds>(p_high_resolution_clock::now() - _timing).count();
+ }
+
+ void AudioMixer::Timer::get(uint64_t& timing, uint64_t& trailing) {
+     // update history
+     _index = (_index + 1) % TIMER_TRAILING_SECONDS;
+     uint64_t oldTiming = _history[_index];
+     _history[_index] = _sum;
+
+     // update trailing
+     _trailing -= oldTiming;
+     _trailing += _sum;
+
+     timing = _sum;
+     trailing = _trailing / TIMER_TRAILING_SECONDS;
+
+     // reset _sum
+     _sum = 0;
+ }
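
Usage sketch for the scoped timer (mirrors how the mix loop uses _sleepTiming and friends; illustrative):

{
    auto timing = _sleepTiming.timer();       // Timing ctor records the start time
    std::this_thread::sleep_for(timeToSleep);
}                                             // dtor adds elapsed microseconds to _sum

uint64_t lastUs, trailingUs;
_sleepTiming.get(lastUs, trailingUs);         // reads and resets _sum; trailingUs
                                              // averages the last TIMER_TRAILING_SECONDS calls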

assignment-client/src/audio/AudioMixer.h

@@ -18,31 +18,45 @@
#include <ThreadedAssignment.h>
#include <UUIDHasher.h>

+ #include "AudioMixerStats.h"
+ #include "AudioMixerSlavePool.h"

class PositionalAudioStream;
class AvatarAudioStream;
class AudioHRTF;
class AudioMixerClientData;

- const int SAMPLE_PHASE_DELAY_AT_90 = 20;
-
- const int READ_DATAGRAMS_STATS_WINDOW_SECONDS = 30;

/// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
class AudioMixer : public ThreadedAssignment {
    Q_OBJECT
public:
    AudioMixer(ReceivedMessage& message);

- public slots:
-     /// threaded run of assignment
-     void run() override;
-
-     void sendStatsPacket() override;
+     struct ZoneSettings {
+         QString source;
+         QString listener;
+         float coefficient;
+     };
+     struct ReverbSettings {
+         QString zone;
+         float reverbTime;
+         float wetLevel;
+     };
+
+     static int getStaticJitterFrames() { return _numStaticJitterFrames; }
+     static bool shouldMute(float quietestFrame) { return quietestFrame > _noiseMutingThreshold; }
+     static float getAttenuationPerDoublingInDistance() { return _attenuationPerDoublingInDistance; }
+     static float getMinimumAudibilityThreshold() { return _performanceThrottlingRatio > 0.0f ? _minAudibilityThreshold : 0.0f; }
+     static const QHash<QString, AABox>& getAudioZones() { return _audioZones; }
+     static const QVector<ZoneSettings>& getZoneSettings() { return _zoneSettings; }
+     static const QVector<ReverbSettings>& getReverbSettings() { return _zoneReverbSettings; }
+
+ public slots:
+     void run() override;
+     void sendStatsPacket() override;

private slots:
-     void broadcastMixes();
    // packet handlers
    void handleNodeAudioPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
    void handleMuteEnvironmentPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
    void handleNegotiateAudioFormat(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode);

@@ -52,74 +66,66 @@ private slots:
    void handleKillAvatarPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
    void handleNodeMuteRequestPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);

+     void start();
    void removeHRTFsForFinishedInjector(const QUuid& streamID);

private:
    // mixing helpers
+     // check and maybe throttle mixer load by changing audibility threshold
+     void manageLoad(p_high_resolution_clock::time_point& frameTimestamp, unsigned int& framesSinceManagement);
+     // pop a frame from any streams on the node
+     // returns the number of available streams
+     int prepareFrame(const SharedNodePointer& node, unsigned int frame);

    AudioMixerClientData* getOrCreateClientData(Node* node);
-     void domainSettingsRequestComplete();

-     /// adds one stream to the mix for a listening node
-     void addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
-                                                   const PositionalAudioStream& streamToAdd,
-                                                   const QUuid& sourceNodeID,
-                                                   const AvatarAudioStream& listeningNodeStream);
-
-     float gainForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
-                         const glm::vec3& relativePosition, bool isEcho);
-     float azimuthForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
-                            const glm::vec3& relativePosition);
-
-     /// prepares and sends a mix to one Node
-     bool prepareMixForListeningNode(Node* node);
-
-     /// Send Audio Environment packet for a single node
-     void sendAudioEnvironmentPacket(SharedNodePointer node);
-
-     void perSecondActions();

    QString percentageForMixStats(int counter);

-     bool shouldMute(float quietestFrame);

    void parseSettingsObject(const QJsonObject& settingsObject);

-     float _trailingSleepRatio;
-     float _minAudibilityThreshold;
-     float _performanceThrottlingRatio;
-     float _attenuationPerDoublingInDistance;
-     float _noiseMutingThreshold;
    int _numStatFrames { 0 };
-     int _sumStreams { 0 };
-     int _sumListeners { 0 };
-     int _hrtfRenders { 0 };
-     int _hrtfSilentRenders { 0 };
-     int _hrtfStruggleRenders { 0 };
-     int _manualStereoMixes { 0 };
-     int _manualEchoMixes { 0 };
-     int _totalMixes { 0 };
+     AudioMixerStats _stats;

    QString _codecPreferenceOrder;

-     float _mixedSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
-     int16_t _clampedSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
+     AudioMixerSlavePool _slavePool;

-     QHash<QString, AABox> _audioZones;
-     struct ZonesSettings {
-         QString source;
-         QString listener;
-         float coefficient;
+     class Timer {
+     public:
+         class Timing {
+         public:
+             Timing(uint64_t& sum);
+             ~Timing();
+         private:
+             p_high_resolution_clock::time_point _timing;
+             uint64_t& _sum;
+         };
+
+         Timing timer() { return Timing(_sum); }
+         void get(uint64_t& timing, uint64_t& trailing);
+     private:
+         static const int TIMER_TRAILING_SECONDS = 10;
+
+         uint64_t _sum { 0 };
+         uint64_t _trailing { 0 };
+         uint64_t _history[TIMER_TRAILING_SECONDS] {};
+         int _index { 0 };
    };
-     QVector<ZonesSettings> _zonesSettings;
-     struct ReverbSettings {
-         QString zone;
-         float reverbTime;
-         float wetLevel;
-     };
-     QVector<ReverbSettings> _zoneReverbSettings;
+     Timer _sleepTiming;
+     Timer _frameTiming;
+     Timer _prepareTiming;
+     Timer _mixTiming;
+     Timer _eventsTiming;

    static int _numStaticJitterFrames; // -1 denotes dynamic jitter buffering

    static bool _enableFilter;
+     static float _noiseMutingThreshold;
+     static float _attenuationPerDoublingInDistance;
+     static float _trailingSleepRatio;
+     static float _performanceThrottlingRatio;
+     static float _minAudibilityThreshold;
+     static QHash<QString, AABox> _audioZones;
+     static QVector<ZoneSettings> _zoneSettings;
+     static QVector<ReverbSettings> _zoneReverbSettings;
};

#endif // hifi_AudioMixer_h

assignment-client/src/audio/AudioMixerClientData.h

@@ -90,6 +90,8 @@ public:
    bool shouldMuteClient() { return _shouldMuteClient; }
    void setShouldMuteClient(bool shouldMuteClient) { _shouldMuteClient = shouldMuteClient; }
    glm::vec3 getPosition() { return getAvatarAudioStream() ? getAvatarAudioStream()->getPosition() : glm::vec3(0); }
+     glm::vec3 getAvatarBoundingBoxCorner() { return getAvatarAudioStream() ? getAvatarAudioStream()->getAvatarBoundingBoxCorner() : glm::vec3(0); }
+     glm::vec3 getAvatarBoundingBoxScale() { return getAvatarAudioStream() ? getAvatarAudioStream()->getAvatarBoundingBoxScale() : glm::vec3(0); }

signals:
    void injectorStreamFinished(const QUuid& streamIdentifier);
assignment-client/src/audio/AudioMixerSlave.cpp (new file, 545 lines)

@@ -0,0 +1,545 @@
//
//  AudioMixerSlave.cpp
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/22/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <algorithm>

#include <glm/glm.hpp>
#include <glm/gtx/norm.hpp>
#include <glm/gtx/vector_angle.hpp>

#include <LogHandler.h>
#include <NetworkAccessManager.h>
#include <NodeList.h>
#include <Node.h>
#include <OctreeConstants.h>
#include <plugins/PluginManager.h>
#include <plugins/CodecPlugin.h>
#include <udt/PacketHeaders.h>
#include <SharedUtil.h>
#include <StDev.h>
#include <UUID.h>

#include "AudioRingBuffer.h"
#include "AudioMixer.h"
#include "AudioMixerClientData.h"
#include "AvatarAudioStream.h"
#include "InjectedAudioStream.h"

#include "AudioMixerSlave.h"

std::unique_ptr<NLPacket> createAudioPacket(PacketType type, int size, quint16 sequence, QString codec) {
    auto audioPacket = NLPacket::create(type, size);
    audioPacket->writePrimitive(sequence);
    audioPacket->writeString(codec);
    return audioPacket;
}

void sendMixPacket(const SharedNodePointer& node, AudioMixerClientData& data, QByteArray& buffer) {
    static const int MIX_PACKET_SIZE =
        sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
    quint16 sequence = data.getOutgoingSequenceNumber();
    QString codec = data.getCodecName();
    auto mixPacket = createAudioPacket(PacketType::MixedAudio, MIX_PACKET_SIZE, sequence, codec);

    // pack samples
    mixPacket->write(buffer.constData(), buffer.size());

    // send packet
    DependencyManager::get<NodeList>()->sendPacket(std::move(mixPacket), *node);
    data.incrementOutgoingMixedAudioSequenceNumber();
}
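
For reference, the MixedAudio payload assembled above lays out after the NLPacket header as follows (illustrative diagram; field widths bounded by the named constants):

// | quint16 sequence | codec name string (<= MAX_CODEC_NAME_LENGTH_ON_WIRE) |
// | encoded stereo frame (<= NETWORK_FRAME_BYTES_STEREO)                    |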

void sendSilentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    static const int SILENT_PACKET_SIZE =
        sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + sizeof(quint16);
    quint16 sequence = data.getOutgoingSequenceNumber();
    QString codec = data.getCodecName();
    auto mixPacket = createAudioPacket(PacketType::SilentAudioFrame, SILENT_PACKET_SIZE, sequence, codec);

    // pack number of samples
    mixPacket->writePrimitive(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO);

    // send packet
    DependencyManager::get<NodeList>()->sendPacket(std::move(mixPacket), *node);
    data.incrementOutgoingMixedAudioSequenceNumber();
}

void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    bool hasReverb = false;
    float reverbTime, wetLevel;

    auto& reverbSettings = AudioMixer::getReverbSettings();
    auto& audioZones = AudioMixer::getAudioZones();

    AvatarAudioStream* stream = data.getAvatarAudioStream();
    glm::vec3 streamPosition = stream->getPosition();

    // find reverb properties
    for (int i = 0; i < reverbSettings.size(); ++i) {
        AABox box = audioZones[reverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = reverbSettings[i].reverbTime;
            wetLevel = reverbSettings[i].wetLevel;
            break;
        }
    }

    // check if data changed
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
        (stream->hasReverb() && (stream->getRevebTime() != reverbTime || stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // send packet at change or every so often
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        // size the packet
        unsigned char bitset = 0;
        int packetSize = sizeof(bitset);
        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        // write the packet
        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }
        envPacket->writePrimitive(bitset);
        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }

        // send the packet
        DependencyManager::get<NodeList>()->sendPacket(std::move(envPacket), *node);
    }
}
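
The AudioEnvironment packet written above is one flag byte, optionally followed by two floats:

// | uint8 bitset (HAS_REVERB_BIT) | float reverbTime | float wetLevel |
//                                   ^-- both floats present only when the reverb bit is set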
|
||||

void AudioMixerSlave::configure(ConstIter begin, ConstIter end, unsigned int frame) {
    _begin = begin;
    _end = end;
    _frame = frame;
}

void AudioMixerSlave::mix(const SharedNodePointer& node) {
    // check that the node is valid
    AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
    if (data == nullptr) {
        return;
    }

    auto avatarStream = data->getAvatarAudioStream();
    if (avatarStream == nullptr) {
        return;
    }

    // send mute packet, if necessary
    if (AudioMixer::shouldMute(avatarStream->getQuietestFrameLoudness()) || data->shouldMuteClient()) {
        auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
        DependencyManager::get<NodeList>()->sendPacket(std::move(mutePacket), *node);

        // probably now we just reset the flag, once should do it (?)
        data->setShouldMuteClient(false);
    }

    // send audio packets, if necessary
    if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
        ++stats.sumListeners;

        // mix the audio
        bool mixHasAudio = prepareMix(node);

        // send audio packet
        if (mixHasAudio || data->shouldFlushEncoder()) {
            // encode the audio
            QByteArray encodedBuffer;
            if (mixHasAudio) {
                QByteArray decodedBuffer(reinterpret_cast<char*>(_bufferSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                data->encode(decodedBuffer, encodedBuffer);
            } else {
                // time to flush, which resets the shouldFlush until next time we encode something
                data->encodeFrameOfZeros(encodedBuffer);
            }

            sendMixPacket(node, *data, encodedBuffer);
        } else {
            sendSilentPacket(node, *data);
        }

        // send environment packet
        sendEnvironmentPacket(node, *data);

        // send stats packet (about every second)
        static const unsigned int NUM_FRAMES_PER_SEC = (int) ceil(AudioConstants::NETWORK_FRAMES_PER_SEC);
        if (data->shouldSendStats(_frame % NUM_FRAMES_PER_SEC)) {
            data->sendAudioStreamStatsPackets(node);
        }
    }
}

bool AudioMixerSlave::prepareMix(const SharedNodePointer& node) {
    AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
    AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());

    // zero out the client mix for this node
    memset(_mixSamples, 0, sizeof(_mixSamples));

    // loop through all other nodes that have sufficient audio to mix
    std::for_each(_begin, _end, [&](const SharedNodePointer& otherNode){
        // make sure that we have audio data for this other node
        // and that it isn't being ignored by our listening node
        // and that it isn't ignoring our listening node
        AudioMixerClientData* otherData = static_cast<AudioMixerClientData*>(otherNode->getLinkedData());
        if (otherData
            && !node->isIgnoringNodeWithID(otherNode->getUUID()) && !otherNode->isIgnoringNodeWithID(node->getUUID())) {

            // check to see if we're ignoring in radius
            bool insideIgnoreRadius = false;
            // If the otherNode equals the node, we're doing a comparison on ourselves
            if (*otherNode == *node) {
                // We'll always be inside the radius in that case.
                insideIgnoreRadius = true;
            // Check to see if the space bubble is enabled
            } else if ((node->isIgnoreRadiusEnabled() || otherNode->isIgnoreRadiusEnabled()) && (*otherNode != *node)) {
                // Define the minimum bubble size
                static const glm::vec3 minBubbleSize = glm::vec3(0.3f, 1.3f, 0.3f);
                AudioMixerClientData* nodeData = reinterpret_cast<AudioMixerClientData*>(node->getLinkedData());
                // Set up the bounding box for the current node
                AABox nodeBox(nodeData->getAvatarBoundingBoxCorner(), nodeData->getAvatarBoundingBoxScale());
                // Clamp the size of the bounding box to a minimum scale
                if (glm::any(glm::lessThan(nodeData->getAvatarBoundingBoxScale(), minBubbleSize))) {
                    nodeBox.setScaleStayCentered(minBubbleSize);
                }
                // Set up the bounding box for the current other node
                AABox otherNodeBox(otherData->getAvatarBoundingBoxCorner(), otherData->getAvatarBoundingBoxScale());
                // Clamp the size of the bounding box to a minimum scale
                if (glm::any(glm::lessThan(otherData->getAvatarBoundingBoxScale(), minBubbleSize))) {
                    otherNodeBox.setScaleStayCentered(minBubbleSize);
                }
                // Quadruple the scale of both bounding boxes
                nodeBox.embiggen(4.0f);
                otherNodeBox.embiggen(4.0f);

                // Perform the collision check between the two bounding boxes
                if (nodeBox.touches(otherNodeBox)) {
                    insideIgnoreRadius = true;
                }
            }

            // Enumerate the audio streams attached to the otherNode
            auto streamsCopy = otherData->getAudioStreams();
            for (auto& streamPair : streamsCopy) {
                auto otherNodeStream = streamPair.second;
                bool isSelfWithEcho = ((*otherNode == *node) && (otherNodeStream->shouldLoopbackForNode()));
                // Add all audio streams that should be added to the mix
                if (isSelfWithEcho || (!isSelfWithEcho && !insideIgnoreRadius)) {
                    addStreamToMix(*nodeData, otherNode->getUUID(), *nodeAudioStream, *otherNodeStream);
                }
            }
        }
    });

    // use the per listener AudioLimiter to render the mixed data...
    nodeData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // check for silent audio after the peak limiter has converted the samples
    bool hasAudio = false;
    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
        if (_bufferSamples[i] != 0) {
            hasAudio = true;
            break;
        }
    }
    return hasAudio;
}
void AudioMixerSlave::addStreamToMix(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID,
        const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) {
    // to reduce artifacts we calculate the gain and azimuth for every source for this listener
    // even if we are not going to end up mixing in this source

    ++stats.totalMixes;

    // this ensures that the tail of any previously mixed audio or the first block of new audio sounds correct

    // check if this is a server echo of a source back to itself
    bool isEcho = (&streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

    // figure out the distance between source and listener
    float distance = glm::max(glm::length(relativePosition), EPSILON);

    // figure out the gain for this source at the listener
    float gain = gainForSource(listeningNodeStream, streamToAdd, relativePosition, isEcho);

    // figure out the azimuth to this source at the listener
    float azimuth = isEcho ? 0.0f : azimuthForSource(listeningNodeStream, streamToAdd, relativePosition);

    float repeatedFrameFadeFactor = 1.0f;

    static const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd.lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (!streamToAdd.getLastPopOutput().isNull()) {
            bool isInjector = dynamic_cast<const InjectedAudioStream*>(&streamToAdd);

            // in an injector, just go silent - the injector has likely ended
            // in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence

            // we'll repeat the last block until it has a block to mix
            // and we'll gradually fade that repeated block into silence.

            // calculate its fade factor, which depends on how many times it's already been repeated.
            repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
            if (!isInjector && repeatedFrameFadeFactor > 0.0f) {
                // apply the repeatedFrameFadeFactor to the gain
                gain *= repeatedFrameFadeFactor;

                forceSilentBlock = false;
            }
        }

        if (forceSilentBlock) {
            // we're deciding not to repeat either since we've already done it enough times or repetition with fade is disabled
            // in this case we will call renderSilent with a forced silent block
            // this ensures the correct tail from the previously mixed block and the correct spatialization of first block
            // of any upcoming audio

            if (!streamToAdd.isStereo() && !isEcho) {
                // get the existing listener-source HRTF object, or create a new one
                auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

                // this is not done for stereo streams since they do not go through the HRTF
                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                hrtf.renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++stats.hrtfSilentRenders;
            }

            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();

    if (streamToAdd.isStereo() || isEcho) {
        // this is a stereo source or server echo so we do not pass it through the HRTF
        // simply apply our calculated gain to each sample
        if (streamToAdd.isStereo()) {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
                _mixSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
            }

            ++stats.manualStereoMixes;
        } else {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
                auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
                _mixSamples[i] += monoSample;
                _mixSamples[i + 1] += monoSample;
            }

            ++stats.manualEchoMixes;
        }

        return;
    }

    // get the existing listener-source HRTF object, or create a new one
    auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

    streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // if the frame we're about to mix is silent, simply call render silent and move on
    if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
        // silent frame from source

        // we still need to call renderSilent via the HRTF for mono source
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfSilentRenders;

        return;
    }

    float audibilityThreshold = AudioMixer::getMinimumAudibilityThreshold();
    if (audibilityThreshold > 0.0f &&
        streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= audibilityThreshold) {
        // the mixer is struggling so we're going to drop off some streams

        // we call renderSilent via the HRTF with the actual frame data and a gain of 0.0
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfStruggleRenders;

        return;
    }

    ++stats.hrtfRenders;

    // mono stream, call the HRTF with our block and calculated azimuth and gain
    hrtf.render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}

const int IEEE754_MANT_BITS = 23;
const int IEEE754_EXPN_BIAS = 127;

//
// for x > 0.0f, returns log2(x)
// for x <= 0.0f, returns large negative value
//
// abs |error| < 8e-3, smooth (exact for x=2^N) for NPOLY=3
// abs |error| < 2e-4, smooth (exact for x=2^N) for NPOLY=5
// rel |error| < 0.4 from precision loss very close to 1.0f
//
static inline float fastlog2(float x) {

    union { float f; int32_t i; } mant, bits = { x };

    // split into mantissa and exponent
    mant.i = (bits.i & ((1 << IEEE754_MANT_BITS) - 1)) | (IEEE754_EXPN_BIAS << IEEE754_MANT_BITS);
    int32_t expn = (bits.i >> IEEE754_MANT_BITS) - IEEE754_EXPN_BIAS;

    mant.f -= 1.0f;

    // polynomial for log2(1+x) over x=[0,1]
    //x = (-0.346555386f * mant.f + 1.346555386f) * mant.f;
    x = (((-0.0821307180f * mant.f + 0.321188984f) * mant.f - 0.677784014f) * mant.f + 1.43872575f) * mant.f;

    return x + expn;
}

//
// for -126 <= x < 128, returns exp2(x)
//
// rel |error| < 3e-3, smooth (exact for x=N) for NPOLY=3
// rel |error| < 9e-6, smooth (exact for x=N) for NPOLY=5
//
static inline float fastexp2(float x) {

    union { float f; int32_t i; } xi;

    // bias such that x > 0
    x += IEEE754_EXPN_BIAS;
    //x = MAX(x, 1.0f);
    //x = MIN(x, 254.9999f);

    // split into integer and fraction
    xi.i = (int32_t)x;
    x -= xi.i;

    // construct exp2(xi) as a float
    xi.i <<= IEEE754_MANT_BITS;

    // polynomial for exp2(x) over x=[0,1]
    //x = (0.339766028f * x + 0.660233972f) * x + 1.0f;
    x = (((0.0135557472f * x + 0.0520323690f) * x + 0.241379763f) * x + 0.693032121f) * x + 1.0f;

    return x * xi.f;
}
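The distance falloff in gainForSource below composes these two helpers as fastexp2(fastlog2(g) * fastlog2(d)), which equals g raised to log2(d): one factor of g per doubling of distance. A standalone sanity check of that identity using the standard-library equivalents (distanceGain is an illustrative name, not part of the mixer):

    #include <cmath>
    #include <cstdio>

    // gain multiplier at distance d (metres, d >= 1),
    // where g = 1 - attenuationPerDoublingInDistance
    static float distanceGain(float g, float d) {
        // exp2(log2(g) * log2(d)) == pow(g, log2(d)): one factor of g per doubling
        return std::exp2(std::log2(g) * std::log2(d));
    }

    int main() {
        const float g = 0.5f; // i.e. attenuationPerDoublingInDistance = 0.5
        std::printf("%f %f %f\n",
                    distanceGain(g, 1.0f),   // 1.0  - attenuation begins at 1m
                    distanceGain(g, 2.0f),   // 0.5  - one doubling
                    distanceGain(g, 4.0f));  // 0.25 - two doublings
        return 0;
    }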

float AudioMixerSlave::gainForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
        const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    float distanceBetween = glm::length(relativePosition);

    if (distanceBetween < EPSILON) {
        distanceBetween = EPSILON;
    }

    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
    }

    if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        // source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                           glm::normalize(rotatedListenerPosition));

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;

        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
            (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));

        // multiply the current attenuation coefficient by the calculated off axis coefficient
        gain *= offAxisCoefficient;
    }

    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    auto& zoneSettings = AudioMixer::getZoneSettings();
    auto& audioZones = AudioMixer::getAudioZones();
    for (int i = 0; i < zoneSettings.length(); ++i) {
        if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
            audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = zoneSettings[i].coefficient;
            break;
        }
    }

    const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
    if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {

        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = (g < EPSILON) ? EPSILON : g;
        g = (g > 1.0f) ? 1.0f : g;

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = fastexp2(fastlog2(g) * fastlog2(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}

float AudioMixerSlave::azimuthForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
        const glm::vec3& relativePosition) {
    glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation());

    // Compute sample delay for the two ears to create phase panning
    glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;

    // project the rotated source position vector onto the XZ plane
    rotatedSourcePosition.y = 0.0f;

    static const float SOURCE_DISTANCE_THRESHOLD = 1e-30f;

    if (glm::length2(rotatedSourcePosition) > SOURCE_DISTANCE_THRESHOLD) {
        // produce an oriented angle about the y-axis
        return glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedSourcePosition), glm::vec3(0.0f, -1.0f, 0.0f));
    } else {
        // there is no distance between listener and source - return no azimuth
        return 0;
    }
}
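To make the geometry above concrete, the same computation in isolation: rotate the source offset into listener space, flatten it onto the XZ plane, and take the oriented angle from the forward (-z) axis. A sketch assuming glm with the GLM_GTX_vector_angle and GLM_GTX_norm extensions (azimuthOnXZPlane is an illustrative name):

    #include <glm/glm.hpp>
    #include <glm/gtx/norm.hpp>
    #include <glm/gtx/vector_angle.hpp>

    // Signed angle (radians) of a source around the listener's y-axis,
    // measured from the listener-space forward (-z) direction.
    float azimuthOnXZPlane(glm::vec3 toSource) {
        toSource.y = 0.0f; // project onto the XZ plane
        if (glm::length2(toSource) < 1e-30f) {
            return 0.0f; // source is (nearly) on the y-axis: no meaningful azimuth
        }
        return glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
                                  glm::normalize(toSource),
                                  glm::vec3(0.0f, -1.0f, 0.0f));
    }

    // e.g. a source directly to the listener's right, toSource = (1, 0, 0),
    // yields +pi/2 with the (0, -1, 0) reference axis used above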
63 assignment-client/src/audio/AudioMixerSlave.h Normal file
@ -0,0 +1,63 @@
//
//  AudioMixerSlave.h
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/22/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_AudioMixerSlave_h
#define hifi_AudioMixerSlave_h

#include <AABox.h>
#include <AudioHRTF.h>
#include <AudioRingBuffer.h>
#include <ThreadedAssignment.h>
#include <UUIDHasher.h>
#include <NodeList.h>

#include "AudioMixerStats.h"

class PositionalAudioStream;
class AvatarAudioStream;
class AudioHRTF;
class AudioMixerClientData;

class AudioMixerSlave {
public:
    using ConstIter = NodeList::const_iterator;

    void configure(ConstIter begin, ConstIter end, unsigned int frame);

    // mix and broadcast non-ignored streams to the node
    void mix(const SharedNodePointer& node);

    AudioMixerStats stats;

private:
    // create mix, returns true if mix has audio
    bool prepareMix(const SharedNodePointer& node);
    // add a stream to the mix
    void addStreamToMix(AudioMixerClientData& listenerData, const QUuid& streamerID,
            const AvatarAudioStream& listenerStream, const PositionalAudioStream& streamer);

    float gainForSource(const AvatarAudioStream& listener, const PositionalAudioStream& streamer,
            const glm::vec3& relativePosition, bool isEcho);
    float azimuthForSource(const AvatarAudioStream& listener, const PositionalAudioStream& streamer,
            const glm::vec3& relativePosition);

    // mixing buffers
    float _mixSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
    int16_t _bufferSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];

    // frame state
    ConstIter _begin;
    ConstIter _end;
    unsigned int _frame { 0 };
};

#endif // hifi_AudioMixerSlave_h
187 assignment-client/src/audio/AudioMixerSlavePool.cpp Normal file
@ -0,0 +1,187 @@
//
//  AudioMixerSlavePool.cpp
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/16/2016.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <assert.h>
#include <algorithm>

#include "AudioMixerSlavePool.h"

void AudioMixerSlaveThread::run() {
    while (true) {
        wait();

        // iterate over all available nodes
        SharedNodePointer node;
        while (try_pop(node)) {
            mix(node);
        }

        bool stopping = _stop;
        notify(stopping);
        if (stopping) {
            return;
        }
    }
}

void AudioMixerSlaveThread::wait() {
    {
        Lock lock(_pool._mutex);
        _pool._slaveCondition.wait(lock, [&] {
            assert(_pool._numStarted <= _pool._numThreads);
            return _pool._numStarted != _pool._numThreads;
        });
        ++_pool._numStarted;
    }
    configure(_pool._begin, _pool._end, _pool._frame);
}

void AudioMixerSlaveThread::notify(bool stopping) {
    {
        Lock lock(_pool._mutex);
        assert(_pool._numFinished < _pool._numThreads);
        ++_pool._numFinished;
        if (stopping) {
            ++_pool._numStopped;
        }
    }
    _pool._poolCondition.notify_one();
}

bool AudioMixerSlaveThread::try_pop(SharedNodePointer& node) {
    return _pool._queue.try_pop(node);
}

#ifdef AUDIO_SINGLE_THREADED
static AudioMixerSlave slave;
#endif

void AudioMixerSlavePool::mix(ConstIter begin, ConstIter end, unsigned int frame) {
    _begin = begin;
    _end = end;
    _frame = frame;

#ifdef AUDIO_SINGLE_THREADED
    slave.configure(_begin, _end, frame);
    std::for_each(begin, end, [&](const SharedNodePointer& node) {
        slave.mix(node);
    });
#else
    // fill the queue
    std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
        _queue.emplace(node);
    });

    {
        Lock lock(_mutex);

        // mix
        _numStarted = _numFinished = 0;
        _slaveCondition.notify_all();

        // wait
        _poolCondition.wait(lock, [&] {
            assert(_numFinished <= _numThreads);
            return _numFinished == _numThreads;
        });

        assert(_numStarted == _numThreads);
    }

    assert(_queue.empty());
#endif
}

void AudioMixerSlavePool::each(std::function<void(AudioMixerSlave& slave)> functor) {
#ifdef AUDIO_SINGLE_THREADED
    functor(slave);
#else
    for (auto& slave : _slaves) {
        functor(*slave.get());
    }
#endif
}

void AudioMixerSlavePool::setNumThreads(int numThreads) {
    // clamp to allowed size
    {
        int maxThreads = QThread::idealThreadCount();
        if (maxThreads == -1) {
            // idealThreadCount returns -1 if cores cannot be detected
            static const int MAX_THREADS_IF_UNKNOWN = 4;
            maxThreads = MAX_THREADS_IF_UNKNOWN;
        }

        int clampedThreads = std::min(std::max(1, numThreads), maxThreads);
        if (clampedThreads != numThreads) {
            qWarning("%s: clamped to %d (was %d)", __FUNCTION__, clampedThreads, numThreads);
            numThreads = clampedThreads;
        }
    }

    resize(numThreads);
}

void AudioMixerSlavePool::resize(int numThreads) {
    assert(_numThreads == _slaves.size());

#ifdef AUDIO_SINGLE_THREADED
    qDebug("%s: running single threaded", __FUNCTION__);
#else
    qDebug("%s: set %d threads (was %d)", __FUNCTION__, numThreads, _numThreads);

    Lock lock(_mutex);

    if (numThreads > _numThreads) {
        // start new slaves
        for (int i = 0; i < numThreads - _numThreads; ++i) {
            auto slave = new AudioMixerSlaveThread(*this);
            slave->start();
            _slaves.emplace_back(slave);
        }
    } else if (numThreads < _numThreads) {
        auto extraBegin = _slaves.begin() + numThreads;

        // mark slaves to stop...
        auto slave = extraBegin;
        while (slave != _slaves.end()) {
            (*slave)->_stop = true;
            ++slave;
        }

        // ...cycle them until they do stop...
        _numStopped = 0;
        while (_numStopped != (_numThreads - numThreads)) {
            _numStarted = _numFinished = _numStopped;
            _slaveCondition.notify_all();
            _poolCondition.wait(lock, [&] {
                assert(_numFinished <= _numThreads);
                return _numFinished == _numThreads;
            });
        }

        // ...wait for threads to finish...
        slave = extraBegin;
        while (slave != _slaves.end()) {
            QThread* thread = reinterpret_cast<QThread*>(slave->get());
            static const int MAX_THREAD_WAIT_TIME = 10;
            thread->wait(MAX_THREAD_WAIT_TIME);
            ++slave;
        }

        // ...and erase them
        _slaves.erase(extraBegin, _slaves.end());
    }

    _numThreads = _numStarted = _numFinished = numThreads;
    assert(_numThreads == _slaves.size());
#endif
}
97 assignment-client/src/audio/AudioMixerSlavePool.h Normal file
@ -0,0 +1,97 @@
//
//  AudioMixerSlavePool.h
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/16/2016.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_AudioMixerSlavePool_h
#define hifi_AudioMixerSlavePool_h

#include <condition_variable>
#include <mutex>
#include <vector>

#include <tbb/concurrent_queue.h>

#include <QThread>

#include "AudioMixerSlave.h"

class AudioMixerSlavePool;

class AudioMixerSlaveThread : public QThread, public AudioMixerSlave {
    Q_OBJECT
    using ConstIter = NodeList::const_iterator;
    using Mutex = std::mutex;
    using Lock = std::unique_lock<Mutex>;

public:
    AudioMixerSlaveThread(AudioMixerSlavePool& pool) : _pool(pool) {}

    void run() override final;

private:
    friend class AudioMixerSlavePool;

    void wait();
    void notify(bool stopping);
    bool try_pop(SharedNodePointer& node);

    AudioMixerSlavePool& _pool;
    bool _stop { false };
};

// Slave pool for audio mixers
// AudioMixerSlavePool is not thread-safe! It should be instantiated and used from a single thread.
class AudioMixerSlavePool {
    using Queue = tbb::concurrent_queue<SharedNodePointer>;
    using Mutex = std::mutex;
    using Lock = std::unique_lock<Mutex>;
    using ConditionVariable = std::condition_variable;

public:
    using ConstIter = NodeList::const_iterator;

    AudioMixerSlavePool(int numThreads = QThread::idealThreadCount()) { setNumThreads(numThreads); }
    ~AudioMixerSlavePool() { resize(0); }

    // mix on slave threads
    void mix(ConstIter begin, ConstIter end, unsigned int frame);

    // iterate over all slaves
    void each(std::function<void(AudioMixerSlave& slave)> functor);

    void setNumThreads(int numThreads);
    int numThreads() { return _numThreads; }

private:
    void resize(int numThreads);

    std::vector<std::unique_ptr<AudioMixerSlaveThread>> _slaves;

    friend void AudioMixerSlaveThread::wait();
    friend void AudioMixerSlaveThread::notify(bool stopping);
    friend bool AudioMixerSlaveThread::try_pop(SharedNodePointer& node);

    // synchronization state
    Mutex _mutex;
    ConditionVariable _slaveCondition;
    ConditionVariable _poolCondition;
    int _numThreads { 0 };
    int _numStarted { 0 };  // guarded by _mutex
    int _numFinished { 0 }; // guarded by _mutex
    int _numStopped { 0 };  // guarded by _mutex

    // frame state
    Queue _queue;
    unsigned int _frame { 0 };
    ConstIter _begin;
    ConstIter _end;
};

#endif // hifi_AudioMixerSlavePool_h
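A side note on the pool above: it coordinates workers with two condition variables and started/finished counters rather than a reusable barrier, so the same handshake serves both the per-frame mix and the stop-and-resize path. A minimal sketch of that skeleton in isolation (Pool here is a stand-in, not the class above; the work queue and stop flags are omitted):

    #include <condition_variable>
    #include <mutex>

    struct Pool {
        std::mutex mutex;
        std::condition_variable slaveCondition; // wakes workers
        std::condition_variable poolCondition;  // wakes the pool
        int numThreads = 0;
        int numStarted = 0;  // guarded by mutex
        int numFinished = 0; // guarded by mutex

        // called once per frame by the pool thread
        void runFrame() {
            std::unique_lock<std::mutex> lock(mutex);
            numStarted = numFinished = 0;
            slaveCondition.notify_all(); // release the workers
            poolCondition.wait(lock, [this] { return numFinished == numThreads; });
        }

        // called by each worker at the top of its loop
        void workerWait() {
            std::unique_lock<std::mutex> lock(mutex);
            slaveCondition.wait(lock, [this] { return numStarted != numThreads; });
            ++numStarted; // claim a slot in this frame
        }

        // called by each worker when its share of the frame is done
        void workerNotify() {
            {
                std::unique_lock<std::mutex> lock(mutex);
                ++numFinished;
            }
            poolCondition.notify_one();
        }
    };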
34 assignment-client/src/audio/AudioMixerStats.cpp Normal file
@ -0,0 +1,34 @@
//
//  AudioMixerStats.cpp
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/22/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "AudioMixerStats.h"

void AudioMixerStats::reset() {
    sumStreams = 0;
    sumListeners = 0;
    totalMixes = 0;
    hrtfRenders = 0;
    hrtfSilentRenders = 0;
    hrtfStruggleRenders = 0;
    manualStereoMixes = 0;
    manualEchoMixes = 0;
}

void AudioMixerStats::accumulate(const AudioMixerStats& otherStats) {
    sumStreams += otherStats.sumStreams;
    sumListeners += otherStats.sumListeners;
    totalMixes += otherStats.totalMixes;
    hrtfRenders += otherStats.hrtfRenders;
    hrtfSilentRenders += otherStats.hrtfSilentRenders;
    hrtfStruggleRenders += otherStats.hrtfStruggleRenders;
    manualStereoMixes += otherStats.manualStereoMixes;
    manualEchoMixes += otherStats.manualEchoMixes;
}
32 assignment-client/src/audio/AudioMixerStats.h Normal file
@ -0,0 +1,32 @@
//
//  AudioMixerStats.h
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/22/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_AudioMixerStats_h
#define hifi_AudioMixerStats_h

struct AudioMixerStats {
    int sumStreams { 0 };
    int sumListeners { 0 };

    int totalMixes { 0 };

    int hrtfRenders { 0 };
    int hrtfSilentRenders { 0 };
    int hrtfStruggleRenders { 0 };

    int manualStereoMixes { 0 };
    int manualEchoMixes { 0 };

    void reset();
    void accumulate(const AudioMixerStats& otherStats);
};

#endif // hifi_AudioMixerStats_h

@ -19,6 +19,7 @@
#include <QtCore/QTimer>
#include <QtCore/QThread>

#include <AABox.h>
#include <LogHandler.h>
#include <NodeList.h>
#include <udt/PacketHeaders.h>

@ -240,18 +241,39 @@ void AvatarMixer::broadcastAvatarData() {
            } else {
                AvatarMixerClientData* otherData = reinterpret_cast<AvatarMixerClientData*>(otherNode->getLinkedData());
                AvatarMixerClientData* nodeData = reinterpret_cast<AvatarMixerClientData*>(node->getLinkedData());
                // check to see if we're ignoring in radius
                // Check to see if the space bubble is enabled
                if (node->isIgnoreRadiusEnabled() || otherNode->isIgnoreRadiusEnabled()) {
                    float ignoreRadius = glm::min(node->getIgnoreRadius(), otherNode->getIgnoreRadius());
                    if (glm::distance(nodeData->getPosition(), otherData->getPosition()) < ignoreRadius) {
                    // Define the minimum bubble size
                    static const glm::vec3 minBubbleSize = glm::vec3(0.3f, 1.3f, 0.3f);
                    // Define the scale of the box for the current node
                    glm::vec3 nodeBoxScale = (nodeData->getPosition() - nodeData->getGlobalBoundingBoxCorner()) * 2.0f;
                    // Define the scale of the box for the current other node
                    glm::vec3 otherNodeBoxScale = (otherData->getPosition() - otherData->getGlobalBoundingBoxCorner()) * 2.0f;

                    // Set up the bounding box for the current node
                    AABox nodeBox(nodeData->getGlobalBoundingBoxCorner(), nodeBoxScale);
                    // Clamp the size of the bounding box to a minimum scale
                    if (glm::any(glm::lessThan(nodeBoxScale, minBubbleSize))) {
                        nodeBox.setScaleStayCentered(minBubbleSize);
                    }
                    // Set up the bounding box for the current other node
                    AABox otherNodeBox(otherData->getGlobalBoundingBoxCorner(), otherNodeBoxScale);
                    // Clamp the size of the bounding box to a minimum scale
                    if (glm::any(glm::lessThan(otherNodeBoxScale, minBubbleSize))) {
                        otherNodeBox.setScaleStayCentered(minBubbleSize);
                    }
                    // Quadruple the scale of both bounding boxes
                    nodeBox.embiggen(4.0f);
                    otherNodeBox.embiggen(4.0f);

                    // Perform the collision check between the two bounding boxes
                    if (nodeBox.touches(otherNodeBox)) {
                        nodeData->ignoreOther(node, otherNode);
                        otherData->ignoreOther(otherNode, node);
                        return false;
                    }
                }
                // not close enough to ignore
                // Not close enough to ignore
                nodeData->removeFromRadiusIgnoringSet(otherNode->getUUID());
                otherData->removeFromRadiusIgnoringSet(node->getUUID());
                return true;
            }
        },
@ -395,8 +417,9 @@ void AvatarMixer::nodeKilled(SharedNodePointer killedNode) {

        // this was an avatar we were sending to other people
        // send a kill packet for it to our other nodes
        auto killPacket = NLPacket::create(PacketType::KillAvatar, NUM_BYTES_RFC4122_UUID);
        auto killPacket = NLPacket::create(PacketType::KillAvatar, NUM_BYTES_RFC4122_UUID + sizeof(KillAvatarReason));
        killPacket->write(killedNode->getUUID().toRfc4122());
        killPacket->writePrimitive(KillAvatarReason::AvatarDisconnected);

        nodeList->broadcastToNodes(std::move(killPacket), NodeSet() << NodeType::Agent);
@ -45,8 +45,13 @@ uint16_t AvatarMixerClientData::getLastBroadcastSequenceNumber(const QUuid& node
void AvatarMixerClientData::ignoreOther(SharedNodePointer self, SharedNodePointer other) {
    if (!isRadiusIgnoring(other->getUUID())) {
        addToRadiusIgnoringSet(other->getUUID());
        auto killPacket = NLPacket::create(PacketType::KillAvatar, NUM_BYTES_RFC4122_UUID);
        auto killPacket = NLPacket::create(PacketType::KillAvatar, NUM_BYTES_RFC4122_UUID + sizeof(KillAvatarReason));
        killPacket->write(other->getUUID().toRfc4122());
        if (self->isIgnoreRadiusEnabled()) {
            killPacket->writePrimitive(KillAvatarReason::TheirAvatarEnteredYourBubble);
        } else {
            killPacket->writePrimitive(KillAvatarReason::YourAvatarEnteredTheirBubble);
        }
        DependencyManager::get<NodeList>()->sendUnreliablePacket(*killPacket, *self);
        _hasReceivedFirstPacketsFrom.erase(other->getUUID());
    }
@ -81,6 +81,7 @@ public:
    void loadJSONStats(QJsonObject& jsonObject) const;

    glm::vec3 getPosition() { return _avatar ? _avatar->getPosition() : glm::vec3(0); }
    glm::vec3 getGlobalBoundingBoxCorner() { return _avatar ? _avatar->getGlobalBoundingBoxCorner() : glm::vec3(0); }
    bool isRadiusIgnoring(const QUuid& other) { return _radiusIgnoredOthers.find(other) != _radiusIgnoredOthers.end(); }
    void addToRadiusIgnoringSet(const QUuid& other) { _radiusIgnoredOthers.insert(other); }
    void removeFromRadiusIgnoringSet(const QUuid& other) { _radiusIgnoredOthers.erase(other); }
@ -43,26 +43,24 @@ macro(PACKAGE_LIBRARIES_FOR_DEPLOYMENT)
    )

    set(QTAUDIO_PATH $<TARGET_FILE_DIR:${TARGET_NAME}>/audio)
    set(QTAUDIO_WIN7_PATH $<TARGET_FILE_DIR:${TARGET_NAME}>/audioWin7/audio)
    set(QTAUDIO_WIN8_PATH $<TARGET_FILE_DIR:${TARGET_NAME}>/audioWin8/audio)

    if (DEPLOY_PACKAGE)
      # copy qtaudio_wasapi.dll alongside qtaudio_windows.dll, and let the installer resolve
      add_custom_command(
        TARGET ${TARGET_NAME}
        POST_BUILD
        COMMAND if exist ${QTAUDIO_PATH}/qtaudio_windows.dll ( ${CMAKE_COMMAND} -E copy ${WASAPI_DLL_PATH}/qtaudio_wasapi.dll ${QTAUDIO_PATH} && ${CMAKE_COMMAND} -E copy ${WASAPI_DLL_PATH}/qtaudio_wasapi.pdb ${QTAUDIO_PATH} )
        COMMAND if exist ${QTAUDIO_PATH}/qtaudio_windowsd.dll ( ${CMAKE_COMMAND} -E copy ${WASAPI_DLL_PATH}/qtaudio_wasapid.dll ${QTAUDIO_PATH} && ${CMAKE_COMMAND} -E copy ${WASAPI_DLL_PATH}/qtaudio_wasapid.pdb ${QTAUDIO_PATH} )
      )
    elseif (${CMAKE_SYSTEM_VERSION} VERSION_LESS 6.2)
      # continue using qtaudio_windows.dll on Windows 7
    else ()
      # replace qtaudio_windows.dll with qtaudio_wasapi.dll on Windows 8/8.1/10
      add_custom_command(
        TARGET ${TARGET_NAME}
        POST_BUILD
        COMMAND if exist ${QTAUDIO_PATH}/qtaudio_windows.dll ( ${CMAKE_COMMAND} -E remove ${QTAUDIO_PATH}/qtaudio_windows.dll && ${CMAKE_COMMAND} -E copy ${WASAPI_DLL_PATH}/qtaudio_wasapi.dll ${QTAUDIO_PATH} && ${CMAKE_COMMAND} -E copy ${WASAPI_DLL_PATH}/qtaudio_wasapi.pdb ${QTAUDIO_PATH} )
        COMMAND if exist ${QTAUDIO_PATH}/qtaudio_windowsd.dll ( ${CMAKE_COMMAND} -E remove ${QTAUDIO_PATH}/qtaudio_windowsd.dll && ${CMAKE_COMMAND} -E copy ${WASAPI_DLL_PATH}/qtaudio_wasapid.dll ${QTAUDIO_PATH} && ${CMAKE_COMMAND} -E copy ${WASAPI_DLL_PATH}/qtaudio_wasapid.pdb ${QTAUDIO_PATH} )
      )
    endif ()
    # copy qtaudio_wasapi.dll and qtaudio_windows.dll in the correct directories for runtime selection
    add_custom_command(
      TARGET ${TARGET_NAME}
      POST_BUILD
      COMMAND ${CMAKE_COMMAND} -E make_directory ${QTAUDIO_WIN7_PATH}
      COMMAND ${CMAKE_COMMAND} -E make_directory ${QTAUDIO_WIN8_PATH}
      # copy release DLLs
      COMMAND if exist ${QTAUDIO_PATH}/qtaudio_windows.dll ( ${CMAKE_COMMAND} -E copy ${QTAUDIO_PATH}/qtaudio_windows.dll ${QTAUDIO_WIN7_PATH} )
      COMMAND if exist ${QTAUDIO_PATH}/qtaudio_windows.dll ( ${CMAKE_COMMAND} -E copy ${WASAPI_DLL_PATH}/qtaudio_wasapi.dll ${QTAUDIO_WIN8_PATH} )
      # copy debug DLLs
      COMMAND if exist ${QTAUDIO_PATH}/qtaudio_windowsd.dll ( ${CMAKE_COMMAND} -E copy ${QTAUDIO_PATH}/qtaudio_windowsd.dll ${QTAUDIO_WIN7_PATH} )
      COMMAND if exist ${QTAUDIO_PATH}/qtaudio_windowsd.dll ( ${CMAKE_COMMAND} -E copy ${WASAPI_DLL_PATH}/qtaudio_wasapid.dll ${QTAUDIO_WIN8_PATH} )
      # remove directory
      COMMAND ${CMAKE_COMMAND} -E remove_directory ${QTAUDIO_PATH}
    )

  endif ()
endmacro()
@ -630,17 +630,6 @@ Section "-Core installation"
  Delete "$INSTDIR\version"
  Delete "$INSTDIR\xinput1_3.dll"

  ; The installer includes two different Qt audio plugins.
  ; On Windows 8 and above, only qtaudio_wasapi.dll should be installed.
  ; On Windows 7 and below, only qtaudio_windows.dll should be installed.
  ${If} ${AtLeastWin8}
    Delete "$INSTDIR\audio\qtaudio_windows.dll"
    Delete "$INSTDIR\audio\qtaudio_windows.pdb"
  ${Else}
    Delete "$INSTDIR\audio\qtaudio_wasapi.dll"
    Delete "$INSTDIR\audio\qtaudio_wasapi.pdb"
  ${EndIf}

  ; Delete old desktop shortcuts from before the Sandbox rename
  Delete "$DESKTOP\@PRE_SANDBOX_INTERFACE_SHORTCUT_NAME@.lnk"
  Delete "$DESKTOP\@PRE_SANDBOX_CONSOLE_SHORTCUT_NAME@.lnk"
@ -978,6 +978,29 @@
          }
        ]
      },
      {
        "name": "audio_threading",
        "label": "Audio Threading",
        "assignment-types": [0],
        "settings": [
          {
            "name": "auto_threads",
            "label": "Automatically determine thread count",
            "type": "checkbox",
            "help": "Allow system to determine number of threads (recommended)",
            "default": false,
            "advanced": true
          },
          {
            "name": "num_threads",
            "label": "Number of Threads",
            "help": "Threads to spin up for audio mixing (if not automatically set)",
            "placeholder": "1",
            "default": "1",
            "advanced": true
          }
        ]
      },
      {
        "name": "audio_env",
        "label": "Audio Environment",
@ -44,8 +44,8 @@
    { "from": "GamePad.RB", "to": "Standard.RB" },
    { "from": "GamePad.RS", "to": "Standard.RS" },

    { "from": "GamePad.Start", "to": "Actions.CycleCamera" },
    { "from": "GamePad.Back", "to": "Standard.Start" },
    { "from": "GamePad.Start", "to": "Standard.Start" },
    { "from": "GamePad.Back", "to": "Actions.CycleCamera" },

    { "from": "GamePad.DU", "to": "Standard.DU" },
    { "from": "GamePad.DD", "to": "Standard.DD" },
@ -40,7 +40,6 @@ ModalWindow {

    Loader {
        id: bodyLoader
        anchors.fill: parent
        source: loginDialog.isSteamRunning() ? "LoginDialog/SignInBody.qml" : "LoginDialog/LinkAccountBody.qml"
    }
}
@ -18,8 +18,8 @@ import "../styles-uit"
Item {
    id: completeProfileBody
    clip: true
    width: pane.width
    height: pane.height
    width: root.pane.width
    height: root.pane.height

    QtObject {
        id: d
@ -33,8 +33,8 @@ Item {
                                                  termsContainer.contentWidth))
            var targetHeight = 5 * hifi.dimensions.contentSpacing.y + buttons.height + additionalTextContainer.height + termsContainer.height

            root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth))
            root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
            parent.width = root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth))
            parent.height = root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
        }
    }

@ -19,11 +19,13 @@ import "../styles-uit"
Item {
    id: linkAccountBody
    clip: true
    width: root.pane.width
    height: root.pane.height
    width: root.pane.width
    property bool failAfterSignUp: false

    function login() {
        mainTextContainer.visible = false
        toggleLoading(true)
        loginDialog.login(usernameField.text, passwordField.text)
    }

@ -51,12 +53,40 @@ Item {
                targetHeight += hifi.dimensions.contentSpacing.y + additionalInformation.height
            }

            root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth));
            root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
            parent.width = root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth));
            parent.height = root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
                    + (keyboardEnabled && keyboardRaised ? (200 + 2 * hifi.dimensions.contentSpacing.y) : hifi.dimensions.contentSpacing.y);
        }
    }

    function toggleLoading(isLoading) {
        linkAccountSpinner.visible = isLoading
        form.visible = !isLoading

        if (loginDialog.isSteamRunning()) {
            additionalInformation.visible = !isLoading
        }

        leftButton.visible = !isLoading
        buttons.visible = !isLoading
    }

    BusyIndicator {
        id: linkAccountSpinner

        anchors {
            top: parent.top
            horizontalCenter: parent.horizontalCenter
            topMargin: hifi.dimensions.contentSpacing.y
        }

        visible: false
        running: true

        width: 48
        height: 48
    }

    ShortcutText {
        id: mainTextContainer
        anchors {
@ -96,7 +126,7 @@ Item {
            }
            width: 350

            label: "User Name or Email"
            label: "Username or Email"
        }

        ShortcutText {
@ -108,6 +138,7 @@ Item {

            verticalAlignment: Text.AlignVCenter
            horizontalAlignment: Text.AlignHCenter
            linkColor: hifi.colors.blueAccent

            onLinkActivated: loginDialog.openUrl(link)
        }
@ -135,6 +166,7 @@ Item {

            verticalAlignment: Text.AlignVCenter
            horizontalAlignment: Text.AlignHCenter
            linkColor: hifi.colors.blueAccent

            onLinkActivated: loginDialog.openUrl(link)
        }
@ -173,6 +205,31 @@ Item {
        }
    }

    Row {
        id: leftButton
        anchors {
            left: parent.left
            bottom: parent.bottom
            bottomMargin: hifi.dimensions.contentSpacing.y
        }

        spacing: hifi.dimensions.contentSpacing.x
        onHeightChanged: d.resize(); onWidthChanged: d.resize();

        Button {
            anchors.verticalCenter: parent.verticalCenter

            text: qsTr("Sign Up")
            visible: !loginDialog.isSteamRunning()

            onClicked: {
                bodyLoader.setSource("SignUpBody.qml")
                bodyLoader.item.width = root.pane.width
                bodyLoader.item.height = root.pane.height
            }
        }
    }

    Row {
        id: buttons
        anchors {
@ -209,6 +266,11 @@ Item {
        keyboardEnabled = HMD.active;
        d.resize();

        if (failAfterSignUp) {
            mainTextContainer.text = "Account created successfully."
            mainTextContainer.visible = true
        }

        usernameField.forceActiveFocus();
    }

@ -228,6 +290,7 @@ Item {
        onHandleLoginFailed: {
            console.log("Login Failed")
            mainTextContainer.visible = true
            toggleLoading(false)
        }
        onHandleLinkCompleted: {
            console.log("Link Succeeded")
@ -238,7 +301,7 @@ Item {
        }
        onHandleLinkFailed: {
            console.log("Link Failed")

            toggleLoading(false)
        }
    }

@ -18,8 +18,8 @@ import "../styles-uit"
Item {
    id: signInBody
    clip: true
    width: pane.width
    height: pane.height
    width: root.pane.width
    height: root.pane.height

    property bool required: false

@ -43,8 +43,8 @@ Item {
            var targetWidth = Math.max(titleWidth, mainTextContainer.contentWidth)
            var targetHeight = mainTextContainer.height + 3 * hifi.dimensions.contentSpacing.y + buttons.height

            root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth))
            root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
            parent.width = root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth))
            parent.height = root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
        }
    }

296
interface/resources/qml/LoginDialog/SignUpBody.qml
Normal file
296
interface/resources/qml/LoginDialog/SignUpBody.qml
Normal file
|
@ -0,0 +1,296 @@
|
|||
//
|
||||
// SignUpBody.qml
|
||||
//
|
||||
// Created by Stephen Birarda on 7 Dec 2016
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
import Hifi 1.0
|
||||
import QtQuick 2.4
|
||||
import QtQuick.Controls 1.4
|
||||
import QtQuick.Controls.Styles 1.4 as OriginalStyles
|
||||
|
||||
import "../controls-uit"
|
||||
import "../styles-uit"
|
||||
|
||||
Item {
|
||||
id: signupBody
|
||||
clip: true
|
||||
height: root.pane.height
|
||||
width: root.pane.width
|
||||
|
||||
function signup() {
|
||||
mainTextContainer.visible = false
|
||||
toggleLoading(true)
|
||||
loginDialog.signup(emailField.text, usernameField.text, passwordField.text)
|
||||
}
|
||||
|
||||
property bool keyboardEnabled: false
|
||||
property bool keyboardRaised: false
|
||||
property bool punctuationMode: false
|
||||
|
||||
onKeyboardRaisedChanged: d.resize();
|
||||
|
||||
QtObject {
|
||||
id: d
|
||||
readonly property int minWidth: 480
|
||||
readonly property int maxWidth: 1280
|
||||
readonly property int minHeight: 120
|
||||
readonly property int maxHeight: 720
|
||||
|
||||
function resize() {
|
||||
var targetWidth = Math.max(titleWidth, form.contentWidth);
|
||||
var targetHeight = hifi.dimensions.contentSpacing.y + mainTextContainer.height +
|
||||
4 * hifi.dimensions.contentSpacing.y + form.height +
|
||||
hifi.dimensions.contentSpacing.y + buttons.height;
|
||||
|
||||
parent.width = root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth));
|
||||
parent.height = root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
|
||||
+ (keyboardEnabled && keyboardRaised ? (200 + 2 * hifi.dimensions.contentSpacing.y) : 0);
|
||||
}
|
||||
}
|
||||
|
||||
function toggleLoading(isLoading) {
|
||||
linkAccountSpinner.visible = isLoading
|
||||
form.visible = !isLoading
|
||||
|
||||
leftButton.visible = !isLoading
|
||||
buttons.visible = !isLoading
|
||||
}
|
||||
|
||||
BusyIndicator {
|
||||
id: linkAccountSpinner
|
||||
|
||||
anchors {
|
||||
top: parent.top
|
||||
horizontalCenter: parent.horizontalCenter
|
||||
topMargin: hifi.dimensions.contentSpacing.y
|
||||
}
|
||||
|
||||
visible: false
|
||||
running: true
|
||||
|
||||
width: 48
|
||||
height: 48
|
||||
}
|
||||
|
||||
ShortcutText {
|
||||
id: mainTextContainer
|
||||
anchors {
|
||||
top: parent.top
|
||||
left: parent.left
|
||||
margins: 0
|
||||
topMargin: hifi.dimensions.contentSpacing.y
|
||||
}
|
||||
|
||||
visible: false
|
||||
|
||||
text: qsTr("There was an unknown error while creating your account.")
|
||||
wrapMode: Text.WordWrap
|
||||
color: hifi.colors.redAccent
|
||||
horizontalAlignment: Text.AlignLeft
|
||||
}
|
||||
|
||||
Column {
|
||||
id: form
|
||||
anchors {
|
||||
top: mainTextContainer.bottom
|
||||
left: parent.left
|
||||
margins: 0
|
||||
topMargin: 2 * hifi.dimensions.contentSpacing.y
|
||||
}
|
||||
spacing: 2 * hifi.dimensions.contentSpacing.y
|
||||
|
||||
Row {
|
||||
spacing: hifi.dimensions.contentSpacing.x
|
||||
|
||||
TextField {
|
||||
id: emailField
|
||||
anchors {
|
||||
verticalCenter: parent.verticalCenter
|
||||
}
|
||||
width: 350
|
||||
|
||||
label: "Email"
|
||||
}
|
||||
}
|
||||
|
||||
Row {
|
||||
spacing: hifi.dimensions.contentSpacing.x
|
||||
|
||||
TextField {
|
||||
id: usernameField
|
||||
anchors {
|
||||
verticalCenter: parent.verticalCenter
|
||||
}
|
||||
width: 350
|
||||
|
||||
label: "Username"
|
||||
}
|
||||
|
||||
ShortcutText {
|
||||
anchors {
|
||||
verticalCenter: parent.verticalCenter
|
||||
}
|
||||
|
||||
text: qsTr("No spaces / special chars.")
|
||||
|
||||
verticalAlignment: Text.AlignVCenter
|
||||
horizontalAlignment: Text.AlignHCenter
|
||||
|
||||
color: hifi.colors.blueAccent
|
||||
}
|
||||
}
|
||||
|
||||
Row {
|
||||
spacing: hifi.dimensions.contentSpacing.x
|
||||
|
||||
TextField {
|
||||
id: passwordField
|
||||
anchors {
|
||||
verticalCenter: parent.verticalCenter
|
||||
}
|
||||
width: 350
|
||||
|
||||
label: "Password"
|
||||
echoMode: TextInput.Password
|
||||
}
|
||||
|
||||
ShortcutText {
|
||||
anchors {
|
||||
verticalCenter: parent.verticalCenter
|
||||
}
|
||||
|
||||
text: qsTr("At least 6 characters")
|
||||
|
||||
verticalAlignment: Text.AlignVCenter
|
||||
horizontalAlignment: Text.AlignHCenter
|
||||
|
||||
color: hifi.colors.blueAccent
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Override ScrollingWindow's keyboard that would be at very bottom of dialog.
|
||||
Keyboard {
|
||||
raised: keyboardEnabled && keyboardRaised
|
||||
numeric: punctuationMode
|
||||
anchors {
|
||||
left: parent.left
|
||||
right: parent.right
|
||||
bottom: buttons.top
|
||||
bottomMargin: keyboardRaised ? 2 * hifi.dimensions.contentSpacing.y : 0
|
||||
}
|
||||
}
|
||||
|
||||
Row {
|
||||
id: leftButton
|
||||
anchors {
|
||||
left: parent.left
|
||||
bottom: parent.bottom
|
||||
bottomMargin: hifi.dimensions.contentSpacing.y
|
||||
}
|
||||
|
||||
spacing: hifi.dimensions.contentSpacing.x
|
||||
onHeightChanged: d.resize(); onWidthChanged: d.resize();
|
||||
|
||||
Button {
|
||||
anchors.verticalCenter: parent.verticalCenter
|
||||
|
||||
text: qsTr("Existing User")
|
||||
|
||||
onClicked: {
|
||||
bodyLoader.setSource("LinkAccountBody.qml")
|
||||
bodyLoader.item.width = root.pane.width
|
||||
bodyLoader.item.height = root.pane.height
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Row {
|
||||
id: buttons
|
||||
anchors {
|
||||
right: parent.right
|
||||
bottom: parent.bottom
|
||||
bottomMargin: hifi.dimensions.contentSpacing.y
|
||||
}
|
||||
spacing: hifi.dimensions.contentSpacing.x
|
||||
onHeightChanged: d.resize(); onWidthChanged: d.resize();
|
||||
|
||||
Button {
|
||||
id: linkAccountButton
|
||||
anchors.verticalCenter: parent.verticalCenter
|
||||
width: 200
|
||||
|
||||
text: qsTr("Sign Up")
|
||||
color: hifi.buttons.blue
|
||||
|
||||
onClicked: signupBody.signup()
|
||||
}
|
||||
|
||||
Button {
|
||||
anchors.verticalCenter: parent.verticalCenter
|
||||
|
||||
text: qsTr("Cancel")
|
||||
|
||||
onClicked: root.destroy()
|
||||
}
|
||||
}
|
||||
|
||||
Component.onCompleted: {
|
||||
root.title = qsTr("Create an Account")
|
||||
root.iconText = "<"
|
||||
keyboardEnabled = HMD.active;
|
||||
d.resize();
|
||||
|
||||
emailField.forceActiveFocus();
|
||||
}
|
||||
|
||||
Connections {
|
||||
target: loginDialog
|
||||
onHandleSignupCompleted: {
|
||||
console.log("Sign Up Succeeded");
|
||||
|
||||
// now that we have an account, login with that username and password
|
||||
loginDialog.login(usernameField.text, passwordField.text)
|
||||
}
|
||||
onHandleSignupFailed: {
|
||||
console.log("Sign Up Failed")
|
||||
toggleLoading(false)
|
||||
|
||||
mainTextContainer.text = errorString
|
||||
mainTextContainer.visible = true
|
||||
|
||||
d.resize();
|
||||
}
|
||||
onHandleLoginCompleted: {
|
||||
bodyLoader.setSource("WelcomeBody.qml", { "welcomeBack": false })
|
||||
bodyLoader.item.width = root.pane.width
|
||||
bodyLoader.item.height = root.pane.height
|
||||
}
|
||||
onHandleLoginFailed: {
|
||||
// we failed to login, show the LoginDialog so the user will try again
|
||||
bodyLoader.setSource("LinkAccountBody.qml", { "failAfterSignUp": true })
|
||||
bodyLoader.item.width = root.pane.width
|
||||
bodyLoader.item.height = root.pane.height
|
||||
}
|
||||
}
|
||||
|
||||
Keys.onPressed: {
|
||||
if (!visible) {
|
||||
return
|
||||
}
|
||||
|
||||
switch (event.key) {
|
||||
case Qt.Key_Enter:
|
||||
case Qt.Key_Return:
|
||||
event.accepted = true
|
||||
signupBody.signup()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|

@ -47,11 +47,9 @@ Item {
                               hifi.dimensions.contentSpacing.y + textField.height +
                               hifi.dimensions.contentSpacing.y + buttons.height

            root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth))
            root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
            parent.width = root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth))
            parent.height = root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
                    + (keyboardEnabled && keyboardRaised ? (200 + 2 * hifi.dimensions.contentSpacing.y) : hifi.dimensions.contentSpacing.y)

            height = root.height
        }
    }

@ -17,8 +17,8 @@ import "../styles-uit"
Item {
    id: welcomeBody
    clip: true
    width: pane.width
    height: pane.height
    width: root.pane.width
    height: root.pane.height

    property bool welcomeBack: false

@ -39,8 +39,8 @@ Item {
            var targetWidth = Math.max(titleWidth, mainTextContainer.contentWidth)
            var targetHeight = mainTextContainer.height + 3 * hifi.dimensions.contentSpacing.y + buttons.height

            root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth))
            root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
            parent.width = root.width = Math.max(d.minWidth, Math.min(d.maxWidth, targetWidth))
            parent.height = root.height = Math.max(d.minHeight, Math.min(d.maxHeight, targetHeight))
        }
    }

@ -76,7 +76,7 @@ OriginalDesktop.Desktop {
        WebEngine.settings.localContentCanAccessRemoteUrls = true;

        [ // Allocate the standard buttons in the correct order. They will get images, etc., via scripts.
            "hmdToggle", "mute", "mod", "help",
            "hmdToggle", "mute", "mod", "bubble", "help",
            "hudToggle",
            "com.highfidelity.interface.system.editButton", "marketplace", "snapshot", "goto"
        ].forEach(function (name) {
@@ -131,6 +131,7 @@
#include "LODManager.h"
#include "ModelPackager.h"
#include "networking/HFWebEngineProfile.h"
#include "scripting/TestScriptingInterface.h"
#include "scripting/AccountScriptingInterface.h"
#include "scripting/AssetMappingsScriptingInterface.h"
#include "scripting/AudioDeviceScriptingInterface.h"

@@ -168,6 +169,8 @@
// On Windows PC, NVidia Optimus laptop, we want to enable NVIDIA GPU
// FIXME seems to be broken.
#if defined(Q_OS_WIN)
#include <VersionHelpers.h>

extern "C" {
    _declspec(dllexport) DWORD NvOptimusEnablement = 0x00000001;
}
@ -418,6 +421,16 @@ bool setupEssentials(int& argc, char** argv) {
|
|||
|
||||
Setting::preInit();
|
||||
|
||||
#if defined(Q_OS_WIN)
|
||||
// Select appropriate audio DLL
|
||||
QString audioDLLPath = QCoreApplication::applicationDirPath();
|
||||
if (IsWindows8OrGreater()) {
|
||||
audioDLLPath += "/audioWin8";
|
||||
} else {
|
||||
audioDLLPath += "/audioWin7";
|
||||
}
|
||||
QCoreApplication::addLibraryPath(audioDLLPath);
|
||||
#endif
|
||||
|
||||
static const auto SUPPRESS_SETTINGS_RESET = "--suppress-settings-reset";
|
||||
bool suppressPrompt = cmdOptionExists(argc, const_cast<const char**>(argv), SUPPRESS_SETTINGS_RESET);
|
||||
|
@ -541,6 +554,20 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
|
|||
setProperty(hifi::properties::STEAM, SteamClient::isRunning());
|
||||
setProperty(hifi::properties::CRASHED, _previousSessionCrashed);
|
||||
|
||||
{
|
||||
const QString TEST_SCRIPT = "--testScript";
|
||||
const QStringList args = arguments();
|
||||
for (int i = 0; i < args.size() - 1; ++i) {
|
||||
if (args.at(i) == TEST_SCRIPT) {
|
||||
QString testScriptPath = args.at(i + 1);
|
||||
if (QFileInfo(testScriptPath).exists()) {
|
||||
setProperty(hifi::properties::TEST, QUrl::fromLocalFile(testScriptPath));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
_runningMarker.startRunningMarker();
|
||||
|
||||
PluginContainer* pluginContainer = dynamic_cast<PluginContainer*>(this); // set the container for any plugins that care
|
||||
|
@ -591,8 +618,10 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
|
|||
qCDebug(interfaceapp) << "[VERSION] We will use DEVELOPMENT global services.";
|
||||
#endif
|
||||
|
||||
|
||||
bool wantsSandboxRunning = shouldRunServer();
|
||||
|
||||
static const QString NO_UPDATER_ARG = "--no-updater";
|
||||
static const bool noUpdater = arguments().indexOf(NO_UPDATER_ARG) != -1;
|
||||
static const bool wantsSandboxRunning = shouldRunServer();
|
||||
static bool determinedSandboxState = false;
|
||||
static bool sandboxIsRunning = false;
|
||||
SandboxUtils sandboxUtils;
|
||||
|
@ -602,11 +631,10 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
|
|||
qCDebug(interfaceapp) << "Home sandbox appears to be running.....";
|
||||
determinedSandboxState = true;
|
||||
sandboxIsRunning = true;
|
||||
}, [&, wantsSandboxRunning]() {
|
||||
}, [&]() {
|
||||
qCDebug(interfaceapp) << "Home sandbox does not appear to be running....";
|
||||
if (wantsSandboxRunning) {
|
||||
QString contentPath = getRunServerPath();
|
||||
bool noUpdater = SteamClient::isRunning();
|
||||
SandboxUtils::runLocalSandbox(contentPath, true, RUNNING_MARKER_FILENAME, noUpdater);
|
||||
sandboxIsRunning = true;
|
||||
}
|
||||
|
@ -1128,7 +1156,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
|
|||
#endif
|
||||
|
||||
// If launched from Steam, let it handle updates
|
||||
if (!SteamClient::isRunning()) {
|
||||
if (!noUpdater) {
|
||||
auto applicationUpdater = DependencyManager::get<AutoUpdater>();
|
||||
connect(applicationUpdater.data(), &AutoUpdater::newVersionIsAvailable, dialogsManager.data(), &DialogsManager::showUpdateDialog);
|
||||
applicationUpdater->checkForUpdate();
|
||||
|
@ -1328,90 +1356,96 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
|
|||
return entityServerNode && !isPhysicsEnabled();
|
||||
});
|
||||
|
||||
QVariant testProperty = property(hifi::properties::TEST);
|
||||
qDebug() << testProperty;
|
||||
if (testProperty.isValid()) {
|
||||
auto scriptEngines = DependencyManager::get<ScriptEngines>();
|
||||
const auto testScript = property(hifi::properties::TEST).toUrl();
|
||||
scriptEngines->loadScript(testScript, false);
|
||||
} else {
|
||||
// Get sandbox content set version, if available
|
||||
auto acDirPath = PathUtils::getRootDataDirectory() + BuildInfo::MODIFIED_ORGANIZATION + "/assignment-client/";
|
||||
auto contentVersionPath = acDirPath + "content-version.txt";
|
||||
qCDebug(interfaceapp) << "Checking " << contentVersionPath << " for content version";
|
||||
auto contentVersion = 0;
|
||||
QFile contentVersionFile(contentVersionPath);
|
||||
if (contentVersionFile.open(QIODevice::ReadOnly | QIODevice::Text)) {
|
||||
QString line = contentVersionFile.readAll();
|
||||
// toInt() returns 0 if the conversion fails, so we don't need to specifically check for failure
|
||||
contentVersion = line.toInt();
|
||||
}
|
||||
qCDebug(interfaceapp) << "Server content version: " << contentVersion;
|
||||
|
||||
bool hasTutorialContent = contentVersion >= 1;
|
||||
|
||||
// Get sandbox content set version, if available
|
||||
auto acDirPath = PathUtils::getRootDataDirectory() + BuildInfo::MODIFIED_ORGANIZATION + "/assignment-client/";
|
||||
auto contentVersionPath = acDirPath + "content-version.txt";
|
||||
qCDebug(interfaceapp) << "Checking " << contentVersionPath << " for content version";
|
||||
auto contentVersion = 0;
|
||||
QFile contentVersionFile(contentVersionPath);
|
||||
if (contentVersionFile.open(QIODevice::ReadOnly | QIODevice::Text)) {
|
||||
QString line = contentVersionFile.readAll();
|
||||
// toInt() returns 0 if the conversion fails, so we don't need to specifically check for failure
|
||||
contentVersion = line.toInt();
|
||||
}
|
||||
qCDebug(interfaceapp) << "Server content version: " << contentVersion;
|
||||
Setting::Handle<bool> firstRun { Settings::firstRun, true };
|
||||
bool hasHMDAndHandControllers = PluginUtils::isHMDAvailable("OpenVR (Vive)") && PluginUtils::isHandControllerAvailable();
|
||||
Setting::Handle<bool> tutorialComplete { "tutorialComplete", false };
|
||||
|
||||
bool hasTutorialContent = contentVersion >= 1;
|
||||
bool shouldGoToTutorial = hasHMDAndHandControllers && hasTutorialContent && !tutorialComplete.get();
|
||||
|
||||
Setting::Handle<bool> firstRun { Settings::firstRun, true };
|
||||
bool hasHMDAndHandControllers = PluginUtils::isHMDAvailable("OpenVR (Vive)") && PluginUtils::isHandControllerAvailable();
|
||||
Setting::Handle<bool> tutorialComplete { "tutorialComplete", false };
|
||||
qCDebug(interfaceapp) << "Has HMD + Hand Controllers: " << hasHMDAndHandControllers << ", current plugin: " << _displayPlugin->getName();
|
||||
qCDebug(interfaceapp) << "Has tutorial content: " << hasTutorialContent;
|
||||
qCDebug(interfaceapp) << "Tutorial complete: " << tutorialComplete.get();
|
||||
qCDebug(interfaceapp) << "Should go to tutorial: " << shouldGoToTutorial;
|
||||
|
||||
bool shouldGoToTutorial = hasHMDAndHandControllers && hasTutorialContent && !tutorialComplete.get();
|
||||
// when --url in command line, teleport to location
|
||||
const QString HIFI_URL_COMMAND_LINE_KEY = "--url";
|
||||
int urlIndex = arguments().indexOf(HIFI_URL_COMMAND_LINE_KEY);
|
||||
QString addressLookupString;
|
||||
if (urlIndex != -1) {
|
||||
addressLookupString = arguments().value(urlIndex + 1);
|
||||
}
|
||||
|
||||
qCDebug(interfaceapp) << "Has HMD + Hand Controllers: " << hasHMDAndHandControllers << ", current plugin: " << _displayPlugin->getName();
|
||||
qCDebug(interfaceapp) << "Has tutorial content: " << hasTutorialContent;
|
||||
qCDebug(interfaceapp) << "Tutorial complete: " << tutorialComplete.get();
|
||||
qCDebug(interfaceapp) << "Should go to tutorial: " << shouldGoToTutorial;
|
||||
const QString TUTORIAL_PATH = "/tutorial_begin";
|
||||
|
||||
// when --url in command line, teleport to location
|
||||
const QString HIFI_URL_COMMAND_LINE_KEY = "--url";
|
||||
int urlIndex = arguments().indexOf(HIFI_URL_COMMAND_LINE_KEY);
|
||||
QString addressLookupString;
|
||||
if (urlIndex != -1) {
|
||||
addressLookupString = arguments().value(urlIndex + 1);
|
||||
}
|
||||
|
||||
const QString TUTORIAL_PATH = "/tutorial_begin";
|
||||
|
||||
if (shouldGoToTutorial) {
|
||||
if(sandboxIsRunning) {
|
||||
qCDebug(interfaceapp) << "Home sandbox appears to be running, going to Home.";
|
||||
DependencyManager::get<AddressManager>()->goToLocalSandbox(TUTORIAL_PATH);
|
||||
if (shouldGoToTutorial) {
|
||||
if (sandboxIsRunning) {
|
||||
qCDebug(interfaceapp) << "Home sandbox appears to be running, going to Home.";
|
||||
DependencyManager::get<AddressManager>()->goToLocalSandbox(TUTORIAL_PATH);
|
||||
} else {
|
||||
qCDebug(interfaceapp) << "Home sandbox does not appear to be running, going to Entry.";
|
||||
if (firstRun.get()) {
|
||||
showHelp();
|
||||
}
|
||||
if (addressLookupString.isEmpty()) {
|
||||
DependencyManager::get<AddressManager>()->goToEntry();
|
||||
} else {
|
||||
DependencyManager::get<AddressManager>()->loadSettings(addressLookupString);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
qCDebug(interfaceapp) << "Home sandbox does not appear to be running, going to Entry.";
|
||||
if (firstRun.get()) {
|
||||
|
||||
bool isFirstRun = firstRun.get();
|
||||
|
||||
if (isFirstRun) {
|
||||
showHelp();
|
||||
}
|
||||
if (addressLookupString.isEmpty()) {
|
||||
DependencyManager::get<AddressManager>()->goToEntry();
|
||||
} else {
|
||||
DependencyManager::get<AddressManager>()->loadSettings(addressLookupString);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
bool isFirstRun = firstRun.get();
|
||||
|
||||
if (isFirstRun) {
|
||||
showHelp();
|
||||
}
|
||||
|
||||
// If this is a first run we short-circuit the address passed in
|
||||
if (isFirstRun) {
|
||||
if (hasHMDAndHandControllers) {
|
||||
if(sandboxIsRunning) {
|
||||
qCDebug(interfaceapp) << "Home sandbox appears to be running, going to Home.";
|
||||
DependencyManager::get<AddressManager>()->goToLocalSandbox();
|
||||
// If this is a first run we short-circuit the address passed in
|
||||
if (isFirstRun) {
|
||||
if (hasHMDAndHandControllers) {
|
||||
if (sandboxIsRunning) {
|
||||
qCDebug(interfaceapp) << "Home sandbox appears to be running, going to Home.";
|
||||
DependencyManager::get<AddressManager>()->goToLocalSandbox();
|
||||
} else {
|
||||
qCDebug(interfaceapp) << "Home sandbox does not appear to be running, going to Entry.";
|
||||
DependencyManager::get<AddressManager>()->goToEntry();
|
||||
}
|
||||
} else {
|
||||
qCDebug(interfaceapp) << "Home sandbox does not appear to be running, going to Entry.";
|
||||
DependencyManager::get<AddressManager>()->goToEntry();
|
||||
}
|
||||
} else {
|
||||
DependencyManager::get<AddressManager>()->goToEntry();
|
||||
qCDebug(interfaceapp) << "Not first run... going to" << qPrintable(addressLookupString.isEmpty() ? QString("previous location") : addressLookupString);
|
||||
DependencyManager::get<AddressManager>()->loadSettings(addressLookupString);
|
||||
}
|
||||
} else {
|
||||
qCDebug(interfaceapp) << "Not first run... going to" << qPrintable(addressLookupString.isEmpty() ? QString("previous location") : addressLookupString);
|
||||
DependencyManager::get<AddressManager>()->loadSettings(addressLookupString);
|
||||
}
|
||||
|
||||
_connectionMonitor.init();
|
||||
|
||||
// After all of the constructor is completed, then set firstRun to false.
|
||||
firstRun.set(false);
|
||||
}
|
||||
|
||||
_connectionMonitor.init();
|
||||
|
||||
// After all of the constructor is completed, then set firstRun to false.
|
||||
firstRun.set(false);
|
||||
}
|
||||
|
||||
void Application::domainConnectionRefused(const QString& reasonMessage, int reasonCodeInt, const QString& extraInfo) {
|
||||
|
@@ -2519,6 +2553,12 @@ void Application::keyPressEvent(QKeyEvent* event) {
                Menu::getInstance()->triggerOption(MenuOption::DefaultSkybox);
                break;

            case Qt::Key_N:
                if (!isOption && !isShifted && isMeta) {
                    DependencyManager::get<NodeList>()->toggleIgnoreRadius();
                }
                break;

            case Qt::Key_S:
                if (isShifted && isMeta && !isOption) {
                    Menu::getInstance()->triggerOption(MenuOption::SuppressShortTimings);
@@ -4755,7 +4795,7 @@ void Application::resetSensors(bool andReload) {
    DependencyManager::get<EyeTracker>()->reset();
    getActiveDisplayPlugin()->resetSensors();
    _overlayConductor.centerUI();
    getMyAvatar()->reset(andReload);
    getMyAvatar()->reset(true, andReload);
    QMetaObject::invokeMethod(DependencyManager::get<AudioClient>().data(), "reset", Qt::QueuedConnection);
}
@@ -5060,6 +5100,11 @@ void Application::registerScriptEngineWithApplicationServices(ScriptEngine* scri
    // AvatarManager has some custom types
    AvatarManager::registerMetaTypes(scriptEngine);

    if (property(hifi::properties::TEST).isValid()) {
        scriptEngine->registerGlobalObject("Test", TestScriptingInterface::getInstance());
    }

    scriptEngine->registerGlobalObject("Overlays", &_overlays);
    scriptEngine->registerGlobalObject("Rates", new RatesScriptingInterface(this));

    // hook our avatar and avatar hash map object into this script engine
@@ -5740,6 +5785,7 @@ void Application::updateDisplayMode() {
        QObject::connect(displayPlugin.get(), &DisplayPlugin::recommendedFramebufferSizeChanged, [this](const QSize & size) {
            resizeGL();
        });
        QObject::connect(displayPlugin.get(), &DisplayPlugin::resetSensorsRequested, this, &Application::requestReset);
        first = false;
    }
@@ -119,7 +119,7 @@ namespace MenuOption {
    const QString LoadScript = "Open and Run Script File...";
    const QString LoadScriptURL = "Open and Run Script from URL...";
    const QString LodTools = "LOD Tools";
    const QString Login = "Login";
    const QString Login = "Login / Sign Up";
    const QString Log = "Log";
    const QString LogExtraTimings = "Log Extra Timing Details";
    const QString LowVelocityFilter = "Low Velocity Filter";
@@ -80,7 +80,7 @@ AvatarManager::AvatarManager(QObject* parent) :

    // when we hear that the user has ignored an avatar by session UUID
    // immediately remove that avatar instead of waiting for the absence of packets from avatar mixer
    connect(nodeList.data(), &NodeList::ignoredNode, this, &AvatarManager::removeAvatar);
    connect(nodeList.data(), "ignoredNode", this, "removeAvatar");
}

AvatarManager::~AvatarManager() {
@ -223,16 +223,16 @@ AvatarSharedPointer AvatarManager::addAvatar(const QUuid& sessionUUID, const QWe
|
|||
}
|
||||
|
||||
// virtual
|
||||
void AvatarManager::removeAvatar(const QUuid& sessionUUID) {
|
||||
void AvatarManager::removeAvatar(const QUuid& sessionUUID, KillAvatarReason removalReason) {
|
||||
QWriteLocker locker(&_hashLock);
|
||||
|
||||
auto removedAvatar = _avatarHash.take(sessionUUID);
|
||||
if (removedAvatar) {
|
||||
handleRemovedAvatar(removedAvatar);
|
||||
handleRemovedAvatar(removedAvatar, removalReason);
|
||||
}
|
||||
}
|
||||
|
||||
void AvatarManager::handleRemovedAvatar(const AvatarSharedPointer& removedAvatar) {
|
||||
void AvatarManager::handleRemovedAvatar(const AvatarSharedPointer& removedAvatar, KillAvatarReason removalReason) {
|
||||
AvatarHashMap::handleRemovedAvatar(removedAvatar);
|
||||
|
||||
// removedAvatar is a shared pointer to an AvatarData but we need to get to the derived Avatar
|
||||
|
@ -247,6 +247,9 @@ void AvatarManager::handleRemovedAvatar(const AvatarSharedPointer& removedAvatar
|
|||
_motionStatesToRemoveFromPhysics.push_back(motionState);
|
||||
}
|
||||
|
||||
if (removalReason == KillAvatarReason::TheirAvatarEnteredYourBubble) {
|
||||
emit DependencyManager::get<UsersScriptingInterface>()->enteredIgnoreRadius();
|
||||
}
|
||||
_avatarFades.push_back(removedAvatar);
|
||||
}
|
||||
|
||||
|
|
|
@@ -79,7 +79,7 @@ public slots:
    void updateAvatarRenderStatus(bool shouldRenderAvatars);

private slots:
    virtual void removeAvatar(const QUuid& sessionUUID) override;
    virtual void removeAvatar(const QUuid& sessionUUID, KillAvatarReason removalReason = KillAvatarReason::NoReason) override;

private:
    explicit AvatarManager(QObject* parent = 0);

@@ -91,7 +91,7 @@ private:
    virtual AvatarSharedPointer newSharedAvatar() override;
    virtual AvatarSharedPointer addAvatar(const QUuid& sessionUUID, const QWeakPointer<Node>& mixerWeakPointer) override;

    virtual void handleRemovedAvatar(const AvatarSharedPointer& removedAvatar) override;
    virtual void handleRemovedAvatar(const AvatarSharedPointer& removedAvatar, KillAvatarReason removalReason = KillAvatarReason::NoReason) override;

    QVector<AvatarSharedPointer> _avatarFades;
    std::shared_ptr<MyAvatar> _myAvatar;
@@ -230,6 +230,10 @@ void MyAvatar::simulateAttachments(float deltaTime) {
QByteArray MyAvatar::toByteArray(bool cullSmallChanges, bool sendAll) {
    CameraMode mode = qApp->getCamera()->getMode();
    _globalPosition = getPosition();
    _globalBoundingBoxCorner.x = _characterController.getCapsuleRadius();
    _globalBoundingBoxCorner.y = _characterController.getCapsuleHalfHeight();
    _globalBoundingBoxCorner.z = _characterController.getCapsuleRadius();
    _globalBoundingBoxCorner += _characterController.getCapsuleLocalOffset();
    if (mode == CAMERA_MODE_THIRD_PERSON || mode == CAMERA_MODE_INDEPENDENT) {
        // fake the avatar position that is sent up to the AvatarMixer
        glm::vec3 oldPosition = getPosition();
@@ -360,10 +364,18 @@ void MyAvatar::update(float deltaTime) {
    updateFromTrackers(deltaTime);

    // Get audio loudness data from audio input device
    // Also get the AudioClient so we can update the avatar bounding box data
    // on the AudioClient side.
    auto audio = DependencyManager::get<AudioClient>();
    head->setAudioLoudness(audio->getLastInputLoudness());
    head->setAudioAverageLoudness(audio->getAudioAverageInputLoudness());

    glm::vec3 halfBoundingBoxDimensions(_characterController.getCapsuleRadius(), _characterController.getCapsuleHalfHeight(), _characterController.getCapsuleRadius());
    halfBoundingBoxDimensions += _characterController.getCapsuleLocalOffset();
    QMetaObject::invokeMethod(audio.data(), "setAvatarBoundingBoxParameters",
        Q_ARG(glm::vec3, (getPosition() - halfBoundingBoxDimensions)),
        Q_ARG(glm::vec3, (halfBoundingBoxDimensions*2.0f)));

    if (_avatarEntityDataLocallyEdited) {
        sendIdentityPacket();
    }
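The two Q_ARG values above pack the avatar's axis-aligned bounding box in corner-plus-scale form. A minimal standalone sketch of that arithmetic (variable names illustrative, assuming glm types):

    glm::vec3 halfDims(capsuleRadius, capsuleHalfHeight, capsuleRadius); // capsule-derived half-extents
    glm::vec3 corner = position - halfDims; // lowest corner of the box
    glm::vec3 scale = halfDims * 2.0f;      // full edge lengths along each axis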
interface/src/scripting/TestScriptingInterface.cpp (new file, 30 lines)
@@ -0,0 +1,30 @@
//
//  Created by Bradley Austin Davis on 2016/12/12
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "TestScriptingInterface.h"

#include <QtCore/QCoreApplication>

TestScriptingInterface* TestScriptingInterface::getInstance() {
    static TestScriptingInterface sharedInstance;
    return &sharedInstance;
}

void TestScriptingInterface::quit() {
    qApp->quit();
}

void TestScriptingInterface::waitForTextureIdle() {
}

void TestScriptingInterface::waitForDownloadIdle() {
}

void TestScriptingInterface::waitIdle() {
}

interface/src/scripting/TestScriptingInterface.h (new file, 43 lines)
@@ -0,0 +1,43 @@
//
//  Created by Bradley Austin Davis on 2016/12/12
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#pragma once
#ifndef hifi_TestScriptingInterface_h
#define hifi_TestScriptingInterface_h

#include <QtCore/QObject>

class TestScriptingInterface : public QObject {
    Q_OBJECT

public slots:
    static TestScriptingInterface* getInstance();

    /**jsdoc
     * Exits the application
     */
    void quit();

    /**jsdoc
     * Waits for all texture transfers to be complete
     */
    void waitForTextureIdle();

    /**jsdoc
     * Waits for all pending downloads to be complete
     */
    void waitForDownloadIdle();

    /**jsdoc
     * Waits for all pending downloads and texture transfers to be complete
     */
    void waitIdle();

};

#endif // hifi_TestScriptingInterface_h
@@ -11,9 +11,10 @@

#include "LoginDialog.h"

#include <QDesktopServices>
#include <QJsonDocument>
#include <QNetworkReply>
#include <QtGui/QDesktopServices>
#include <QtCore/QJsonArray>
#include <QtCore/QJsonDocument>
#include <QtNetwork/QNetworkReply>

#include <NetworkingConstants.h>
#include <steamworks-wrapper/SteamClient.h>
@@ -47,7 +48,7 @@ void LoginDialog::toggleAction() {
        connection = connect(loginAction, &QAction::triggered, accountManager.data(), &AccountManager::logout);
    } else {
        // change the menu item to login
        loginAction->setText("Login");
        loginAction->setText("Login / Sign Up");
        connection = connect(loginAction, &QAction::triggered, [] {
            LoginDialog::show();
        });
@ -153,3 +154,83 @@ void LoginDialog::createFailed(QNetworkReply& reply) {
|
|||
emit handleCreateFailed(reply.errorString());
|
||||
}
|
||||
|
||||
void LoginDialog::signup(const QString& email, const QString& username, const QString& password) {
|
||||
|
||||
JSONCallbackParameters callbackParams;
|
||||
callbackParams.jsonCallbackReceiver = this;
|
||||
callbackParams.jsonCallbackMethod = "signupCompleted";
|
||||
callbackParams.errorCallbackReceiver = this;
|
||||
callbackParams.errorCallbackMethod = "signupFailed";
|
||||
|
||||
QJsonObject payload;
|
||||
|
||||
QJsonObject userObject;
|
||||
userObject.insert("email", email);
|
||||
userObject.insert("username", username);
|
||||
userObject.insert("password", password);
|
||||
|
||||
payload.insert("user", userObject);
|
||||
|
||||
static const QString API_SIGNUP_PATH = "api/v1/users";
|
||||
|
||||
qDebug() << "Sending a request to create an account for" << username;
|
||||
|
||||
auto accountManager = DependencyManager::get<AccountManager>();
|
||||
accountManager->sendRequest(API_SIGNUP_PATH, AccountManagerAuth::None,
|
||||
QNetworkAccessManager::PostOperation, callbackParams,
|
||||
QJsonDocument(payload).toJson());
|
||||
}
|
||||
|
||||
void LoginDialog::signupCompleted(QNetworkReply& reply) {
|
||||
emit handleSignupCompleted();
|
||||
}
|
||||
|
||||
QString errorStringFromAPIObject(const QJsonValue& apiObject) {
|
||||
if (apiObject.isArray()) {
|
||||
return apiObject.toArray()[0].toString();
|
||||
} else if (apiObject.isString()) {
|
||||
return apiObject.toString();
|
||||
} else {
|
||||
return "is invalid";
|
||||
}
|
||||
}
|
||||
|
||||
void LoginDialog::signupFailed(QNetworkReply& reply) {
|
||||
|
||||
// parse the returned JSON to see what the problem was
|
||||
auto jsonResponse = QJsonDocument::fromJson(reply.readAll());
|
||||
|
||||
static const QString RESPONSE_DATA_KEY = "data";
|
||||
|
||||
auto dataJsonValue = jsonResponse.object()[RESPONSE_DATA_KEY];
|
||||
|
||||
if (dataJsonValue.isObject()) {
|
||||
auto dataObject = dataJsonValue.toObject();
|
||||
|
||||
static const QString EMAIL_DATA_KEY = "email";
|
||||
static const QString USERNAME_DATA_KEY = "username";
|
||||
static const QString PASSWORD_DATA_KEY = "password";
|
||||
|
||||
QStringList errorStringList;
|
||||
|
||||
if (dataObject.contains(EMAIL_DATA_KEY)) {
|
||||
errorStringList.append(QString("Email %1.").arg(errorStringFromAPIObject(dataObject[EMAIL_DATA_KEY])));
|
||||
}
|
||||
|
||||
if (dataObject.contains(USERNAME_DATA_KEY)) {
|
||||
errorStringList.append(QString("Username %1.").arg(errorStringFromAPIObject(dataObject[USERNAME_DATA_KEY])));
|
||||
}
|
||||
|
||||
if (dataObject.contains(PASSWORD_DATA_KEY)) {
|
||||
errorStringList.append(QString("Password %1.").arg(errorStringFromAPIObject(dataObject[PASSWORD_DATA_KEY])));
|
||||
}
|
||||
|
||||
emit handleSignupFailed(errorStringList.join('\n'));
|
||||
} else {
|
||||
static const QString DEFAULT_SIGN_UP_FAILURE_MESSAGE = "There was an unknown error while creating your account. Please try again later.";
|
||||
emit handleSignupFailed(DEFAULT_SIGN_UP_FAILURE_MESSAGE);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
|
|
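For reference, the nested QJsonObject assembled in signup() above serializes to a request body of this shape (values illustrative):

    { "user": { "email": "user@example.com", "username": "newUser", "password": "..." } }

which is POSTed to api/v1/users with AccountManagerAuth::None, i.e. no existing credentials are required to create an account.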
@@ -36,6 +36,9 @@ signals:

    void handleCreateCompleted();
    void handleCreateFailed(QString error);

    void handleSignupCompleted();
    void handleSignupFailed(QString errorString);

public slots:
    void linkCompleted(QNetworkReply& reply);

@@ -43,6 +46,9 @@ public slots:

    void createCompleted(QNetworkReply& reply);
    void createFailed(QNetworkReply& reply);

    void signupCompleted(QNetworkReply& reply);
    void signupFailed(QNetworkReply& reply);

protected slots:
    Q_INVOKABLE bool isSteamRunning() const;

@@ -51,6 +57,8 @@ protected slots:
    Q_INVOKABLE void loginThroughSteam();
    Q_INVOKABLE void linkSteam();
    Q_INVOKABLE void createAccountFromStream(QString username = QString());

    Q_INVOKABLE void signup(const QString& email, const QString& username, const QString& password);

    Q_INVOKABLE void openUrl(const QString& url) const;
@@ -68,18 +68,6 @@ void setupPreferences() {
        auto setter = [=](bool value) { myAvatar->setClearOverlayWhenMoving(value); };
        preferences->addPreference(new CheckPreference(AVATAR_BASICS, "Clear overlays when moving", getter, setter));
    }
    {
        auto getter = [=]()->float { return nodeList->getIgnoreRadius(); };
        auto setter = [=](float value) {
            nodeList->ignoreNodesInRadius(value, nodeList->getIgnoreRadiusEnabled());
        };
        auto preference = new SpinnerPreference(AVATAR_BASICS, "Personal space bubble radius (default is 1m)", getter, setter);
        preference->setMin(0.01f);
        preference->setMax(99.9f);
        preference->setDecimals(2);
        preference->setStep(0.25);
        preferences->addPreference(preference);
    }

    // UI
    {
@@ -1063,7 +1063,9 @@ void AudioClient::handleAudioInput() {
        encodedBuffer = decodedBuffer;
    }

    emitAudioPacket(encodedBuffer.constData(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, audioTransform, packetType, _selectedCodecName);
    emitAudioPacket(encodedBuffer.constData(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber,
                    audioTransform, avatarBoundingBoxCorner, avatarBoundingBoxScale,
                    packetType, _selectedCodecName);
    _stats.sentPacket();

    int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * AudioConstants::SAMPLE_SIZE;

@@ -1085,7 +1087,9 @@ void AudioClient::handleRecordedAudioInput(const QByteArray& audio) {
    }

    // FIXME check a flag to see if we should echo audio?
    emitAudioPacket(encodedBuffer.data(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, audioTransform, PacketType::MicrophoneAudioWithEcho, _selectedCodecName);
    emitAudioPacket(encodedBuffer.data(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber,
                    audioTransform, avatarBoundingBoxCorner, avatarBoundingBoxScale,
                    PacketType::MicrophoneAudioWithEcho, _selectedCodecName);
}

void AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
@@ -1098,13 +1102,37 @@ void AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
    for (AudioInjector* injector : getActiveLocalAudioInjectors()) {
        if (injector->getLocalBuffer()) {

            qint64 samplesToRead = injector->isStereo() ? AudioConstants::NETWORK_FRAME_BYTES_STEREO : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
            static const int HRTF_DATASET_INDEX = 1;

            // get one frame from the injector (mono or stereo)
            memset(_scratchBuffer, 0, sizeof(_scratchBuffer));
            if (0 < injector->getLocalBuffer()->readData((char*)_scratchBuffer, samplesToRead)) {
            int numChannels = injector->isAmbisonic() ? AudioConstants::AMBISONIC : (injector->isStereo() ? AudioConstants::STEREO : AudioConstants::MONO);
            qint64 bytesToRead = numChannels * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;

            // get one frame from the injector
            memset(_scratchBuffer, 0, bytesToRead);
            if (0 < injector->getLocalBuffer()->readData((char*)_scratchBuffer, bytesToRead)) {

                if (injector->isStereo()) {
                if (injector->isAmbisonic()) {

                    // no distance attenuation
                    float gain = injector->getVolume();

                    //
                    // Calculate the soundfield orientation relative to the listener.
                    // Injector orientation can be used to align a recording to our world coordinates.
                    //
                    glm::quat relativeOrientation = injector->getOrientation() * glm::inverse(_orientationGetter());

                    // convert from Y-up (OpenGL) to Z-up (Ambisonic) coordinate system
                    float qw = relativeOrientation.w;
                    float qx = -relativeOrientation.z;
                    float qy = -relativeOrientation.x;
                    float qz = relativeOrientation.y;

                    // Ambisonic gets spatialized into mixBuffer
                    injector->getLocalFOA().render(_scratchBuffer, mixBuffer, HRTF_DATASET_INDEX,
                                                   qw, qx, qy, qz, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                } else if (injector->isStereo()) {

                    // stereo gets directly mixed into mixBuffer
                    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i++) {

@@ -1120,7 +1148,8 @@ void AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
                    float azimuth = azimuthForSource(relativePosition);

                    // mono gets spatialized into mixBuffer
                    injector->getLocalHRTF().render(_scratchBuffer, mixBuffer, 1, azimuth, distance, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
                    injector->getLocalHRTF().render(_scratchBuffer, mixBuffer, HRTF_DATASET_INDEX,
                                                    azimuth, distance, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
                }

            } else {
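The component shuffle above is a change of basis rather than an extra rotation: the renderer's Y-up axes are relabeled into the ambisonic Z-up convention. The same mapping as a standalone helper (a sketch, assuming glm, whose quaternion constructor takes w, x, y, z):

    #include <glm/gtc/quaternion.hpp>

    // Relabel a Y-up (OpenGL) orientation into the Z-up (Ambisonic) frame,
    // exactly the (qw, qx, qy, qz) = (w, -z, -x, y) mapping used above.
    inline glm::quat yUpToAmbisonicFrame(const glm::quat& q) {
        return glm::quat(q.w, -q.z, -q.x, q.y);
    }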
@@ -1225,8 +1254,7 @@ void AudioClient::setIsStereoInput(bool isStereoInput) {
    }
}

bool AudioClient::outputLocalInjector(bool isStereo, AudioInjector* injector) {
bool AudioClient::outputLocalInjector(AudioInjector* injector) {
    Lock lock(_injectorsMutex);
    if (injector->getLocalBuffer() && _audioInput ) {
        // just add it to the vector of active local injectors, if
@@ -1595,3 +1623,8 @@ void AudioClient::saveSettings() {
    dynamicJitterBufferEnabled.set(_receivedAudioStream.dynamicJitterBufferEnabled());
    staticJitterBufferFrames.set(_receivedAudioStream.getStaticJitterBufferFrames());
}

void AudioClient::setAvatarBoundingBoxParameters(glm::vec3 corner, glm::vec3 scale) {
    avatarBoundingBoxCorner = corner;
    avatarBoundingBoxScale = scale;
}
@@ -127,6 +127,8 @@ public:
    void setPositionGetter(AudioPositionGetter positionGetter) { _positionGetter = positionGetter; }
    void setOrientationGetter(AudioOrientationGetter orientationGetter) { _orientationGetter = orientationGetter; }

    Q_INVOKABLE void setAvatarBoundingBoxParameters(glm::vec3 corner, glm::vec3 scale);

    QVector<AudioInjector*>& getActiveLocalAudioInjectors() { return _activeLocalAudioInjectors; }

    void checkDevices();

@@ -169,7 +171,7 @@ public slots:

    int setOutputBufferSize(int numFrames, bool persist = true);

    bool outputLocalInjector(bool isStereo, AudioInjector* injector) override;
    bool outputLocalInjector(AudioInjector* injector) override;
    bool shouldLoopbackInjectors() override { return _shouldEchoToServer; }

    bool switchInputToAudioDevice(const QString& inputDeviceName);

@@ -297,7 +299,7 @@ private:

    // for local hrtf-ing
    float _mixBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
    int16_t _scratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
    int16_t _scratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_AMBISONIC];
    AudioLimiter _audioLimiter;

    // Adds Reverb

@@ -324,6 +326,9 @@ private:
    AudioPositionGetter _positionGetter;
    AudioOrientationGetter _orientationGetter;

    glm::vec3 avatarBoundingBoxCorner;
    glm::vec3 avatarBoundingBoxScale;

    QVector<QString> _inputDevices;
    QVector<QString> _outputDevices;
@@ -19,8 +19,9 @@

#include "AudioConstants.h"

void AbstractAudioInterface::emitAudioPacket(const void* audioData, size_t bytes, quint16& sequenceNumber,
                                             const Transform& transform, PacketType packetType, QString codecName) {
void AbstractAudioInterface::emitAudioPacket(const void* audioData, size_t bytes, quint16& sequenceNumber,
                                             const Transform& transform, glm::vec3 avatarBoundingBoxCorner, glm::vec3 avatarBoundingBoxScale,
                                             PacketType packetType, QString codecName) {
    static std::mutex _mutex;
    using Locker = std::unique_lock<std::mutex>;
    auto nodeList = DependencyManager::get<NodeList>();

@@ -55,6 +56,10 @@ void AbstractAudioInterface::emitAudioPacket(const void* audioData, size_t bytes
    // pack the orientation
    audioPacket->writePrimitive(transform.getRotation());

    audioPacket->writePrimitive(avatarBoundingBoxCorner);
    audioPacket->writePrimitive(avatarBoundingBoxScale);

    if (audioPacket->getType() != PacketType::SilentAudioFrame) {
        // audio samples have already been packed (written to networkAudioSamples)
        int leadingBytes = audioPacket->getPayloadSize();
@@ -28,11 +28,12 @@ class AbstractAudioInterface : public QObject {
public:
    AbstractAudioInterface(QObject* parent = 0) : QObject(parent) {};

    static void emitAudioPacket(const void* audioData, size_t bytes, quint16& sequenceNumber, const Transform& transform,
    static void emitAudioPacket(const void* audioData, size_t bytes, quint16& sequenceNumber,
                                const Transform& transform, glm::vec3 avatarBoundingBoxCorner, glm::vec3 avatarBoundingBoxScale,
                                PacketType packetType, QString codecName = QString(""));

public slots:
    virtual bool outputLocalInjector(bool isStereo, AudioInjector* injector) = 0;
    virtual bool outputLocalInjector(AudioInjector* injector) = 0;
    virtual bool shouldLoopbackInjectors() { return false; }

    virtual void setIsStereoInput(bool stereo) = 0;
@@ -20,7 +20,7 @@ namespace AudioConstants {
    const int SAMPLE_RATE = 24000;
    const int MONO = 1;
    const int STEREO = 2;
    const int AMBISONIC = 4;

    typedef int16_t AudioSample;
    const int SAMPLE_SIZE = sizeof(AudioSample);

@@ -33,6 +33,7 @@ namespace AudioConstants {
    const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / SAMPLE_SIZE;
    const int NETWORK_FRAME_BYTES_PER_CHANNEL = NETWORK_FRAME_BYTES_STEREO / 2;
    const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = NETWORK_FRAME_BYTES_PER_CHANNEL / SAMPLE_SIZE;
    const int NETWORK_FRAME_SAMPLES_AMBISONIC = NETWORK_FRAME_SAMPLES_PER_CHANNEL * AMBISONIC;
    const float NETWORK_FRAME_SECS = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL / float(AudioConstants::SAMPLE_RATE));
    const float NETWORK_FRAME_MSECS = NETWORK_FRAME_SECS * 1000.0f;
    const float NETWORK_FRAMES_PER_SEC = 1.0f / NETWORK_FRAME_SECS;
libraries/audio/src/AudioFOA.cpp (new file, 1095 lines): diff suppressed because it is too large

libraries/audio/src/AudioFOA.h (new file, 62 lines)
@@ -0,0 +1,62 @@
//
//  AudioFOA.h
//  libraries/audio/src
//
//  Created by Ken Cooke on 10/28/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_AudioFOA_h
#define hifi_AudioFOA_h

#include <stdint.h>

static const int FOA_TAPS = 273;    // FIR coefs
static const int FOA_NFFT = 512;    // FFT length
static const int FOA_OVERLAP = FOA_TAPS - 1;
static const int FOA_TABLES = 25;   // number of HRTF subjects

static const int FOA_BLOCK = 240;   // block processing size

static const float FOA_GAIN = 1.0f; // FOA global gain adjustment

static_assert((FOA_BLOCK + FOA_OVERLAP) == FOA_NFFT, "FFT convolution requires L+M-1 == NFFT");

class AudioFOA {

public:
    AudioFOA() {
        // identity matrix
        _rotationState[0][0] = 1.0f;
        _rotationState[1][1] = 1.0f;
        _rotationState[2][2] = 1.0f;
    };

    //
    // input: interleaved First-Order Ambisonic source
    // output: interleaved stereo mix buffer (accumulates into existing output)
    // index: HRTF subject index
    // qw, qx, qy, qz: normalized quaternion for orientation
    // gain: gain factor for volume control
    // numFrames: must be FOA_BLOCK in this version
    //
    void render(int16_t* input, float* output, int index, float qw, float qx, float qy, float qz, float gain, int numFrames);

private:
    AudioFOA(const AudioFOA&) = delete;
    AudioFOA& operator=(const AudioFOA&) = delete;

    // For best cache utilization when processing thousands of instances, only
    // the minimum persistant state is stored here. No coefs or work buffers.

    // input history, for overlap-save
    float _fftState[4][FOA_OVERLAP] = {};

    // orientation history
    float _rotationState[3][3] = {};
};

#endif // AudioFOA_h
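The static_assert in this header encodes the overlap-save constraint for FFT convolution: each 512-point FFT must hold one block of new samples plus the filter history, and the constants satisfy it exactly: FOA_BLOCK + FOA_OVERLAP = 240 + (273 - 1) = 512 = FOA_NFFT.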
libraries/audio/src/AudioFOAData.h (new file, 17735 lines): diff suppressed because it is too large
@ -58,8 +58,10 @@ void AudioInjector::setOptions(const AudioInjectorOptions& options) {
|
|||
// since options.stereo is computed from the audio stream,
|
||||
// we need to copy it from existing options just in case.
|
||||
bool currentlyStereo = _options.stereo;
|
||||
bool currentlyAmbisonic = _options.ambisonic;
|
||||
_options = options;
|
||||
_options.stereo = currentlyStereo;
|
||||
_options.ambisonic = currentlyAmbisonic;
|
||||
}
|
||||
|
||||
void AudioInjector::finishNetworkInjection() {
|
||||
|
@@ -134,7 +136,8 @@ bool AudioInjector::inject(bool(AudioInjectorManager::*injection)(AudioInjector*

    int byteOffset = 0;
    if (_options.secondOffset > 0.0f) {
        byteOffset = (int)floorf(AudioConstants::SAMPLE_RATE * _options.secondOffset * (_options.stereo ? 2.0f : 1.0f));
        int numChannels = _options.ambisonic ? 4 : (_options.stereo ? 2 : 1);
        byteOffset = (int)(AudioConstants::SAMPLE_RATE * _options.secondOffset * numChannels);
        byteOffset *= sizeof(AudioConstants::SAMPLE_SIZE);
    }
    _currentSendOffset = byteOffset;
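Worked through at the 24 kHz network sample rate defined in AudioConstants, a half-second offset into a stereo source is 24000 * 0.5 * 2 = 24,000 samples before the byte scaling, and an ambisonic source doubles that again with numChannels = 4.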
@@ -169,7 +172,7 @@ bool AudioInjector::injectLocally() {
        _localBuffer->setCurrentOffset(_currentSendOffset);

        // call this function on the AudioClient's thread
        success = QMetaObject::invokeMethod(_localAudioInterface, "outputLocalInjector", Q_ARG(bool, _options.stereo), Q_ARG(AudioInjector*, this));
        success = QMetaObject::invokeMethod(_localAudioInterface, "outputLocalInjector", Q_ARG(AudioInjector*, this));

        if (!success) {
            qCDebug(audio) << "AudioInjector::injectLocally could not output locally via _localAudioInterface";
|
@ -27,6 +27,7 @@
|
|||
#include "AudioInjectorLocalBuffer.h"
|
||||
#include "AudioInjectorOptions.h"
|
||||
#include "AudioHRTF.h"
|
||||
#include "AudioFOA.h"
|
||||
#include "Sound.h"
|
||||
|
||||
class AbstractAudioInterface;
|
||||
|
@ -59,11 +60,14 @@ public:
|
|||
|
||||
AudioInjectorLocalBuffer* getLocalBuffer() const { return _localBuffer; }
|
||||
AudioHRTF& getLocalHRTF() { return _localHRTF; }
|
||||
AudioFOA& getLocalFOA() { return _localFOA; }
|
||||
|
||||
bool isLocalOnly() const { return _options.localOnly; }
|
||||
float getVolume() const { return _options.volume; }
|
||||
glm::vec3 getPosition() const { return _options.position; }
|
||||
glm::quat getOrientation() const { return _options.orientation; }
|
||||
bool isStereo() const { return _options.stereo; }
|
||||
bool isAmbisonic() const { return _options.ambisonic; }
|
||||
|
||||
bool stateHas(AudioInjectorState state) const ;
|
||||
static void setLocalAudioInterface(AbstractAudioInterface* audioInterface) { _localAudioInterface = audioInterface; }
|
||||
|
@ -113,6 +117,7 @@ private:
|
|||
|
||||
// when the injector is local, we need this
|
||||
AudioHRTF _localHRTF;
|
||||
AudioFOA _localFOA;
|
||||
friend class AudioInjectorManager;
|
||||
};
|
||||
|
||||
|
|
|
@@ -19,6 +19,7 @@ AudioInjectorOptions::AudioInjectorOptions() :
    loop(false),
    orientation(glm::vec3(0.0f, 0.0f, 0.0f)),
    stereo(false),
    ambisonic(false),
    ignorePenumbra(false),
    localOnly(false),
    secondOffset(0.0)

@@ -25,6 +25,7 @@ public:
    bool loop;
    glm::quat orientation;
    bool stereo;
    bool ambisonic;
    bool ignorePenumbra;
    bool localOnly;
    float secondOffset;
@ -77,6 +77,8 @@ int PositionalAudioStream::parsePositionalData(const QByteArray& positionalByteA
|
|||
|
||||
packetStream.readRawData(reinterpret_cast<char*>(&_position), sizeof(_position));
|
||||
packetStream.readRawData(reinterpret_cast<char*>(&_orientation), sizeof(_orientation));
|
||||
packetStream.readRawData(reinterpret_cast<char*>(&_avatarBoundingBoxCorner), sizeof(_avatarBoundingBoxCorner));
|
||||
packetStream.readRawData(reinterpret_cast<char*>(&_avatarBoundingBoxScale), sizeof(_avatarBoundingBoxScale));
|
||||
|
||||
// if this node sent us a NaN for first float in orientation then don't consider this good audio and bail
|
||||
if (glm::isnan(_orientation.x)) {
|
||||
|
|
|
@ -46,6 +46,8 @@ public:
|
|||
PositionalAudioStream::Type getType() const { return _type; }
|
||||
const glm::vec3& getPosition() const { return _position; }
|
||||
const glm::quat& getOrientation() const { return _orientation; }
|
||||
const glm::vec3& getAvatarBoundingBoxCorner() const { return _avatarBoundingBoxCorner; }
|
||||
const glm::vec3& getAvatarBoundingBoxScale() const { return _avatarBoundingBoxScale; }
|
||||
|
||||
|
||||
protected:
|
||||
|
@ -60,6 +62,9 @@ protected:
|
|||
glm::vec3 _position;
|
||||
glm::quat _orientation;
|
||||
|
||||
glm::vec3 _avatarBoundingBoxCorner;
|
||||
glm::vec3 _avatarBoundingBoxScale;
|
||||
|
||||
bool _shouldLoopbackForNode;
|
||||
bool _isStereo;
|
||||
// Ignore penumbra filter
|
||||
|
|
|
@@ -43,9 +43,10 @@ SoundScriptingInterface::SoundScriptingInterface(SharedSoundPointer sound) : _so
    QObject::connect(sound.data(), &Sound::ready, this, &SoundScriptingInterface::ready);
}

Sound::Sound(const QUrl& url, bool isStereo) :
Sound::Sound(const QUrl& url, bool isStereo, bool isAmbisonic) :
    Resource(url),
    _isStereo(isStereo),
    _isAmbisonic(isAmbisonic),
    _isReady(false)
{

@@ -62,8 +63,10 @@ void Sound::downloadFinished(const QByteArray& data) {

        QByteArray outputAudioByteArray;

        interpretAsWav(rawAudioByteArray, outputAudioByteArray);
        downSample(outputAudioByteArray);
        int sampleRate = interpretAsWav(rawAudioByteArray, outputAudioByteArray);
        if (sampleRate != 0) {
            downSample(outputAudioByteArray, sampleRate);
        }
    } else if (fileName.endsWith(RAW_EXTENSION)) {
        // check if this was a stereo raw file
        // since it's raw the only way for us to know that is if the file was called .stereo.raw

@@ -72,8 +75,8 @@ void Sound::downloadFinished(const QByteArray& data) {
            qCDebug(audio) << "Processing sound of" << rawAudioByteArray.size() << "bytes from" << getURL() << "as stereo audio file.";
        }

        // Process as RAW file
        downSample(rawAudioByteArray);
        // Process as 48khz RAW file
        downSample(rawAudioByteArray, 48000);
    } else {
        qCDebug(audio) << "Unknown sound file type";
    }
@ -84,29 +87,80 @@ void Sound::downloadFinished(const QByteArray& data) {
|
|||
emit ready();
|
||||
}
|
||||
|
||||
void Sound::downSample(const QByteArray& rawAudioByteArray) {
|
||||
// assume that this was a RAW file and is now an array of samples that are
|
||||
// signed, 16-bit, 48Khz
|
||||
void Sound::downSample(const QByteArray& rawAudioByteArray, int sampleRate) {
|
||||
|
||||
// we want to convert it to the format that the audio-mixer wants
|
||||
// which is signed, 16-bit, 24Khz
|
||||
|
||||
int numChannels = _isStereo ? 2 : 1;
|
||||
AudioSRC resampler(48000, AudioConstants::SAMPLE_RATE, numChannels);
|
||||
if (sampleRate == AudioConstants::SAMPLE_RATE) {
|
||||
|
||||
// resize to max possible output
|
||||
int numSourceFrames = rawAudioByteArray.size() / (numChannels * sizeof(AudioConstants::AudioSample));
|
||||
int maxDestinationFrames = resampler.getMaxOutput(numSourceFrames);
|
||||
int maxDestinationBytes = maxDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
|
||||
_byteArray.resize(maxDestinationBytes);
|
||||
// no resampling needed
|
||||
_byteArray = rawAudioByteArray;
|
||||
|
||||
int numDestinationFrames = resampler.render((int16_t*)rawAudioByteArray.data(),
|
||||
(int16_t*)_byteArray.data(),
|
||||
numSourceFrames);
|
||||
} else if (_isAmbisonic) {
|
||||
|
||||
// truncate to actual output
|
||||
int numDestinationBytes = numDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
|
||||
_byteArray.resize(numDestinationBytes);
|
||||
// FIXME: add a proper Ambisonic resampler!
|
||||
int numChannels = 4;
|
||||
AudioSRC resampler[4] { {sampleRate, AudioConstants::SAMPLE_RATE, 1},
|
||||
{sampleRate, AudioConstants::SAMPLE_RATE, 1},
|
||||
{sampleRate, AudioConstants::SAMPLE_RATE, 1},
|
||||
{sampleRate, AudioConstants::SAMPLE_RATE, 1} };
|
||||
|
||||
// resize to max possible output
|
||||
int numSourceFrames = rawAudioByteArray.size() / (numChannels * sizeof(AudioConstants::AudioSample));
|
||||
int maxDestinationFrames = resampler[0].getMaxOutput(numSourceFrames);
|
||||
int maxDestinationBytes = maxDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
|
||||
_byteArray.resize(maxDestinationBytes);
|
||||
|
||||
int numDestinationFrames = 0;
|
||||
|
||||
// iterate over channels
|
||||
int16_t* srcBuffer = new int16_t[numSourceFrames];
|
||||
int16_t* dstBuffer = new int16_t[maxDestinationFrames];
|
||||
for (int ch = 0; ch < 4; ch++) {
|
||||
|
||||
int16_t* src = (int16_t*)rawAudioByteArray.data();
|
||||
int16_t* dst = (int16_t*)_byteArray.data();
|
||||
|
||||
// deinterleave samples
|
||||
for (int i = 0; i < numSourceFrames; i++) {
|
||||
srcBuffer[i] = src[4*i + ch];
|
||||
}
|
||||
|
||||
// resample one channel
|
||||
numDestinationFrames = resampler[ch].render(srcBuffer, dstBuffer, numSourceFrames);
|
||||
|
||||
// reinterleave samples
|
||||
for (int i = 0; i < numDestinationFrames; i++) {
|
||||
dst[4*i + ch] = dstBuffer[i];
|
||||
}
|
||||
}
|
||||
delete[] srcBuffer;
|
||||
delete[] dstBuffer;
|
||||
|
||||
// truncate to actual output
|
||||
int numDestinationBytes = numDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
|
||||
_byteArray.resize(numDestinationBytes);
|
||||
|
||||
} else {
|
||||
|
||||
int numChannels = _isStereo ? 2 : 1;
|
||||
AudioSRC resampler(sampleRate, AudioConstants::SAMPLE_RATE, numChannels);
|
||||
|
||||
// resize to max possible output
|
||||
int numSourceFrames = rawAudioByteArray.size() / (numChannels * sizeof(AudioConstants::AudioSample));
|
||||
int maxDestinationFrames = resampler.getMaxOutput(numSourceFrames);
|
||||
int maxDestinationBytes = maxDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
|
||||
_byteArray.resize(maxDestinationBytes);
|
||||
|
||||
int numDestinationFrames = resampler.render((int16_t*)rawAudioByteArray.data(),
|
||||
(int16_t*)_byteArray.data(),
|
||||
numSourceFrames);
|
||||
|
||||
// truncate to actual output
|
||||
int numDestinationBytes = numDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
|
||||
_byteArray.resize(numDestinationBytes);
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
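The per-channel loop in the ambisonic branch above is the standard deinterleave / resample / reinterleave pattern. The two inner loops, factored into hypothetical helpers for clarity (the diff inlines them directly):

    // Copy one channel out of interleaved 16-bit PCM (stride = channel count).
    static void deinterleave(const int16_t* src, int16_t* dst, int frames, int ch, int stride) {
        for (int i = 0; i < frames; i++) {
            dst[i] = src[stride * i + ch];
        }
    }

    // Write one resampled channel back into interleaved PCM.
    static void reinterleave(const int16_t* src, int16_t* dst, int frames, int ch, int stride) {
        for (int i = 0; i < frames; i++) {
            dst[stride * i + ch] = src[i];
        }
    }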
@@ -160,7 +214,8 @@ struct CombinedHeader {
    WAVEHeader wave;
};

void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
// returns wavfile sample rate, used for resampling
int Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {

    CombinedHeader fileHeader;
@ -174,36 +229,35 @@ void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& ou
|
|||
// descriptor.id == "RIFX" also signifies BigEndian file
|
||||
// waveStream.setByteOrder(QDataStream::BigEndian);
|
||||
qCDebug(audio) << "Currently not supporting big-endian audio files.";
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (strncmp(fileHeader.riff.type, "WAVE", 4) != 0
|
||||
|| strncmp(fileHeader.wave.descriptor.id, "fmt", 3) != 0) {
|
||||
qCDebug(audio) << "Not a WAVE Audio file.";
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// added the endianess check as an extra level of security
|
||||
|
||||
if (qFromLittleEndian<quint16>(fileHeader.wave.audioFormat) != 1) {
|
||||
qCDebug(audio) << "Currently not supporting non PCM audio files.";
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
if (qFromLittleEndian<quint16>(fileHeader.wave.numChannels) == 2) {
|
||||
_isStereo = true;
|
||||
} else if (qFromLittleEndian<quint16>(fileHeader.wave.numChannels) > 2) {
|
||||
qCDebug(audio) << "Currently not support audio files with more than 2 channels.";
|
||||
} else if (qFromLittleEndian<quint16>(fileHeader.wave.numChannels) == 4) {
|
||||
_isAmbisonic = true;
|
||||
} else if (qFromLittleEndian<quint16>(fileHeader.wave.numChannels) != 1) {
|
||||
qCDebug(audio) << "Currently not support audio files with other than 1/2/4 channels.";
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (qFromLittleEndian<quint16>(fileHeader.wave.bitsPerSample) != 16) {
|
||||
qCDebug(audio) << "Currently not supporting non 16bit audio files.";
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
if (qFromLittleEndian<quint32>(fileHeader.wave.sampleRate) != 48000) {
|
||||
qCDebug(audio) << "Currently not supporting non 48KHz audio files.";
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
// Skip any extra data in the WAVE chunk
|
||||
waveStream.skipRawData(fileHeader.wave.descriptor.size - (sizeof(WAVEHeader) - sizeof(chunk)));
|
||||
|
||||
|
@@ -218,7 +272,7 @@ int Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& ou
            waveStream.skipRawData(dataHeader.descriptor.size);
        } else {
            qCDebug(audio) << "Could not read wav audio data header.";
            return;
            return 0;
        }
    }
@ -227,12 +281,14 @@ void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& ou
|
|||
outputAudioByteArray.resize(outputAudioByteArraySize);
|
||||
if (waveStream.readRawData(outputAudioByteArray.data(), outputAudioByteArraySize) != (int)outputAudioByteArraySize) {
|
||||
qCDebug(audio) << "Error reading WAV file";
|
||||
return 0;
|
||||
}
|
||||
|
||||
_duration = (float) (outputAudioByteArraySize / (fileHeader.wave.sampleRate * fileHeader.wave.numChannels * fileHeader.wave.bitsPerSample / 8.0f));
|
||||
return fileHeader.wave.sampleRate;
|
||||
|
||||
} else {
|
||||
qCDebug(audio) << "Could not read wav audio file header.";
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -22,9 +22,10 @@ class Sound : public Resource {
    Q_OBJECT

public:
    Sound(const QUrl& url, bool isStereo = false);
    Sound(const QUrl& url, bool isStereo = false, bool isAmbisonic = false);

    bool isStereo() const { return _isStereo; }
    bool isAmbisonic() const { return _isAmbisonic; }
    bool isReady() const { return _isReady; }
    float getDuration() const { return _duration; }

@@ -37,11 +38,12 @@ signals:
private:
    QByteArray _byteArray;
    bool _isStereo;
    bool _isAmbisonic;
    bool _isReady;
    float _duration; // In seconds

    void downSample(const QByteArray& rawAudioByteArray);
    void interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);
    void downSample(const QByteArray& rawAudioByteArray, int sampleRate);
    int interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);

    virtual void downloadFinished(const QByteArray& data) override;
};
libraries/audio/src/avx2/AudioFOA_avx2.cpp (new file, 1346 lines): diff suppressed because it is too large
@@ -55,6 +55,7 @@ namespace AvatarDataPacket {
    PACKED_BEGIN struct Header {
        float position[3];                // skeletal model's position
        float globalPosition[3];          // avatar's position
        float globalBoundingBoxCorner[3]; // global position of the lowest corner of the avatar's bounding box
        uint16_t localOrientation[3];     // avatar's local euler angles (degrees, compressed) relative to the thing it's attached to
        uint16_t scale;                   // (compressed) 'ratio' encoding uses sign bit as flag.
        float lookAtPosition[3];          // world space position that eyes are focusing on.

@@ -64,7 +65,7 @@ namespace AvatarDataPacket {
        float sensorToWorldTrans[3];      // fourth column of sensor to world matrix
        uint8_t flags;
    } PACKED_END;
    const size_t HEADER_SIZE = 69;
    const size_t HEADER_SIZE = 81;

    // only present if HAS_REFERENTIAL flag is set in header.flags
    PACKED_BEGIN struct ParentInfo {
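The size bump is exactly the new field: globalBoundingBoxCorner adds three 32-bit floats, 3 * 4 = 12 bytes, taking the packed header from 69 to 81 bytes. A compile-time check in that spirit (hypothetical, not part of the diff):

    static_assert(sizeof(float[3]) == 12, "globalBoundingBoxCorner adds 12 packed bytes");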
@@ -205,6 +206,9 @@ QByteArray AvatarData::toByteArray(bool cullSmallChanges, bool sendAll) {
    header->globalPosition[0] = _globalPosition.x;
    header->globalPosition[1] = _globalPosition.y;
    header->globalPosition[2] = _globalPosition.z;
    header->globalBoundingBoxCorner[0] = getPosition().x - _globalBoundingBoxCorner.x;
    header->globalBoundingBoxCorner[1] = getPosition().y - _globalBoundingBoxCorner.y;
    header->globalBoundingBoxCorner[2] = getPosition().z - _globalBoundingBoxCorner.z;

    glm::vec3 bodyEulerAngles = glm::degrees(safeEulerAngles(getLocalOrientation()));
    packFloatAngleToTwoByte((uint8_t*)(header->localOrientation + 0), bodyEulerAngles.y);

@@ -481,6 +485,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

    glm::vec3 position = glm::vec3(header->position[0], header->position[1], header->position[2]);
    _globalPosition = glm::vec3(header->globalPosition[0], header->globalPosition[1], header->globalPosition[2]);
    _globalBoundingBoxCorner = glm::vec3(header->globalBoundingBoxCorner[0], header->globalBoundingBoxCorner[1], header->globalBoundingBoxCorner[2]);
    if (isNaN(position)) {
        if (shouldLogError(now)) {
            qCWarning(avatars) << "Discard AvatarData packet: position NaN, uuid " << getSessionUUID();
@ -133,6 +133,15 @@ enum KeyState {
|
|||
DELETE_KEY_DOWN
|
||||
};
|
||||
|
||||
enum KillAvatarReason : uint8_t {
|
||||
NoReason = 0,
|
||||
AvatarDisconnected,
|
||||
AvatarIgnored,
|
||||
TheirAvatarEnteredYourBubble,
|
||||
YourAvatarEnteredTheirBubble
|
||||
};
|
||||
Q_DECLARE_METATYPE(KillAvatarReason);
|
||||
|
||||
class QDataStream;
|
||||
|
||||
class AttachmentData;
|
||||
|
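Both ends of the new reason byte appear in this commit; condensed here for readability (sender lines are from the Agent changes, receiver lines from the AvatarHashMap hunk below):

// Sender: the KillAvatar packet grows by one byte for the reason.
auto packet = NLPacket::create(PacketType::KillAvatar,
                               NUM_BYTES_RFC4122_UUID + sizeof(KillAvatarReason), true);
packet->write(getSessionUUID().toRfc4122());
packet->writePrimitive(KillAvatarReason::NoReason);

// Receiver: read the UUID, then the reason, and forward both.
QUuid sessionUUID = QUuid::fromRfc4122(message->readWithoutCopy(NUM_BYTES_RFC4122_UUID));
KillAvatarReason reason;
message->readPrimitive(&reason);
removeAvatar(sessionUUID, reason);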
@ -353,6 +362,7 @@ public:
|
|||
void fromJson(const QJsonObject& json);
|
||||
|
||||
glm::vec3 getClientGlobalPosition() { return _globalPosition; }
|
||||
glm::vec3 getGlobalBoundingBoxCorner() { return _globalBoundingBoxCorner; }
|
||||
|
||||
Q_INVOKABLE AvatarEntityMap getAvatarEntityData() const;
|
||||
Q_INVOKABLE void setAvatarEntityData(const AvatarEntityMap& avatarEntityData);
|
||||
|
@ -436,6 +446,7 @@ protected:
|
|||
// where Entities are located. This is currently only used by the mixer to decide how often to send
|
||||
// updates about one avatar to another.
|
||||
glm::vec3 _globalPosition;
|
||||
glm::vec3 _globalBoundingBoxCorner;
|
||||
|
||||
mutable ReadWriteLockable _avatarEntitiesLock;
|
||||
AvatarEntityIDs _avatarEntityDetached; // recently detached from this avatar
|
||||
|
|
|
@ -141,20 +141,23 @@ void AvatarHashMap::processAvatarIdentityPacket(QSharedPointer<ReceivedMessage>
|
|||
void AvatarHashMap::processKillAvatar(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
|
||||
// read the node id
|
||||
QUuid sessionUUID = QUuid::fromRfc4122(message->readWithoutCopy(NUM_BYTES_RFC4122_UUID));
|
||||
removeAvatar(sessionUUID);
|
||||
|
||||
KillAvatarReason reason;
|
||||
message->readPrimitive(&reason);
|
||||
removeAvatar(sessionUUID, reason);
|
||||
}
|
||||
|
||||
void AvatarHashMap::removeAvatar(const QUuid& sessionUUID) {
|
||||
void AvatarHashMap::removeAvatar(const QUuid& sessionUUID, KillAvatarReason removalReason) {
|
||||
QWriteLocker locker(&_hashLock);
|
||||
|
||||
auto removedAvatar = _avatarHash.take(sessionUUID);
|
||||
|
||||
if (removedAvatar) {
|
||||
handleRemovedAvatar(removedAvatar);
|
||||
handleRemovedAvatar(removedAvatar, removalReason);
|
||||
}
|
||||
}
|
||||
|
||||
void AvatarHashMap::handleRemovedAvatar(const AvatarSharedPointer& removedAvatar) {
|
||||
void AvatarHashMap::handleRemovedAvatar(const AvatarSharedPointer& removedAvatar, KillAvatarReason removalReason) {
|
||||
qDebug() << "Removed avatar with UUID" << uuidStringWithoutCurlyBraces(removedAvatar->getSessionUUID())
|
||||
<< "from AvatarHashMap";
|
||||
emit avatarRemovedEvent(removedAvatar->getSessionUUID());
|
||||
|
|
|
@ -65,9 +65,9 @@ protected:
|
|||
virtual AvatarSharedPointer addAvatar(const QUuid& sessionUUID, const QWeakPointer<Node>& mixerWeakPointer);
|
||||
AvatarSharedPointer newOrExistingAvatar(const QUuid& sessionUUID, const QWeakPointer<Node>& mixerWeakPointer);
|
||||
virtual AvatarSharedPointer findAvatar(const QUuid& sessionUUID); // uses a QReadLocker on the hashLock
|
||||
virtual void removeAvatar(const QUuid& sessionUUID);
|
||||
virtual void removeAvatar(const QUuid& sessionUUID, KillAvatarReason removalReason = KillAvatarReason::NoReason);
|
||||
|
||||
virtual void handleRemovedAvatar(const AvatarSharedPointer& removedAvatar);
|
||||
virtual void handleRemovedAvatar(const AvatarSharedPointer& removedAvatar, KillAvatarReason removalReason = KillAvatarReason::NoReason);
|
||||
|
||||
AvatarHash _avatarHash;
|
||||
// "Case-based safety": Most access to the _avatarHash is on the same thread. Write access is protected by a write-lock.
|
||||
|
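Because handleRemovedAvatar() stays virtual, subclasses can now branch on why an avatar went away. A hypothetical override (MyAvatarManager is an illustrative name; the enum values come from the AvatarData.h hunk above):

void MyAvatarManager::handleRemovedAvatar(const AvatarSharedPointer& removedAvatar,
                                          KillAvatarReason removalReason) {
    // keep the base class logging/signal behavior
    AvatarHashMap::handleRemovedAvatar(removedAvatar, removalReason);
    if (removalReason == KillAvatarReason::TheirAvatarEnteredYourBubble) {
        // react differently to a bubble ejection than to a plain disconnect
    }
}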
|
|
@ -1165,6 +1165,7 @@ EntityItemProperties EntityItem::getProperties(EntityPropertyFlags desiredProper
|
|||
properties._id = getID();
|
||||
properties._idSet = true;
|
||||
properties._created = _created;
|
||||
properties._lastEdited = _lastEdited;
|
||||
properties.setClientOnly(_clientOnly);
|
||||
properties.setOwningAvatarID(_owningAvatarID);
|
||||
|
||||
|
|
|
@ -369,6 +369,7 @@ QScriptValue EntityItemProperties::copyToScriptValue(QScriptEngine* engine, bool
|
|||
COPY_PROPERTY_TO_QSCRIPTVALUE_GETTER_NO_SKIP(ageAsText, formatSecondsElapsed(getAge())); // gettable, but not settable
|
||||
}
|
||||
|
||||
properties.setProperty("lastEdited", convertScriptValue(engine, _lastEdited));
|
||||
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_LAST_EDITED_BY, lastEditedBy);
|
||||
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_POSITION, position);
|
||||
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_DIMENSIONS, dimensions);
|
||||
|
|
|
@ -174,7 +174,26 @@ public:
|
|||
void sendPeerQueryToIceServer(const HifiSockAddr& iceServerSockAddr, const QUuid& clientID, const QUuid& peerID);
|
||||
|
||||
SharedNodePointer findNodeWithAddr(const HifiSockAddr& addr);
|
||||
|
||||
|
||||
using value_type = SharedNodePointer;
|
||||
using const_iterator = std::vector<value_type>::const_iterator;
|
||||
|
||||
// Cede control of iteration under a single read lock (e.g. for use by thread pools)
|
||||
// Use this for nested loops instead of taking nested read locks!
|
||||
// This allows multiple threads (e.g. a thread pool) to share a lock
|
||||
// without deadlocking when a dying node attempts to acquire a write lock
|
||||
template<typename NestedNodeLambda>
|
||||
void nestedEach(NestedNodeLambda functor) {
|
||||
QReadLocker readLock(&_nodeMutex);
|
||||
|
||||
std::vector<SharedNodePointer> nodes(_nodeHash.size());
|
||||
std::transform(_nodeHash.cbegin(), _nodeHash.cend(), nodes.begin(), [](const NodeHash::value_type& it) {
|
||||
return it.second;
|
||||
});
|
||||
|
||||
functor(nodes.cbegin(), nodes.cend());
|
||||
}
|
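A usage sketch of nestedEach() (hypothetical caller; assumes the iterator aliases above and that NodeList is reachable via DependencyManager as elsewhere in this commit). The functor receives begin/end iterators over a snapshot of the node list, so the inner loop never takes a second read lock:

// Pairwise processing of nodes under one read lock; nesting eachNode()
// calls here instead could deadlock against a dying node waiting on the
// write lock. std::next requires <iterator>.
auto nodeList = DependencyManager::get<NodeList>();
nodeList->nestedEach([](NodeList::const_iterator begin, NodeList::const_iterator end) {
    for (auto outer = begin; outer != end; ++outer) {
        for (auto inner = std::next(outer); inner != end; ++inner) {
            // compare or process the pair (*outer, *inner) here
        }
    }
});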
||||
|
||||
template<typename NodeLambda>
|
||||
void eachNode(NodeLambda functor) {
|
||||
QReadLocker readLock(&_nodeMutex);
|
||||
|
@ -280,7 +299,7 @@ signals:
|
|||
protected slots:
|
||||
void connectedForLocalSocketTest();
|
||||
void errorTestingLocalSocket();
|
||||
|
||||
|
||||
void clientConnectionToSockAddrReset(const HifiSockAddr& sockAddr);
|
||||
|
||||
protected:
|
||||
|
@ -347,7 +366,7 @@ protected:
|
|||
functor(it);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private slots:
|
||||
void flagTimeForConnectionStep(ConnectionStep connectionStep, quint64 timestamp);
|
||||
void possiblyTimeoutSTUNAddressLookup();
|
||||
|
|
|
@ -104,11 +104,8 @@ void Node::addIgnoredNode(const QUuid& otherNodeID) {
|
|||
|
||||
void Node::parseIgnoreRadiusRequestMessage(QSharedPointer<ReceivedMessage> message) {
|
||||
bool enabled;
|
||||
float radius;
|
||||
message->readPrimitive(&enabled);
|
||||
message->readPrimitive(&radius);
|
||||
_ignoreRadiusEnabled = enabled;
|
||||
_ignoreRadius = radius;
|
||||
}
|
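Net result of this hunk, for readability (the old and new lines are interleaved above): the radius value is no longer part of the request, only the flag.

void Node::parseIgnoreRadiusRequestMessage(QSharedPointer<ReceivedMessage> message) {
    bool enabled;
    message->readPrimitive(&enabled);
    _ignoreRadiusEnabled = enabled;
}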
||||
|
||||
QDataStream& operator<<(QDataStream& out, const Node& node) {
|
||||
|
|
|
@ -80,7 +80,6 @@ public:
|
|||
friend QDataStream& operator>>(QDataStream& in, Node& node);
|
||||
|
||||
bool isIgnoreRadiusEnabled() const { return _ignoreRadiusEnabled; }
|
||||
float getIgnoreRadius() { return _ignoreRadiusEnabled ? _ignoreRadius.load() : std::numeric_limits<float>::max(); }
|
||||
|
||||
private:
|
||||
// privatize copy and assignment operator to disallow Node copying
|
||||
|
@ -100,7 +99,6 @@ private:
|
|||
tbb::concurrent_unordered_set<QUuid, UUIDHasher> _ignoredNodeIDSet;
|
||||
|
||||
std::atomic_bool _ignoreRadiusEnabled;
|
||||
std::atomic<float> _ignoreRadius { 0.0f };
|
||||
};
|
||||
|
||||
Q_DECLARE_METATYPE(Node*)
|
||||
|
|
|
@ -750,10 +750,9 @@ bool NodeList::sockAddrBelongsToDomainOrNode(const HifiSockAddr& sockAddr) {
|
|||
return _domainHandler.getSockAddr() == sockAddr || LimitedNodeList::sockAddrBelongsToNode(sockAddr);
|
||||
}
|
||||
|
||||
void NodeList::ignoreNodesInRadius(float radiusToIgnore, bool enabled) {
|
||||
void NodeList::ignoreNodesInRadius(bool enabled) {
|
||||
bool isEnabledChange = _ignoreRadiusEnabled.get() != enabled;
|
||||
_ignoreRadiusEnabled.set(enabled);
|
||||
_ignoreRadius.set(radiusToIgnore);
|
||||
|
||||
eachMatchingNode([](const SharedNodePointer& node)->bool {
|
||||
return (node->getType() == NodeType::AudioMixer || node->getType() == NodeType::AvatarMixer);
|
||||
|
@ -768,7 +767,6 @@ void NodeList::ignoreNodesInRadius(float radiusToIgnore, bool enabled) {
|
|||
void NodeList::sendIgnoreRadiusStateToNode(const SharedNodePointer& destinationNode) {
|
||||
auto ignorePacket = NLPacket::create(PacketType::RadiusIgnoreRequest, sizeof(bool) + sizeof(float), true);
|
||||
ignorePacket->writePrimitive(_ignoreRadiusEnabled.get());
|
||||
ignorePacket->writePrimitive(_ignoreRadius.get());
|
||||
sendPacket(std::move(ignorePacket), *destinationNode);
|
||||
}
|
||||
|
||||
|
|
|
@ -71,12 +71,11 @@ public:
|
|||
|
||||
void setIsShuttingDown(bool isShuttingDown) { _isShuttingDown = isShuttingDown; }
|
||||
|
||||
void ignoreNodesInRadius(float radiusToIgnore, bool enabled = true);
|
||||
float getIgnoreRadius() const { return _ignoreRadius.get(); }
|
||||
void ignoreNodesInRadius(bool enabled = true);
|
||||
bool getIgnoreRadiusEnabled() const { return _ignoreRadiusEnabled.get(); }
|
||||
void toggleIgnoreRadius() { ignoreNodesInRadius(getIgnoreRadius(), !getIgnoreRadiusEnabled()); }
|
||||
void enableIgnoreRadius() { ignoreNodesInRadius(getIgnoreRadius(), true); }
|
||||
void disableIgnoreRadius() { ignoreNodesInRadius(getIgnoreRadius(), false); }
|
||||
void toggleIgnoreRadius() { ignoreNodesInRadius(!getIgnoreRadiusEnabled()); }
|
||||
void enableIgnoreRadius() { ignoreNodesInRadius(true); }
|
||||
void disableIgnoreRadius() { ignoreNodesInRadius(false); }
|
||||
void ignoreNodeBySessionID(const QUuid& nodeID);
|
||||
bool isIgnoringNode(const QUuid& nodeID) const;
|
||||
|
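With the float parameter gone, the public surface reduces to an on/off toggle; a minimal caller sketch using the declarations above:

auto nodeList = DependencyManager::get<NodeList>();
nodeList->enableIgnoreRadius();                      // same as ignoreNodesInRadius(true)
bool bubbleOn = nodeList->getIgnoreRadiusEnabled();  // query the current state
nodeList->toggleIgnoreRadius();                      // flip it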
||||
|
@ -156,7 +155,6 @@ private:
|
|||
|
||||
void sendIgnoreRadiusStateToNode(const SharedNodePointer& destinationNode);
|
||||
Setting::Handle<bool> _ignoreRadiusEnabled { "IgnoreRadiusEnabled", true };
|
||||
Setting::Handle<float> _ignoreRadius { "IgnoreRadius", 1.0f };
|
||||
|
||||
#if (PR_BUILD || DEV_BUILD)
|
||||
bool _shouldSendNewerVersion { false };
|
||||
|
|
|
@ -53,7 +53,7 @@ PacketVersion versionForPacketType(PacketType packetType) {
|
|||
case PacketType::AvatarData:
|
||||
case PacketType::BulkAvatarData:
|
||||
case PacketType::KillAvatar:
|
||||
return static_cast<PacketVersion>(AvatarMixerPacketVersion::HandControllerJoints);
|
||||
return static_cast<PacketVersion>(AvatarMixerPacketVersion::HasKillAvatarReason);
|
||||
case PacketType::ICEServerHeartbeat:
|
||||
return 18; // ICE Server Heartbeat signing
|
||||
case PacketType::AssetGetInfo:
|
||||
|
|
|
@ -202,7 +202,8 @@ enum class AvatarMixerPacketVersion : PacketVersion {
|
|||
AvatarEntities,
|
||||
AbsoluteSixByteRotations,
|
||||
SensorToWorldMat,
|
||||
HandControllerJoints
|
||||
HandControllerJoints,
|
||||
HasKillAvatarReason
|
||||
};
|
||||
|
||||
enum class DomainConnectRequestVersion : PacketVersion {
|
||||
|
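Bumping KillAvatar to HasKillAvatarReason is what lets a receiver know the extra byte is present. A hedged sketch of such a version gate; the getVersion() accessor on the received message is an assumption here, and the code in this commit simply reads the byte unconditionally:

KillAvatarReason reason = KillAvatarReason::NoReason;
if (message->getVersion() >= static_cast<PacketVersion>(AvatarMixerPacketVersion::HasKillAvatarReason)) {
    // only senders speaking the bumped version append the reason byte
    message->readPrimitive(&reason);
}
removeAvatar(sessionUUID, reason);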
|
|
@ -205,7 +205,8 @@ public:
|
|||
|
||||
|
||||
signals:
|
||||
void recommendedFramebufferSizeChanged(const QSize & size);
|
||||
void recommendedFramebufferSizeChanged(const QSize& size);
|
||||
void resetSensorsRequested();
|
||||
|
||||
protected:
|
||||
void incrementPresentCount();
|
||||
|
|
|
@ -24,7 +24,7 @@ struct FrustumGrid {
|
|||
mat4 eyeToWorldMat;
|
||||
};
|
||||
|
||||
uniform frustumGridBuffer {
|
||||
layout(std140) uniform frustumGridBuffer {
|
||||
FrustumGrid frustumGrid;
|
||||
};
|
||||
|
||||
|
@ -51,16 +51,20 @@ float projection_getFar(mat4 projection) {
|
|||
#define GRID_INDEX_TYPE ivec4
|
||||
#define GRID_FETCH_BUFFER(i) i / 4][i % 4
|
||||
<@else@>
|
||||
#define GRID_NUM_ELEMENTS 16384
|
||||
#define GRID_NUM_ELEMENTS 4096
|
||||
#define GRID_INDEX_TYPE ivec4
|
||||
#define GRID_FETCH_BUFFER(i) i / 4][i % 4
|
||||
|
||||
<!#define GRID_NUM_ELEMENTS 16384
|
||||
#define GRID_INDEX_TYPE int
|
||||
#define GRID_FETCH_BUFFER(i) i
|
||||
#define GRID_FETCH_BUFFER(i) i!>
|
||||
<@endif@>
|
||||
|
||||
uniform clusterGridBuffer {
|
||||
layout(std140) uniform clusterGridBuffer {
|
||||
GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];
|
||||
};
|
||||
|
||||
uniform clusterContentBuffer {
|
||||
layout(std140) uniform clusterContentBuffer {
|
||||
GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];
|
||||
};
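The repacking keeps the scalar count constant: 4096 ivec4 elements hold the same 16384 ints, and GRID_FETCH_BUFFER(i) expands to [i / 4][i % 4] to address them; the added layout(std140) is what makes that layout dependable across drivers. The CPU-side equivalent of the indexing, using glm purely for illustration:

#include <glm/glm.hpp>

// Scalar index i maps to ivec4 element i/4, component i%4.
// 16384 scalars == 4096 ivec4s: same data, a quarter the element count.
int fetchGridValue(const glm::ivec4* table, int i) {
    return table[i / 4][i % 4];
}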
|
||||
|
||||
|
|
|
@ -27,21 +27,21 @@
|
|||
|
||||
enum LightClusterGridShader_MapSlot {
|
||||
DEFERRED_BUFFER_LINEAR_DEPTH_UNIT = 0,
|
||||
DEFERRED_BUFFER_COLOR_UNIT,
|
||||
DEFERRED_BUFFER_NORMAL_UNIT,
|
||||
DEFERRED_BUFFER_EMISSIVE_UNIT,
|
||||
DEFERRED_BUFFER_DEPTH_UNIT,
|
||||
DEFERRED_BUFFER_COLOR_UNIT = 1,
|
||||
DEFERRED_BUFFER_NORMAL_UNIT = 2,
|
||||
DEFERRED_BUFFER_EMISSIVE_UNIT = 3,
|
||||
DEFERRED_BUFFER_DEPTH_UNIT = 4,
|
||||
};
|
||||
|
||||
enum LightClusterGridShader_BufferSlot {
|
||||
LIGHT_CLUSTER_GRID_FRUSTUM_GRID_SLOT = 0,
|
||||
DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT,
|
||||
CAMERA_CORRECTION_BUFFER_SLOT,
|
||||
DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT = 1,
|
||||
CAMERA_CORRECTION_BUFFER_SLOT = 2,
|
||||
LIGHT_GPU_SLOT = render::ShapePipeline::Slot::LIGHT,
|
||||
LIGHT_INDEX_GPU_SLOT,
|
||||
LIGHT_INDEX_GPU_SLOT = 5,
|
||||
|
||||
LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT,
|
||||
LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT,
|
||||
LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT = 6,
|
||||
LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT = 7,
|
||||
};
|
||||
|
||||
FrustumGrid::FrustumGrid(const FrustumGrid& source) :
|
||||
|
|
|
@ -34,7 +34,6 @@ in vec2 _texCoord0;
|
|||
out vec4 _fragColor;
|
||||
|
||||
void main(void) {
|
||||
|
||||
// Grab the fragment data from the uv
|
||||
vec2 texCoord = _texCoord0.st;
|
||||
|
||||
|
@ -49,7 +48,7 @@ void main(void) {
|
|||
// Frag pos in world
|
||||
mat4 invViewMat = getViewInverse();
|
||||
vec4 fragPos = invViewMat * fragPosition;
|
||||
|
||||
|
||||
// From frag world pos find the cluster
|
||||
vec4 clusterEyePos = frustumGrid_worldToEye(fragPos);
|
||||
ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);
|
||||
|
@ -84,8 +83,8 @@ void main(void) {
|
|||
vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);
|
||||
vec3 fragEyeDir = normalize(fragEyeVector.xyz);
|
||||
|
||||
// COmpute the rougness into gloss2 once:
|
||||
float fragGloss2 = pow(frag.roughness + 0.001, 2.0);
|
||||
// Compute the roughness into gloss2 once:
|
||||
float fragGloss2 = pow(frag.roughness + 0.001, 4.0);
|
||||
bool withScattering = (frag.scattering * isScatteringEnabled() > 0.0);
|
||||
|
||||
int numLightTouching = 0;
|
||||
|
|
|
@ -45,6 +45,9 @@ ScriptAudioInjector* AudioScriptingInterface::playSound(SharedSoundPointer sound
|
|||
// stereo option isn't set from script, this comes from sound metadata or filename
|
||||
AudioInjectorOptions optionsCopy = injectorOptions;
|
||||
optionsCopy.stereo = sound->isStereo();
|
||||
optionsCopy.ambisonic = sound->isAmbisonic();
|
||||
optionsCopy.localOnly = optionsCopy.localOnly || sound->isAmbisonic(); // force localOnly when Ambisonic
|
||||
|
||||
auto injector = AudioInjector::playSound(sound->getByteArray(), optionsCopy);
|
||||
if (!injector) {
|
||||
return NULL;
|
||||
|
|
|
@ -52,14 +52,6 @@ void UsersScriptingInterface::disableIgnoreRadius() {
|
|||
DependencyManager::get<NodeList>()->disableIgnoreRadius();
|
||||
}
|
||||
|
||||
void UsersScriptingInterface::setIgnoreRadius(float radius, bool enabled) {
|
||||
DependencyManager::get<NodeList>()->ignoreNodesInRadius(radius, enabled);
|
||||
}
|
||||
|
||||
float UsersScriptingInterface::getIgnoreRadius() {
|
||||
return DependencyManager::get<NodeList>()->getIgnoreRadius();
|
||||
}
|
||||
|
||||
bool UsersScriptingInterface::getIgnoreRadiusEnabled() {
|
||||
return DependencyManager::get<NodeList>()->getIgnoreRadiusEnabled();
|
||||
}
|
||||
|
|
|
@ -76,21 +76,6 @@ public slots:
|
|||
*/
|
||||
void disableIgnoreRadius();
|
||||
|
||||
/**jsdoc
|
||||
* Sets the parameters for the ignore radius feature.
|
||||
* @function Users.setIgnoreRadius
|
||||
* @param {number} radius The radius for the auto ignore in radius feature
|
||||
* @param {bool} [enabled=true] Whether the ignore in radius feature should be enabled
|
||||
*/
|
||||
void setIgnoreRadius(float radius, bool enabled = true);
|
||||
|
||||
/**jsdoc
|
||||
* Returns the effective radius of the ignore radius feature if it is enabled.
|
||||
* @function Users.getIgnoreRadius
|
||||
* @return {number} radius of the ignore feature
|
||||
*/
|
||||
float getIgnoreRadius();
|
||||
|
||||
/**jsdoc
|
||||
* Returns `true` if the ignore in radius feature is enabled
|
||||
* @function Users.getIgnoreRadiusEnabled
|
||||
|
@ -101,6 +86,12 @@ public slots:
|
|||
signals:
|
||||
void canKickChanged(bool canKick);
|
||||
void ignoreRadiusEnabledChanged(bool isEnabled);
|
||||
|
||||
/**jsdoc
|
||||
* Notifies scripts that another user has entered the ignore radius
|
||||
* @function Users.enteredIgnoreRadius
|
||||
*/
|
||||
void enteredIgnoreRadius();
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -508,6 +508,11 @@ void AABox::embiggen(const glm::vec3& scale) {
|
|||
_scale *= scale;
|
||||
}
|
||||
|
||||
void AABox::setScaleStayCentered(const glm::vec3& scale) {
|
||||
_corner += -0.5f * scale;
|
||||
_scale = scale;
|
||||
}
|
||||
|
||||
void AABox::scale(float scale) {
|
||||
_corner *= scale;
|
||||
_scale *= scale;
|
||||
|
|
|
@ -96,6 +96,9 @@ public:
|
|||
void embiggen(float scale);
|
||||
void embiggen(const glm::vec3& scale);
|
||||
|
||||
// Set a new scale for the Box, but keep it centered at its current location
|
||||
void setScaleStayCentered(const glm::vec3& scale);
|
||||
|
||||
// Transform the extents with transform
|
||||
void transform(const Transform& transform);
|
||||
|
||||
|
|
|
@ -13,6 +13,7 @@ namespace hifi { namespace properties {
|
|||
const char* CRASHED = "com.highfidelity.crashed";
|
||||
const char* STEAM = "com.highfidelity.launchedFromSteam";
|
||||
const char* LOGGER = "com.highfidelity.logger";
|
||||
const char* TEST = "com.highfidelity.test";
|
||||
|
||||
namespace gl {
|
||||
const char* BACKEND = "com.highfidelity.gl.backend";
|
||||
|
|
|
@ -15,6 +15,7 @@ namespace hifi { namespace properties {
|
|||
extern const char* CRASHED;
|
||||
extern const char* STEAM;
|
||||
extern const char* LOGGER;
|
||||
extern const char* TEST;
|
||||
|
||||
namespace gl {
|
||||
extern const char* BACKEND;
|
||||
|
|
|
@ -21,6 +21,15 @@ void OculusBaseDisplayPlugin::resetSensors() {
|
|||
}
|
||||
|
||||
bool OculusBaseDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
|
||||
handleOVREvents();
|
||||
if (quitRequested()) {
|
||||
QMetaObject::invokeMethod(qApp, "quit");
|
||||
return false;
|
||||
}
|
||||
if (reorientRequested()) {
|
||||
emit resetSensorsRequested();
|
||||
}
|
||||
|
||||
_currentRenderFrameInfo = FrameInfo();
|
||||
_currentRenderFrameInfo.sensorSampleTime = ovr_GetTimeInSeconds();
|
||||
_currentRenderFrameInfo.predictedDisplayTime = ovr_GetPredictedDisplayTime(_session, frameIndex);
|
||||
|
|
|
@ -20,15 +20,15 @@
|
|||
#include <controllers/Pose.h>
|
||||
#include <NumericalConstants.h>
|
||||
|
||||
using Mutex = std::mutex;
|
||||
using Lock = std::unique_lock<Mutex>;
|
||||
|
||||
Q_DECLARE_LOGGING_CATEGORY(oculus)
|
||||
Q_LOGGING_CATEGORY(oculus, "hifi.plugins.oculus")
|
||||
|
||||
static std::atomic<uint32_t> refCount { 0 };
|
||||
static ovrSession session { nullptr };
|
||||
|
||||
static bool _quitRequested { false };
|
||||
static bool _reorientRequested { false };
|
||||
|
||||
inline ovrErrorInfo getError() {
|
||||
ovrErrorInfo error;
|
||||
ovr_GetLastErrorInfo(&error);
|
||||
|
@ -116,6 +116,26 @@ void releaseOculusSession() {
|
|||
#endif
|
||||
}
|
||||
|
||||
void handleOVREvents() {
|
||||
if (!session) {
|
||||
return;
|
||||
}
|
||||
|
||||
ovrSessionStatus status;
|
||||
if (!OVR_SUCCESS(ovr_GetSessionStatus(session, &status))) {
|
||||
return;
|
||||
}
|
||||
|
||||
_quitRequested = status.ShouldQuit;
|
||||
_reorientRequested = status.ShouldRecenter;
|
||||
}
|
||||
|
||||
bool quitRequested() {
|
||||
return _quitRequested;
|
||||
}
|
||||
bool reorientRequested() {
|
||||
return _reorientRequested;
|
||||
}
|
||||
|
||||
controller::Pose ovrControllerPoseToHandPose(
|
||||
ovrHandType hand,
|
||||
|
|
|
@ -20,6 +20,10 @@ bool oculusAvailable();
|
|||
ovrSession acquireOculusSession();
|
||||
void releaseOculusSession();
|
||||
|
||||
void handleOVREvents();
|
||||
bool quitRequested();
|
||||
bool reorientRequested();
|
||||
|
||||
// Convenience method for looping over each eye with a lambda
|
||||
template <typename Function>
|
||||
inline void ovr_for_each_eye(Function function) {
|
||||
|
|
15
scripts/developer/tests/testTestMode.js
Normal file
|
@ -0,0 +1,15 @@
|
|||
//
|
||||
// Created by Bradley Austin Davis on 2016/12/12
|
||||
// Copyright 2013-2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
print("Fooooo");
|
||||
|
||||
Script.setTimeout(function() {
|
||||
Test.quit();
|
||||
}, 10 * 1000);
|
||||
|
||||
|
|
@ -50,4 +50,16 @@ describe('Entity', function() {
|
|||
var props = Entities.getEntityProperties(boxEntity);
|
||||
expect(Math.round(props.position.z)).toEqual(Math.round(newPos.z));
|
||||
});
|
||||
|
||||
it("\'s last edited property working correctly", function() {
|
||||
var props = Entities.getEntityProperties(boxEntity);
|
||||
expect(props.lastEdited).toBeDefined();
|
||||
expect(props.lastEdited).not.toBe(0);
|
||||
var prevLastEdited = props.lastEdited;
|
||||
|
||||
// Now we make an edit to the entity, which should update its last edited time
|
||||
Entities.editEntity(boxEntity, {color: {red: 0, green: 255, blue: 0}});
|
||||
props = Entities.getEntityProperties(boxEntity);
|
||||
expect(props.lastEdited).toBeGreaterThan(prevLastEdited);
|
||||
});
|
||||
});
|
|
@ -0,0 +1,167 @@
|
|||
<html>
|
||||
<head>
|
||||
<title>Photo Booth</title>
|
||||
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
|
||||
<link rel="stylesheet" type="text/css" href="../../../../../system/html/css/edit-style.css">
|
||||
<script type="text/javascript" src="qrc:///qtwebchannel/qwebchannel.js"></script>
|
||||
|
||||
<script>
|
||||
var EventBridge;
|
||||
var openEventBridge = function (callback) {
|
||||
var WebChannel = new QWebChannel(qt.webChannelTransport, function (channel) {
|
||||
EventBridge = WebChannel.objects.eventBridgeWrapper.eventBridge;
|
||||
callback();
|
||||
});
|
||||
};
|
||||
|
||||
var emit = function (eventType, data) {
|
||||
data = data || {};
|
||||
data.type = eventType;
|
||||
EventBridge.emitWebEvent(JSON.stringify(data));
|
||||
};
|
||||
|
||||
function loaded () {
|
||||
openEventBridge(function () {
|
||||
emit("onLoad", {value: "faye"});
|
||||
|
||||
var elModelURL = document.getElementById("model-url");
|
||||
var elReloadModelButton = document.getElementById("reload-model-button");
|
||||
var elCamera = document.getElementById("property-camera");
|
||||
//var elLightingPreset = document.getElementById("property-lighting-preset");
|
||||
var elPictureButton = document.getElementById("picture-button");
|
||||
|
||||
elReloadModelButton.addEventListener('click', function() {
|
||||
emit("onClickReloadModelButton", {value: elModelURL.value});
|
||||
});
|
||||
elCamera.addEventListener('change', function() {
|
||||
emit("onSelectCamera", {value: this.value});
|
||||
});
|
||||
// elLightingPreset.addEventListener('change', function() {
|
||||
// emit("onSelectLightingPreset", {value: "faye"});
|
||||
// });
|
||||
elPictureButton.addEventListener('click', function() {
|
||||
emit("onClickPictureButton", {value: "faye"});
|
||||
});
|
||||
|
||||
|
||||
});
|
||||
|
||||
// Drop downs
|
||||
function setDropdownText(dropdown) {
|
||||
var lis = dropdown.parentNode.getElementsByTagName("li");
|
||||
var text = "";
|
||||
for (var i = 0; i < lis.length; i++) {
|
||||
if (lis[i].getAttribute("value") === dropdown.value) {
|
||||
text = lis[i].textContent;
|
||||
}
|
||||
}
|
||||
dropdown.firstChild.textContent = text;
|
||||
}
|
||||
function toggleDropdown(event) {
|
||||
var element = event.target;
|
||||
if (element.nodeName !== "DT") {
|
||||
element = element.parentNode;
|
||||
}
|
||||
element = element.parentNode;
|
||||
var isDropped = element.getAttribute("dropped");
|
||||
element.setAttribute("dropped", isDropped !== "true" ? "true" : "false");
|
||||
}
|
||||
function setDropdownValue(event) {
|
||||
var dt = event.target.parentNode.parentNode.previousSibling;
|
||||
dt.value = event.target.getAttribute("value");
|
||||
dt.firstChild.textContent = event.target.textContent;
|
||||
|
||||
dt.parentNode.setAttribute("dropped", "false");
|
||||
|
||||
var evt = document.createEvent("HTMLEvents");
|
||||
evt.initEvent("change", true, true);
|
||||
dt.dispatchEvent(evt);
|
||||
}
|
||||
|
||||
var elDropdowns = document.getElementsByTagName("select");
|
||||
for (var i = 0; i < elDropdowns.length; i++) {
|
||||
var options = elDropdowns[i].getElementsByTagName("option");
|
||||
var selectedOption = 0;
|
||||
for (var j = 0; j < options.length; j++) {
|
||||
if (options[j].getAttribute("selected") === "selected") {
|
||||
selectedOption = j;
|
||||
}
|
||||
}
|
||||
var div = elDropdowns[i].parentNode;
|
||||
|
||||
var dl = document.createElement("dl");
|
||||
div.appendChild(dl);
|
||||
|
||||
var dt = document.createElement("dt");
|
||||
dt.name = elDropdowns[i].name;
|
||||
dt.id = elDropdowns[i].id;
|
||||
dt.addEventListener("click", toggleDropdown, true);
|
||||
dl.appendChild(dt);
|
||||
|
||||
var span = document.createElement("span");
|
||||
span.setAttribute("value", options[selectedOption].value);
|
||||
span.textContent = options[selectedOption].firstChild.textContent;
|
||||
dt.appendChild(span);
|
||||
|
||||
var span = document.createElement("span");
|
||||
span.textContent = "5"; // caratDn
|
||||
dt.appendChild(span);
|
||||
|
||||
var dd = document.createElement("dd");
|
||||
dl.appendChild(dd);
|
||||
|
||||
var ul = document.createElement("ul");
|
||||
dd.appendChild(ul);
|
||||
|
||||
for (var j = 0; j < options.length; j++) {
|
||||
var li = document.createElement("li");
|
||||
li.setAttribute("value", options[j].value);
|
||||
li.textContent = options[j].firstChild.textContent;
|
||||
li.addEventListener("click", setDropdownValue);
|
||||
ul.appendChild(li);
|
||||
}
|
||||
}
|
||||
elDropdowns = document.getElementsByTagName("select");
|
||||
while (elDropdowns.length > 0) {
|
||||
var el = elDropdowns[0];
|
||||
el.parentNode.removeChild(el);
|
||||
elDropdowns = document.getElementsByTagName("select");
|
||||
}
|
||||
}
|
||||
</script>
|
||||
|
||||
<style>
|
||||
</style>
|
||||
</head>
|
||||
<body onload="loaded()">
|
||||
<div id="properties-list">
|
||||
<div class="property url refresh">
|
||||
<label>Model URL</label>
|
||||
<input type="text" id="model-url"></input>
|
||||
<input type="button" id="reload-model-button" class="glyph" value="F">
|
||||
</div>
|
||||
<!--
|
||||
<div class="property dropdown">
|
||||
<label>Lighting Preset</label>
|
||||
<select id="property-lighting-preset">
|
||||
<option>Default Lighting</option>
|
||||
<option>Sam's Cool Light</option>
|
||||
<option>Alan's Light Magic</option>
|
||||
</select>
|
||||
</div>
|
||||
-->
|
||||
<div class="property dropdown">
|
||||
<label>Camera</label>
|
||||
<select id="property-camera">
|
||||
<option>First Person Camera</option>
|
||||
<option>Main Camera</option>
|
||||
<option>Left Camera</option>
|
||||
<option>Right Camera</option>
|
||||
</select>
|
||||
</div>
|
||||
<div class="property">
|
||||
<input id="picture-button" type="button" class="blue" value="Take Picture">
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
177
scripts/developer/utilities/render/photobooth/photobooth.js
Normal file
|
@ -0,0 +1,177 @@
|
|||
(function () {
|
||||
var SNAPSHOT_DELAY = 500; // 500ms
|
||||
var PHOTOBOOTH_WINDOW_HTML_URL = Script.resolvePath("./html/photobooth.html");
|
||||
var PHOTOBOOTH_SETUP_JSON_URL = Script.resolvePath("./photoboothSetup.json");
|
||||
var toolbar = Toolbars.getToolbar("com.highfidelity.interface.toolbar.system");
|
||||
var MODEL_BOUNDING_BOX_DIMENSIONS = {x: 1.0174,y: 1.1925,z: 1.0165};
|
||||
|
||||
var PhotoBooth = {};
|
||||
PhotoBooth.init = function () {
|
||||
var success = Clipboard.importEntities(PHOTOBOOTH_SETUP_JSON_URL);
|
||||
var frontFactor = 10;
|
||||
var frontUnitVec = Vec3.normalize(Quat.getFront(MyAvatar.orientation));
|
||||
var frontOffset = Vec3.multiply(frontUnitVec,frontFactor);
|
||||
var rightFactor = 3;
|
||||
var rightUnitVec = Vec3.normalize(Quat.getRight(MyAvatar.orientation));
|
||||
var rightOffset = Vec3.multiply(rightUnitVec, rightFactor);
var spawnLocation = Vec3.sum(Vec3.sum(MyAvatar.position, frontOffset), rightOffset);
|
||||
if (success) {
|
||||
this.pastedEntityIDs = Clipboard.pasteEntities(spawnLocation);
|
||||
this.processPastedEntities();
|
||||
|
||||
}
|
||||
};
|
||||
|
||||
PhotoBooth.processPastedEntities = function () {
|
||||
var cameraResults = {};
|
||||
var modelResult;
|
||||
var modelPos;
|
||||
this.pastedEntityIDs.forEach(function(id) {
|
||||
var props = Entities.getEntityProperties(id);
|
||||
var parts = props["name"].split(":");
|
||||
if (parts[0] === "Photo Booth Camera") {
|
||||
cameraResults[parts[1]] = id;
|
||||
}
|
||||
if (parts[0] === "Photo Booth Model") {
|
||||
modelResult = id;
|
||||
modelPos = props.position;
|
||||
}
|
||||
});
|
||||
print(JSON.stringify(cameraResults));
|
||||
print(JSON.stringify(modelResult));
|
||||
this.cameraEntities = cameraResults;
|
||||
this.modelEntityID = modelResult;
|
||||
this.centrePos = modelPos;
|
||||
};
|
||||
|
||||
// replace the model in the scene with a new one
|
||||
PhotoBooth.changeModel = function (newModelURL) {
|
||||
// deletes old model
|
||||
Entities.deleteEntity(this.modelEntityID);
|
||||
// create new model at centre of the photobooth
|
||||
var newProps = {
|
||||
name: "Photo Booth Model",
|
||||
type: "Model",
|
||||
modelURL: newModelURL,
|
||||
position: this.centrePos
|
||||
};
|
||||
var newModelEntityID = Entities.addEntity(newProps);
|
||||
|
||||
// scale model dimensions to fit in bounding box
|
||||
var scaleModel = function () {
|
||||
newProps = Entities.getEntityProperties(newModelEntityID);
|
||||
var myDimensions = newProps.dimensions;
|
||||
print("myDimensions: " + JSON.stringify(myDimensions));
|
||||
var k;
|
||||
if (myDimensions.x > MODEL_BOUNDING_BOX_DIMENSIONS.x) {
|
||||
k = MODEL_BOUNDING_BOX_DIMENSIONS.x / myDimensions.x;
|
||||
myDimensions = Vec3.multiply(k, myDimensions);
|
||||
}
|
||||
if (myDimensions.y > MODEL_BOUNDING_BOX_DIMENSIONS.y) {
|
||||
k = MODEL_BOUNDING_BOX_DIMENSIONS.y / myDimensions.y;
|
||||
myDimensions = Vec3.multiply(k, myDimensions);
|
||||
}
|
||||
if (myDimensions.z > MODEL_BOUNDING_BOX_DIMENSIONS.z) {
|
||||
k = MODEL_BOUNDING_BOX_DIMENSIONS.z / myDimensions.z;
|
||||
myDimensions = Vec3.multiply(k, myDimensions);
|
||||
}
|
||||
// position the new model on the table
|
||||
var y_offset = (MODEL_BOUNDING_BOX_DIMENSIONS.y - myDimensions.y) / 2;
|
||||
var myPosition = Vec3.sum(newProps.position, {x:0, y:-y_offset, z:0});
|
||||
Entities.editEntity(newModelEntityID,{position: myPosition, dimensions: myDimensions});
|
||||
};
|
||||
|
||||
// add a delay before scaling to make sure the entity server has gotten the right model dimensions
|
||||
Script.setTimeout(function () {
|
||||
scaleModel();
|
||||
}, 400);
|
||||
|
||||
this.modelEntityID = newModelEntityID;
|
||||
};
|
||||
|
||||
PhotoBooth.destroy = function () {
|
||||
this.pastedEntityIDs.forEach(function(id) {
|
||||
Entities.deleteEntity(id);
|
||||
});
|
||||
Entities.deleteEntity(this.modelEntityID);
|
||||
};
|
||||
|
||||
var main = function () {
|
||||
PhotoBooth.init();
|
||||
|
||||
var photoboothWindowListener = {};
|
||||
photoboothWindowListener.onLoad = function (event) {
|
||||
print("loaded" + event.value);
|
||||
if (!event.hasOwnProperty("value")){
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
photoboothWindowListener.onSelectCamera = function (event) {
|
||||
print("selected camera " + event.value);
|
||||
if (!event.hasOwnProperty("value")){
|
||||
return;
|
||||
}
|
||||
if (event.value === "First Person Camera") {
|
||||
Camera.mode = "first person";
|
||||
} else {
|
||||
Camera.mode = "entity";
|
||||
var cameraID = PhotoBooth.cameraEntities[event.value];
|
||||
Camera.setCameraEntity(cameraID);
|
||||
}
|
||||
};
|
||||
|
||||
photoboothWindowListener.onSelectLightingPreset = function (event) {
|
||||
print("selected lighting preset" + event.value);
|
||||
if (!event.hasOwnProperty("value")){
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
photoboothWindowListener.onClickPictureButton = function (event) {
|
||||
print("clicked picture button");
|
||||
// hide HUD tool bar
|
||||
toolbar.writeProperty("visible", false);
|
||||
// hide Overlays (such as Running Scripts or other Dialog UI)
|
||||
Menu.setIsOptionChecked("Overlays", false);
|
||||
// hide mouse cursor
|
||||
Reticle.visible = false;
|
||||
// give a delay here before snapshotting so that there is time to hide the toolbar and other UIs
|
||||
// void WindowScriptingInterface::takeSnapshot(bool notify, bool includeAnimated, float aspectRatio)
|
||||
Script.setTimeout(function () {
|
||||
Window.takeSnapshot(false, false, 1.91);
|
||||
// show hidden items after snapshot is taken
|
||||
toolbar.writeProperty("visible", true);
|
||||
Menu.setIsOptionChecked("Overlays", true);
|
||||
// unknown issue: the mouse cursor returns after the snapshot even though the script never resets it to visible
|
||||
// Reticle.visible = true;
|
||||
}, SNAPSHOT_DELAY);
|
||||
};
|
||||
|
||||
photoboothWindowListener.onClickReloadModelButton = function (event) {
|
||||
print("clicked reload model button " + event.value);
|
||||
PhotoBooth.changeModel(event.value);
|
||||
};
|
||||
|
||||
var photoboothWindow = new OverlayWebWindow({
|
||||
title: 'Photo Booth',
|
||||
source: PHOTOBOOTH_WINDOW_HTML_URL,
|
||||
width: 450,
|
||||
height: 450,
|
||||
visible: true
|
||||
});
|
||||
|
||||
photoboothWindow.webEventReceived.connect(function (data) {
|
||||
var event = JSON.parse(data);
|
||||
if (photoboothWindowListener[event.type]) {
|
||||
photoboothWindowListener[event.type](event);
|
||||
}
|
||||
});
|
||||
};
|
||||
main();
|
||||
|
||||
function cleanup() {
|
||||
Camera.mode = "first person";
|
||||
PhotoBooth.destroy();
|
||||
}
|
||||
Script.scriptEnding.connect(cleanup);
|
||||
}());
|
|
@ -0,0 +1,434 @@
|
|||
{
|
||||
"Entities": [
|
||||
{
|
||||
"clientOnly": 0,
|
||||
"collisionless": 1,
|
||||
"color": {
|
||||
"blue": 149,
|
||||
"green": 245,
|
||||
"red": 245
|
||||
},
|
||||
"created": "2016-12-10T00:09:18Z",
|
||||
"dimensions": {
|
||||
"x": 0.05000000074505806,
|
||||
"y": 0.05000000074505806,
|
||||
"z": 0.0099999997764825821
|
||||
},
|
||||
"id": "{3925ba89-b94a-4555-97d4-18c217655b73}",
|
||||
"ignoreForCollisions": 1,
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"name": "Photo Booth Camera:Main Camera",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"parentID": "{16f2cec8-2ba0-4567-ac3c-89b2779b1351}",
|
||||
"position": {
|
||||
"x": -0.021826114505529404,
|
||||
"y": -0.25215747952461243,
|
||||
"z": 0.17469465732574463
|
||||
},
|
||||
"queryAACube": {
|
||||
"scale": 0.2142428457736969,
|
||||
"x": 18.361545562744141,
|
||||
"y": -200.90092468261719,
|
||||
"z": -10.209855079650879
|
||||
},
|
||||
"rotation": {
|
||||
"w": -1.52587890625e-05,
|
||||
"x": -1.52587890625e-05,
|
||||
"y": 1,
|
||||
"z": -1.52587890625e-05
|
||||
},
|
||||
"shape": "Cube",
|
||||
"type": "Box",
|
||||
"visible": 0
|
||||
},
|
||||
{
|
||||
"clientOnly": 0,
|
||||
"collisionless": 1,
|
||||
"color": {
|
||||
"blue": 149,
|
||||
"green": 245,
|
||||
"red": 245
|
||||
},
|
||||
"created": "2016-12-10T00:09:18Z",
|
||||
"dimensions": {
|
||||
"x": 0.05000000074505806,
|
||||
"y": 0.05000000074505806,
|
||||
"z": 0.0099999997764825821
|
||||
},
|
||||
"id": "{d2bbcea8-7cdc-4f16-a017-a4567fd82a61}",
|
||||
"ignoreForCollisions": 1,
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"name": "Photo Booth Camera:Left Camera",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"parentID": "{349e0687-52d4-4e0d-a03c-88f5bf427ce8}",
|
||||
"position": {
|
||||
"x": -0.021829158067703247,
|
||||
"y": -0.25214886665344238,
|
||||
"z": 0.17469853162765503
|
||||
},
|
||||
"queryAACube": {
|
||||
"scale": 0.071414284408092499,
|
||||
"x": 19.931711196899414,
|
||||
"y": -200.49835205078125,
|
||||
"z": -10.340259552001953
|
||||
},
|
||||
"rotation": {
|
||||
"w": -1.52587890625e-05,
|
||||
"x": -7.62939453125e-05,
|
||||
"y": 1,
|
||||
"z": -1.52587890625e-05
|
||||
},
|
||||
"shape": "Cube",
|
||||
"type": "Box",
|
||||
"visible": 0
|
||||
},
|
||||
{
|
||||
"clientOnly": 0,
|
||||
"collisionless": 1,
|
||||
"color": {
|
||||
"blue": 149,
|
||||
"green": 245,
|
||||
"red": 245
|
||||
},
|
||||
"created": "2016-12-10T00:09:18Z",
|
||||
"dimensions": {
|
||||
"x": 0.05000000074505806,
|
||||
"y": 0.05000000074505806,
|
||||
"z": 0.0099999997764825821
|
||||
},
|
||||
"id": "{24f1dbc2-6feb-4d01-81f6-9c88d41a7fcc}",
|
||||
"ignoreForCollisions": 1,
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"name": "Photo Booth Camera:Right Camera",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"parentID": "{f81a5b69-6d64-4a79-8233-a4cc182bf9f8}",
|
||||
"position": {
|
||||
"x": -0.022934332489967346,
|
||||
"y": -0.25898283720016479,
|
||||
"z": 0.17889007925987244
|
||||
},
|
||||
"queryAACube": {
|
||||
"scale": 0.2142428457736969,
|
||||
"x": 17.050743103027344,
|
||||
"y": -199.81195068359375,
|
||||
"z": -9.5248327255249023
|
||||
},
|
||||
"rotation": {
|
||||
"w": -7.62939453125e-05,
|
||||
"x": -1.52587890625e-05,
|
||||
"y": 1,
|
||||
"z": -4.57763671875e-05
|
||||
},
|
||||
"shape": "Cube",
|
||||
"type": "Box",
|
||||
"visible": 0
|
||||
},
|
||||
{
|
||||
"clientOnly": 0,
|
||||
"collisionless": 1,
|
||||
"created": "2016-12-10T00:09:18Z",
|
||||
"dimensions": {
|
||||
"x": 0.43360000848770142,
|
||||
"y": 0.65679997205734253,
|
||||
"z": 0.42155000567436218
|
||||
},
|
||||
"gravity": {
|
||||
"x": 0,
|
||||
"y": -9,
|
||||
"z": 0
|
||||
},
|
||||
"id": "{349e0687-52d4-4e0d-a03c-88f5bf427ce8}",
|
||||
"ignoreForCollisions": 1,
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"modelURL": "http://hifi-content.s3.amazonaws.com/caitlyn/production/lazybonesToybox/cameras/35mm%20camera.fbx?232222",
|
||||
"name": "35 MM SLR by Lazybones",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"parentID": "{8da928c7-a1d0-4a2e-b256-047498ddcfe7}",
|
||||
"position": {
|
||||
"x": -1.3280529975891113,
|
||||
"y": -0.65367329120635986,
|
||||
"z": 2.894162654876709
|
||||
},
|
||||
"queryAACube": {
|
||||
"scale": 2.678412914276123,
|
||||
"x": 18.716829299926758,
|
||||
"y": -201.52906799316406,
|
||||
"z": -11.754700660705566
|
||||
},
|
||||
"rotation": {
|
||||
"w": 0.25635090470314026,
|
||||
"x": 0.016817826777696609,
|
||||
"y": 0.96435952186584473,
|
||||
"z": -0.063177049160003662
|
||||
},
|
||||
"scriptTimestamp": 1479859451129,
|
||||
"shapeType": "simple-hull",
|
||||
"type": "Model",
|
||||
"userData": "{\"grabbableKey\":{\"grabbable\":true},\"wearable\":{\"joints\":{\"LeftHand\":[{\"x\":-0.23937,\"y\":0.334177,\"z\":0.150116},{\"x\":-0.31183,\"y\":0.535888,\"z\":-0.37311,\"w\":-0.69021}],\"RightHand\":[{\"x\":0.11031082272529602,\"y\":0.19449540972709656,\"z\":0.0405043363571167},{\"x\":0.2807741165161133,\"y\":0.6332069635391235,\"z\":0.2997693121433258,\"w\":-0.6557632088661194}]}}}"
|
||||
},
|
||||
{
|
||||
"clientOnly": 0,
|
||||
"collisionless": 1,
|
||||
"created": "2016-12-10T00:09:18Z",
|
||||
"dimensions": {
|
||||
"x": 0.43360000848770142,
|
||||
"y": 0.65679997205734253,
|
||||
"z": 0.42160001397132874
|
||||
},
|
||||
"gravity": {
|
||||
"x": 0,
|
||||
"y": -9,
|
||||
"z": 0
|
||||
},
|
||||
"id": "{16f2cec8-2ba0-4567-ac3c-89b2779b1351}",
|
||||
"ignoreForCollisions": 1,
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"modelURL": "http://hifi-content.s3.amazonaws.com/caitlyn/production/lazybonesToybox/cameras/35mm%20camera.fbx?232222",
|
||||
"name": "35 MM SLR by Lazybones",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"parentID": "{8da928c7-a1d0-4a2e-b256-047498ddcfe7}",
|
||||
"position": {
|
||||
"x": 0.23746109008789062,
|
||||
"y": -1.0055081844329834,
|
||||
"z": 2.7560458183288574
|
||||
},
|
||||
"queryAACube": {
|
||||
"scale": 2.6784837245941162,
|
||||
"x": 17.151266098022461,
|
||||
"y": -201.88088989257812,
|
||||
"z": -11.616677284240723
|
||||
},
|
||||
"rotation": {
|
||||
"w": 4.6566128730773926e-10,
|
||||
"x": 4.6566128730773926e-10,
|
||||
"y": 1,
|
||||
"z": 3.0517578125e-05
|
||||
},
|
||||
"scriptTimestamp": 1479859505510,
|
||||
"shapeType": "simple-hull",
|
||||
"type": "Model",
|
||||
"userData": "{\"grabbableKey\":{\"grabbable\":true},\"wearable\":{\"joints\":{\"LeftHand\":[{\"x\":-0.23937,\"y\":0.334177,\"z\":0.150116},{\"x\":-0.31183,\"y\":0.535888,\"z\":-0.37311,\"w\":-0.69021}],\"RightHand\":[{\"x\":0.11031082272529602,\"y\":0.19449540972709656,\"z\":0.0405043363571167},{\"x\":0.2807741165161133,\"y\":0.6332069635391235,\"z\":0.2997693121433258,\"w\":-0.6557632088661194}]}}}"
|
||||
},
|
||||
{
|
||||
"clientOnly": 0,
|
||||
"collisionless": 1,
|
||||
"created": "2016-12-10T00:09:18Z",
|
||||
"dimensions": {
|
||||
"x": 0.43360000848770142,
|
||||
"y": 0.65679997205734253,
|
||||
"z": 0.42155000567436218
|
||||
},
|
||||
"gravity": {
|
||||
"x": 0,
|
||||
"y": -9,
|
||||
"z": 0
|
||||
},
|
||||
"id": "{f81a5b69-6d64-4a79-8233-a4cc182bf9f8}",
|
||||
"ignoreForCollisions": 1,
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"modelURL": "http://hifi-content.s3.amazonaws.com/caitlyn/production/lazybonesToybox/cameras/35mm%20camera.fbx?232222",
|
||||
"name": "35 MM SLR by Lazybones",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"parentID": "{8da928c7-a1d0-4a2e-b256-047498ddcfe7}",
|
||||
"position": {
|
||||
"x": 1.5688023567199707,
|
||||
"y": 0.14506533741950989,
|
||||
"z": 1.930147647857666
|
||||
},
|
||||
"queryAACube": {
|
||||
"scale": 2.678412914276123,
|
||||
"x": 15.81997013092041,
|
||||
"y": -200.73027038574219,
|
||||
"z": -10.790749549865723
|
||||
},
|
||||
"rotation": {
|
||||
"w": -0.3342345654964447,
|
||||
"x": -0.089916400611400604,
|
||||
"y": 0.90600436925888062,
|
||||
"z": -0.24358996748924255
|
||||
},
|
||||
"scriptTimestamp": 1479859456707,
|
||||
"shapeType": "simple-hull",
|
||||
"type": "Model",
|
||||
"userData": "{\"grabbableKey\":{\"grabbable\":true},\"wearable\":{\"joints\":{\"LeftHand\":[{\"x\":-0.23937,\"y\":0.334177,\"z\":0.150116},{\"x\":-0.31183,\"y\":0.535888,\"z\":-0.37311,\"w\":-0.69021}],\"RightHand\":[{\"x\":0.11031082272529602,\"y\":0.19449540972709656,\"z\":0.0405043363571167},{\"x\":0.2807741165161133,\"y\":0.6332069635391235,\"z\":0.2997693121433258,\"w\":-0.6557632088661194}]}}}"
|
||||
},
|
||||
{
|
||||
"clientOnly": 0,
|
||||
"created": "2016-12-10T01:03:54Z",
|
||||
"dimensions": {
|
||||
"x": 1.0173934698104858,
|
||||
"y": 1.1924806833267212,
|
||||
"z": 1.0164999961853027
|
||||
},
|
||||
"id": "{5d8e6ca9-eec6-4cd9-9931-343c8e122f5d}",
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"modelURL": "http://hifi-content.s3.amazonaws.com/alan/dev/Test-Object-7-metal.fbx",
|
||||
"name": "Photo Booth Model",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"parentID": "{8da928c7-a1d0-4a2e-b256-047498ddcfe7}",
|
||||
"position": {
|
||||
"x": 0.19543394446372986,
|
||||
"y": -1.2587889432907104,
|
||||
"z": 0.30047845840454102
|
||||
},
|
||||
"queryAACube": {
|
||||
"scale": 5.6047611236572266,
|
||||
"x": 15.730070114135742,
|
||||
"y": -201.99229431152344,
|
||||
"z": -10.624255180358887
|
||||
},
|
||||
"rotation": {
|
||||
"w": -1.52587890625e-05,
|
||||
"x": 1.52587890625e-05,
|
||||
"y": 1,
|
||||
"z": 1.52587890625e-05
|
||||
},
|
||||
"type": "Model"
|
||||
},
|
||||
{
|
||||
"clientOnly": 0,
|
||||
"created": "2016-12-10T00:50:48Z",
|
||||
"cutoff": 90,
|
||||
"dimensions": {
|
||||
"x": 6.2771282196044922,
|
||||
"y": 6.2771282196044922,
|
||||
"z": 6.2771282196044922
|
||||
},
|
||||
"falloffRadius": 0.5,
|
||||
"id": "{0fe028c8-4a14-482f-acf0-b26beb0224b2}",
|
||||
"intensity": 3,
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"name": "Stage Light 1",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"parentID": "{8da928c7-a1d0-4a2e-b256-047498ddcfe7}",
|
||||
"position": {
|
||||
"x": 1.8010828495025635,
|
||||
"y": -0.796134352684021,
|
||||
"z": -0.071148976683616638
|
||||
},
|
||||
"queryAACube": {
|
||||
"scale": 32.616912841796875,
|
||||
"x": 0.61834812164306641,
|
||||
"y": -212.75062561035156,
|
||||
"z": -23.758739471435547
|
||||
},
|
||||
"rotation": {
|
||||
"w": 4.6566128730773926e-10,
|
||||
"x": 4.6566128730773926e-10,
|
||||
"y": 1,
|
||||
"z": 3.0517578125e-05
|
||||
},
|
||||
"type": "Light"
|
||||
},
|
||||
{
|
||||
"backgroundMode": "skybox",
|
||||
"clientOnly": 0,
|
||||
"created": "2016-12-10T00:18:02Z",
|
||||
"dimensions": {
|
||||
"x": 48.120223999023438,
|
||||
"y": 16.455753326416016,
|
||||
"z": 26.184453964233398
|
||||
},
|
||||
"id": "{98031a0d-1667-4bd0-8ac2-cf0a869b46f9}",
|
||||
"keyLight": {
|
||||
"ambientIntensity": 0.69999998807907104,
|
||||
"direction": {
|
||||
"x": 0,
|
||||
"y": -0.61566150188446045,
|
||||
"z": 0.78801077604293823
|
||||
},
|
||||
"intensity": 0.30000001192092896
|
||||
},
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"name": "Stage Zone",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"parentID": "{8da928c7-a1d0-4a2e-b256-047498ddcfe7}",
|
||||
"position": {
|
||||
"x": 0.069642722606658936,
|
||||
"y": 3.9073746204376221,
|
||||
"z": 4.0827426910400391
|
||||
},
|
||||
"queryAACube": {
|
||||
"scale": 171.60348510742188,
|
||||
"x": -67.143234252929688,
|
||||
"y": -277.95611572265625,
|
||||
"z": -97.405723571777344
|
||||
},
|
||||
"rotation": {
|
||||
"w": 4.6566128730773926e-10,
|
||||
"x": 4.6566128730773926e-10,
|
||||
"y": 1,
|
||||
"z": 3.0517578125e-05
|
||||
},
|
||||
"shapeType": "box",
|
||||
"skybox": {
|
||||
"url": "http://hifi-content.s3.amazonaws.com/alan/dev/Skybox-Sun-high.png"
|
||||
},
|
||||
"type": "Zone"
|
||||
},
|
||||
{
|
||||
"clientOnly": 0,
|
||||
"created": "2016-12-10T00:11:29Z",
|
||||
"dimensions": {
|
||||
"x": 46.438556671142578,
|
||||
"y": 5.1657028198242188,
|
||||
"z": 9.020604133605957
|
||||
},
|
||||
"id": "{8da928c7-a1d0-4a2e-b256-047498ddcfe7}",
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"modelURL": "http://hifi-content.s3.amazonaws.com/alan/dev/PhotoStage-v1.fbx",
|
||||
"name": "Photo Booth Stage",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"queryAACube": {
|
||||
"scale": 47.587764739990234,
|
||||
"x": -23.793882369995117,
|
||||
"y": -23.793882369995117,
|
||||
"z": -23.793882369995117
|
||||
},
|
||||
"rotation": {
|
||||
"w": -1.52587890625e-05,
|
||||
"x": -1.52587890625e-05,
|
||||
"y": -1,
|
||||
"z": -1.52587890625e-05
|
||||
},
|
||||
"shapeType": "static-mesh",
|
||||
"type": "Model"
|
||||
},
|
||||
{
|
||||
"clientOnly": 0,
|
||||
"created": "2016-12-10T00:50:48Z",
|
||||
"cutoff": 90,
|
||||
"dimensions": {
|
||||
"x": 6.2771282196044922,
|
||||
"y": 6.2771282196044922,
|
||||
"z": 6.2771282196044922
|
||||
},
|
||||
"falloffRadius": 0.5,
|
||||
"id": "{cf16fbc7-0a5b-4850-a03a-40e1499c3c5b}",
|
||||
"intensity": 3,
|
||||
"lastEditedBy": "{42574037-43c0-4446-8d75-56a232fa2da5}",
|
||||
"name": "Stage Light 2",
|
||||
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
|
||||
"parentID": "{8da928c7-a1d0-4a2e-b256-047498ddcfe7}",
|
||||
"position": {
|
||||
"x": -1.7681519985198975,
|
||||
"y": -0.79602539539337158,
|
||||
"z": -0.071040049195289612
|
||||
},
|
||||
"queryAACube": {
|
||||
"scale": 32.616912841796875,
|
||||
"x": 4.1875829696655273,
|
||||
"y": -212.75062561035156,
|
||||
"z": -23.758739471435547
|
||||
},
|
||||
"rotation": {
|
||||
"w": 4.6566128730773926e-10,
|
||||
"x": 4.6566128730773926e-10,
|
||||
"y": 1,
|
||||
"z": 3.0517578125e-05
|
||||
},
|
||||
"type": "Light"
|
||||
}
|
||||
],
|
||||
"Version": 65
|
||||
}
|
BIN
scripts/system/assets/models/bubble-v12.fbx
Normal file
Binary file not shown.
BIN
scripts/system/assets/sounds/bubble.wav
Normal file
Binary file not shown.
93
scripts/system/avatarFinderBeacon.js
Normal file
|
@ -0,0 +1,93 @@
|
|||
// avatarFinderBeacon.js
|
||||
//
|
||||
// Created by Thijs Wenker on 12/7/16
|
||||
// Copyright 2016 High Fidelity, Inc.
|
||||
//
|
||||
// Shows a 2 km long red beam for each avatar outside of a 20 meter radius of your avatar; tries to ignore AC Agents.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
|
||||
var MIN_DISPLAY_DISTANCE = 20.0; // meters
|
||||
var BEAM_COLOR = {red: 255, green: 0, blue: 0};
|
||||
var SHOW_THROUGH_WALLS = false;
|
||||
var BEACON_LENGTH = 2000.0; // meters
|
||||
var TRY_TO_IGNORE_AC_AGENTS = true;
|
||||
|
||||
var HALF_BEACON_LENGTH = BEACON_LENGTH / 2.0;
|
||||
|
||||
var beacons = {};
|
||||
|
||||
// List of .fst files used by AC scripts; avatars using these are ignored when TRY_TO_IGNORE_AC_AGENTS is enabled
|
||||
var POSSIBLE_AC_AVATARS = [
|
||||
'http://hifi-content.s3.amazonaws.com/ozan/dev/avatars/invisible_avatar/invisible_avatar.fst',
|
||||
'http://hifi-content.s3.amazonaws.com/ozan/dev/avatars/camera_man/pod/_latest/camera_man_pod.fst'
|
||||
];
|
||||
|
||||
AvatarFinderBeacon = function(avatar) {
|
||||
var visible = false;
|
||||
var avatarSessionUUID = avatar.sessionUUID;
|
||||
this.overlay = Overlays.addOverlay('line3d', {
|
||||
color: BEAM_COLOR,
|
||||
dashed: false,
|
||||
start: Vec3.sum(avatar.position, {x: 0, y: -HALF_BEACON_LENGTH, z: 0}),
|
||||
end: Vec3.sum(avatar.position, {x: 0, y: HALF_BEACON_LENGTH, z: 0}),
|
||||
rotation: {x: 0, y: 0, z: 0, w: 1},
|
||||
visible: visible,
|
||||
drawInFront: SHOW_THROUGH_WALLS,
|
||||
ignoreRayIntersection: true,
|
||||
parentID: avatarSessionUUID,
|
||||
parentJointIndex: -2
|
||||
});
|
||||
this.cleanup = function() {
|
||||
Overlays.deleteOverlay(this.overlay);
|
||||
};
|
||||
this.shouldShow = function() {
|
||||
return Vec3.distance(MyAvatar.position, avatar.position) >= MIN_DISPLAY_DISTANCE;
|
||||
};
|
||||
this.update = function() {
|
||||
avatar = AvatarList.getAvatar(avatarSessionUUID);
|
||||
Overlays.editOverlay(this.overlay, {
|
||||
visible: this.shouldShow()
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
function updateBeacon(avatarSessionUUID) {
|
||||
if (!(avatarSessionUUID in beacons)) {
|
||||
var avatar = AvatarList.getAvatar(avatarSessionUUID);
|
||||
if (TRY_TO_IGNORE_AC_AGENTS
|
||||
&& (POSSIBLE_AC_AVATARS.indexOf(avatar.skeletonModelURL) !== -1 || Vec3.length(avatar.position) === 0.0)) {
|
||||
return;
|
||||
}
|
||||
beacons[avatarSessionUUID] = new AvatarFinderBeacon(avatar);
|
||||
return;
|
||||
}
|
||||
beacons[avatarSessionUUID].update();
|
||||
}
|
||||
|
||||
Window.domainChanged.connect(function () {
|
||||
beacons = {};
|
||||
});
|
||||
|
||||
Script.update.connect(function() {
|
||||
AvatarList.getAvatarIdentifiers().forEach(function(avatarSessionUUID) {
|
||||
updateBeacon(avatarSessionUUID);
|
||||
});
|
||||
});
|
||||
|
||||
AvatarList.avatarRemovedEvent.connect(function(avatarSessionUUID) {
|
||||
if (avatarSessionUUID in beacons) {
|
||||
beacons[avatarSessionUUID].cleanup();
|
||||
delete beacons[avatarSessionUUID];
|
||||
}
|
||||
});
|
||||
|
||||
Script.scriptEnding.connect(function() {
|
||||
for (var sessionUUID in beacons) {
|
||||
if (!beacons.hasOwnProperty(sessionUUID)) {
|
||||
return;
|
||||
}
|
||||
beacons[sessionUUID].cleanup();
|
||||
}
|
||||
});
|
|
@ -13,42 +13,181 @@
|
|||
/* global Toolbars, Script, Users, Overlays, AvatarList, Controller, Camera, getControllerWorldLocation */
|
||||
|
||||
|
||||
(function() { // BEGIN LOCAL_SCOPE
|
||||
(function () { // BEGIN LOCAL_SCOPE
|
||||
|
||||
// grab the toolbar
|
||||
var toolbar = Toolbars.getToolbar("com.highfidelity.interface.toolbar.system");
|
||||
// grab the toolbar
|
||||
var toolbar = Toolbars.getToolbar("com.highfidelity.interface.toolbar.system");
|
||||
// Used for animating and disappearing the bubble
|
||||
var bubbleOverlayTimestamp;
|
||||
// Used for flashing the HUD button upon activation
|
||||
var bubbleButtonFlashState = false;
|
||||
// Used for flashing the HUD button upon activation
|
||||
var bubbleButtonTimestamp;
|
||||
// The bubble model itself
|
||||
var bubbleOverlay = Overlays.addOverlay("model", {
|
||||
url: Script.resolvePath("assets/models/bubble-v12.fbx"), // If you'd like to change the model, modify this line (and the dimensions below)
|
||||
dimensions: { x: 1.0, y: 0.75, z: 1.0 },
|
||||
position: { x: MyAvatar.position.x, y: -MyAvatar.scale * 2 + MyAvatar.position.y + MyAvatar.scale * 0.28, z: MyAvatar.position.z },
|
||||
rotation: Quat.fromPitchYawRollDegrees(MyAvatar.bodyPitch, 0, MyAvatar.bodyRoll),
|
||||
scale: { x: 2, y: MyAvatar.scale * 0.5 + 0.5, z: 2 },
|
||||
visible: false,
|
||||
ignoreRayIntersection: true
|
||||
});
|
||||
// The bubble activation sound
|
||||
var bubbleActivateSound = SoundCache.getSound(Script.resolvePath("assets/sounds/bubble.wav"));
|
||||
// Is the update() function connected?
|
||||
var updateConnected = false;
|
||||
|
||||
var ASSETS_PATH = Script.resolvePath("assets");
|
||||
var TOOLS_PATH = Script.resolvePath("assets/images/tools/");
|
||||
const BUBBLE_VISIBLE_DURATION_MS = 3000;
|
||||
const BUBBLE_RAISE_ANIMATION_DURATION_MS = 750;
|
||||
const BUBBLE_HUD_ICON_FLASH_INTERVAL_MS = 500;
|
||||
|
||||
function buttonImageURL() {
|
||||
return TOOLS_PATH + 'bubble.svg';
|
||||
}
|
||||
var ASSETS_PATH = Script.resolvePath("assets");
|
||||
var TOOLS_PATH = Script.resolvePath("assets/images/tools/");
|
||||
|
||||
function onBubbleToggled() {
|
||||
var bubbleActive = Users.getIgnoreRadiusEnabled();
|
||||
button.writeProperty('buttonState', bubbleActive ? 0 : 1);
|
||||
button.writeProperty('defaultState', bubbleActive ? 0 : 1);
|
||||
button.writeProperty('hoverState', bubbleActive ? 2 : 3);
|
||||
}
|
||||
function buttonImageURL() {
|
||||
return TOOLS_PATH + 'bubble.svg';
|
||||
}
|
||||
|
||||
// setup the mod button and add it to the toolbar
|
||||
var button = toolbar.addButton({
|
||||
objectName: 'bubble',
|
||||
imageURL: buttonImageURL(),
|
||||
visible: true,
|
||||
alpha: 0.9
|
||||
});
|
||||
onBubbleToggled();
|
||||
// Hides the bubble model overlay and resets the button flash state
|
||||
function hideOverlays() {
|
||||
Overlays.editOverlay(bubbleOverlay, {
|
||||
visible: false
|
||||
});
|
||||
bubbleButtonFlashState = false;
|
||||
}
|
||||
|
||||
button.clicked.connect(Users.toggleIgnoreRadius);
|
||||
Users.ignoreRadiusEnabledChanged.connect(onBubbleToggled);
|
||||
// Make the bubble overlay visible, set its position, and play the sound
|
||||
function createOverlays() {
|
||||
Audio.playSound(bubbleActivateSound, {
|
||||
position: { x: MyAvatar.position.x, y: MyAvatar.position.y, z: MyAvatar.position.z },
|
||||
localOnly: true,
|
||||
volume: 0.4
|
||||
});
|
||||
hideOverlays();
|
||||
if (updateConnected === true) {
|
||||
updateConnected = false;
|
||||
Script.update.disconnect(update);
|
||||
}
|
||||
|
||||
// cleanup the toolbar button and overlays when script is stopped
|
||||
Script.scriptEnding.connect(function() {
|
||||
toolbar.removeButton('bubble');
|
||||
button.clicked.disconnect(Users.toggleIgnoreRadius);
|
||||
Users.ignoreRadiusEnabledChanged.disconnect(onBubbleToggled);
|
||||
});
|
||||

    Overlays.editOverlay(bubbleOverlay, {
        position: { x: MyAvatar.position.x, y: -MyAvatar.scale * 2 + MyAvatar.position.y + MyAvatar.scale * 0.28, z: MyAvatar.position.z },
        rotation: Quat.fromPitchYawRollDegrees(MyAvatar.bodyPitch, 0, MyAvatar.bodyRoll),
        scale: { x: 2, y: MyAvatar.scale * 0.5 + 0.5, z: 2 },
        visible: true
    });
    bubbleOverlayTimestamp = Date.now();
    bubbleButtonTimestamp = bubbleOverlayTimestamp;
    Script.update.connect(update);
    updateConnected = true;
}
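
// Design note: the updateConnected flag guards every Script.update
// connect/disconnect pair. Script.update fires every frame, so the guard
// prevents the handler from being connected twice (which would double the
// per-frame work) or disconnected while not attached (which can log a
// script error).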

// Called from the C++ scripting interface to show the bubble overlay
function enteredIgnoreRadius() {
    createOverlays();
}

// Used to set the state of the bubble HUD button
function writeButtonProperties(parameter) {
    button.writeProperty('buttonState', parameter ? 0 : 1);
    button.writeProperty('defaultState', parameter ? 0 : 1);
    button.writeProperty('hoverState', parameter ? 2 : 3);
}
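
// The numeric values select frames from the button's multi-state artwork;
// judging from how onBubbleToggled() below calls this, buttonState 0 with
// hoverState 2 is the "bubble active" art and buttonState 1 with hoverState 3
// is the inactive art, so pass true to show the active icon.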

// The bubble script's update function
var update = function () {
    var timestamp = Date.now();
    var delay = (timestamp - bubbleOverlayTimestamp);
    var overlayAlpha = 1.0 - (delay / BUBBLE_VISIBLE_DURATION_MS);
    if (overlayAlpha > 0) {
        // Flash the HUD button at the configured interval while the bubble is visible
        if ((timestamp - bubbleButtonTimestamp) >= BUBBLE_HUD_ICON_FLASH_INTERVAL_MS) {
            writeButtonProperties(bubbleButtonFlashState);
            bubbleButtonTimestamp = timestamp;
            bubbleButtonFlashState = !bubbleButtonFlashState;
        }

        if (delay < BUBBLE_RAISE_ANIMATION_DURATION_MS) {
            // Quickly raise the bubble from the ground up
            Overlays.editOverlay(bubbleOverlay, {
                position: {
                    x: MyAvatar.position.x,
                    y: (-((BUBBLE_RAISE_ANIMATION_DURATION_MS - delay) / BUBBLE_RAISE_ANIMATION_DURATION_MS)) * MyAvatar.scale * 2 + MyAvatar.position.y + MyAvatar.scale * 0.28,
                    z: MyAvatar.position.z
                },
                rotation: Quat.fromPitchYawRollDegrees(MyAvatar.bodyPitch, 0, MyAvatar.bodyRoll),
                scale: {
                    x: 2,
                    y: ((1 - ((BUBBLE_RAISE_ANIMATION_DURATION_MS - delay) / BUBBLE_RAISE_ANIMATION_DURATION_MS)) * MyAvatar.scale * 0.5 + 0.5),
                    z: 2
                }
            });
        } else {
            // Keep the bubble in place for a couple seconds
            Overlays.editOverlay(bubbleOverlay, {
                position: {
                    x: MyAvatar.position.x,
                    y: MyAvatar.position.y + MyAvatar.scale * 0.28,
                    z: MyAvatar.position.z
                },
                rotation: Quat.fromPitchYawRollDegrees(MyAvatar.bodyPitch, 0, MyAvatar.bodyRoll),
                scale: {
                    x: 2,
                    y: MyAvatar.scale * 0.5 + 0.5,
                    z: 2
                }
            });
        }
    } else {
        hideOverlays();
        if (updateConnected === true) {
            Script.update.disconnect(update);
            updateConnected = false;
        }
        var bubbleActive = Users.getIgnoreRadiusEnabled();
        writeButtonProperties(bubbleActive);
    }
};
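
// A worked example of the raise animation above: the interpolation factor
// (BUBBLE_RAISE_ANIMATION_DURATION_MS - delay) / BUBBLE_RAISE_ANIMATION_DURATION_MS
// falls linearly from 1 to 0 over the first 750 ms, so the bubble's y offset
// climbs from -MyAvatar.scale * 2 up to 0 while its y scale grows from 0.5 to
// MyAvatar.scale * 0.5 + 0.5, matching the resting pose that createOverlays()
// and the hold branch use.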

// Called when the space bubble is toggled on or off
function onBubbleToggled() {
    var bubbleActive = Users.getIgnoreRadiusEnabled();
    writeButtonProperties(bubbleActive);
    if (bubbleActive) {
        createOverlays();
    } else {
        hideOverlays();
        if (updateConnected === true) {
            Script.update.disconnect(update);
            updateConnected = false;
        }
    }
}

// Set up the bubble button and add it to the toolbar
var button = toolbar.addButton({
    objectName: 'bubble',
    imageURL: buttonImageURL(),
    visible: true,
    alpha: 0.9
});
onBubbleToggled();

button.clicked.connect(Users.toggleIgnoreRadius);
Users.ignoreRadiusEnabledChanged.connect(onBubbleToggled);
Users.enteredIgnoreRadius.connect(enteredIgnoreRadius);
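
// Design note: clicking the button does not flip any local state; it only
// calls Users.toggleIgnoreRadius(), and the resulting
// ignoreRadiusEnabledChanged signal drives onBubbleToggled(), so the HUD
// button always tracks the interface's authoritative bubble setting.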

// Clean up the toolbar button and overlays when the script is stopped
Script.scriptEnding.connect(function () {
    toolbar.removeButton('bubble');
    button.clicked.disconnect(Users.toggleIgnoreRadius);
    Users.ignoreRadiusEnabledChanged.disconnect(onBubbleToggled);
    Users.enteredIgnoreRadius.disconnect(enteredIgnoreRadius);
    Overlays.deleteOverlay(bubbleOverlay);
    bubbleButtonFlashState = false;
    if (updateConnected === true) {
        Script.update.disconnect(update);
    }
});

}()); // END LOCAL_SCOPE