diff --git a/README.md b/README.md index 48e0de03af..44bfb94634 100644 --- a/README.md +++ b/README.md @@ -58,9 +58,9 @@ In a new Terminal window, run: Any target can be terminated with Ctrl-C (SIGINT) in the associated Terminal window. -This assignment-client will grab one assignment from the domain-server. You can tell the assignment-client what type you want it to be with the `-t` option. You can also run an assignment-client that forks off *n* assignment-clients with the `-n` option. +This assignment-client will grab one assignment from the domain-server. You can tell the assignment-client what type you want it to be with the `-t` option. You can also run an assignment-client that forks off *n* assignment-clients with the `-n` option. The `--min` and `--max` options allow you to set a range of required assignment-clients; this gives you flexibility in the number of assignment-clients that are running. See `--help` for more options. - ./assignment-client -n 4 + ./assignment-client --min 6 --max 20 To test things out you'll want to run the Interface client. 
diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index e79085244f..0ba83864c4 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -9,6 +9,8 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // +#include "Agent.h" + #include #include #include @@ -46,14 +48,12 @@ #include "RecordingScriptingInterface.h" #include "AbstractAudioInterface.h" -#include "Agent.h" #include "AvatarAudioTimer.h" static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 10; Agent::Agent(ReceivedMessage& message) : ThreadedAssignment(message), - _entityEditSender(), _receivedAudioStream(RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES) { DependencyManager::get()->setPacketSender(&_entityEditSender); @@ -68,7 +68,7 @@ Agent::Agent(ReceivedMessage& message) : DependencyManager::set(); DependencyManager::set(); DependencyManager::set(); - DependencyManager::set(); + DependencyManager::set(ScriptEngine::AGENT_SCRIPT); auto& packetReceiver = DependencyManager::get()->getPacketReceiver(); @@ -143,7 +143,7 @@ void Agent::handleAudioPacket(QSharedPointer message) { _receivedAudioStream.clearBuffer(); } -const QString AGENT_LOGGING_NAME = "agent"; +static const QString AGENT_LOGGING_NAME = "agent"; void Agent::run() { @@ -266,6 +266,9 @@ void Agent::handleSelectedAudioFormat(QSharedPointer message) { } void Agent::selectAudioFormat(const QString& selectedCodecName) { + if (_selectedCodecName == selectedCodecName) { + return; + } _selectedCodecName = selectedCodecName; qDebug() << "Selected Codec:" << _selectedCodecName; @@ -321,7 +324,7 @@ void Agent::scriptRequestFinished() { } void Agent::executeScript() { - _scriptEngine = std::unique_ptr(new ScriptEngine(_scriptContents, _payload)); + _scriptEngine = std::unique_ptr(new ScriptEngine(ScriptEngine::AGENT_SCRIPT, _scriptContents, _payload)); _scriptEngine->setParent(this); // be the parent of the script engine so it gets 
moved when we do // setup an Avatar for the script to use @@ -351,9 +354,15 @@ void Agent::executeScript() { Transform audioTransform; audioTransform.setTranslation(scriptedAvatar->getPosition()); audioTransform.setRotation(scriptedAvatar->getOrientation()); - AbstractAudioInterface::emitAudioPacket(audio.data(), audio.size(), audioSequenceNumber, + QByteArray encodedBuffer; + if (_encoder) { + _encoder->encode(audio, encodedBuffer); + } else { + encodedBuffer = audio; + } + AbstractAudioInterface::emitAudioPacket(encodedBuffer.data(), encodedBuffer.size(), audioSequenceNumber, audioTransform, scriptedAvatar->getPosition(), glm::vec3(0), - PacketType::MicrophoneAudioNoEcho); + PacketType::MicrophoneAudioNoEcho, _selectedCodecName); }); auto avatarHashMap = DependencyManager::set(); @@ -376,6 +385,9 @@ void Agent::executeScript() { _scriptEngine->registerGlobalObject("EntityViewer", &_entityViewer); + auto recordingInterface = DependencyManager::get(); + _scriptEngine->registerGlobalObject("Recording", recordingInterface.data()); + // we need to make sure that init has been called for our EntityScriptingInterface // so that it actually has a jurisdiction listener when we ask it for it next entityScriptingInterface->init(); @@ -499,8 +511,8 @@ void Agent::processAgentAvatar() { if (!_scriptEngine->isFinished() && _isAvatar) { auto scriptedAvatar = DependencyManager::get(); - QByteArray avatarByteArray = scriptedAvatar->toByteArray((randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) - ? AvatarData::SendAllData : AvatarData::CullSmallData); + AvatarData::AvatarDataDetail dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? 
AvatarData::SendAllData : AvatarData::CullSmallData; + QByteArray avatarByteArray = scriptedAvatar->toByteArray(dataDetail, 0, scriptedAvatar->getLastSentJointData()); scriptedAvatar->doneEncoding(true); static AvatarDataSequenceNumber sequenceNumber = 0; diff --git a/assignment-client/src/AssignmentFactory.cpp b/assignment-client/src/AssignmentFactory.cpp index 75d474f566..38eb72649f 100644 --- a/assignment-client/src/AssignmentFactory.cpp +++ b/assignment-client/src/AssignmentFactory.cpp @@ -12,12 +12,13 @@ #include #include "Agent.h" +#include "assets/AssetServer.h" #include "AssignmentFactory.h" #include "audio/AudioMixer.h" #include "avatars/AvatarMixer.h" #include "entities/EntityServer.h" -#include "assets/AssetServer.h" #include "messages/MessagesMixer.h" +#include "scripts/EntityScriptServer.h" ThreadedAssignment* AssignmentFactory::unpackAssignment(ReceivedMessage& message) { @@ -39,7 +40,9 @@ ThreadedAssignment* AssignmentFactory::unpackAssignment(ReceivedMessage& message return new AssetServer(message); case Assignment::MessagesMixerType: return new MessagesMixer(message); + case Assignment::EntityScriptServerType: + return new EntityScriptServer(message); default: - return NULL; + return nullptr; } } diff --git a/assignment-client/src/assets/AssetServer.cpp b/assignment-client/src/assets/AssetServer.cpp index 2fbe2f6dfe..82dd23a9de 100644 --- a/assignment-client/src/assets/AssetServer.cpp +++ b/assignment-client/src/assets/AssetServer.cpp @@ -12,6 +12,8 @@ #include "AssetServer.h" +#include + #include #include #include @@ -21,15 +23,55 @@ #include #include +#include #include #include "NetworkLogging.h" #include "NodeType.h" #include "SendAssetTask.h" #include "UploadAssetTask.h" +#include + +static const uint8_t MIN_CORES_FOR_MULTICORE = 4; +static const uint8_t CPU_AFFINITY_COUNT_HIGH = 2; +static const uint8_t CPU_AFFINITY_COUNT_LOW = 1; +#ifdef Q_OS_WIN +static const int INTERFACE_RUNNING_CHECK_FREQUENCY_MS = 1000; +#endif const QString 
ASSET_SERVER_LOGGING_TARGET_NAME = "asset-server"; +bool interfaceRunning() { + bool result = false; + +#ifdef Q_OS_WIN + QSharedMemory sharedMemory { getInterfaceSharedMemoryName() }; + result = sharedMemory.attach(QSharedMemory::ReadOnly); + if (result) { + sharedMemory.detach(); + } +#endif + return result; +} + +void updateConsumedCores() { + static bool wasInterfaceRunning = false; + bool isInterfaceRunning = interfaceRunning(); + // If state is unchanged, return early + if (isInterfaceRunning == wasInterfaceRunning) { + return; + } + + wasInterfaceRunning = isInterfaceRunning; + auto coreCount = std::thread::hardware_concurrency(); + if (isInterfaceRunning) { + coreCount = coreCount > MIN_CORES_FOR_MULTICORE ? CPU_AFFINITY_COUNT_HIGH : CPU_AFFINITY_COUNT_LOW; + } + qDebug() << "Setting max consumed cores to " << coreCount; + setMaxCores(coreCount); +} + + AssetServer::AssetServer(ReceivedMessage& message) : ThreadedAssignment(message), _taskPool(this) @@ -45,6 +87,20 @@ AssetServer::AssetServer(ReceivedMessage& message) : packetReceiver.registerListener(PacketType::AssetGetInfo, this, "handleAssetGetInfo"); packetReceiver.registerListener(PacketType::AssetUpload, this, "handleAssetUpload"); packetReceiver.registerListener(PacketType::AssetMappingOperation, this, "handleAssetMappingOperation"); + +#ifdef Q_OS_WIN + updateConsumedCores(); + QTimer* timer = new QTimer(this); + auto timerConnection = connect(timer, &QTimer::timeout, [] { + updateConsumedCores(); + }); + connect(qApp, &QCoreApplication::aboutToQuit, [this, timerConnection] { + disconnect(timerConnection); + }); + timer->setInterval(INTERFACE_RUNNING_CHECK_FREQUENCY_MS); + timer->setTimerType(Qt::CoarseTimer); + timer->start(); +#endif } void AssetServer::run() { @@ -137,7 +193,7 @@ void AssetServer::completeSetup() { cleanupUnmappedFiles(); } - nodeList->addNodeTypeToInterestSet(NodeType::Agent); + nodeList->addSetOfNodeTypesToNodeInterestSet({ NodeType::Agent, NodeType::EntityScriptServer }); } 
else { qCritical() << "Asset Server assignment will not continue because mapping file could not be loaded."; setFinished(true); diff --git a/assignment-client/src/assets/SendAssetTask.cpp b/assignment-client/src/assets/SendAssetTask.cpp index d2b3c6c256..ca8733d660 100644 --- a/assignment-client/src/assets/SendAssetTask.cpp +++ b/assignment-client/src/assets/SendAssetTask.cpp @@ -21,6 +21,7 @@ #include #include "AssetUtils.h" +#include "ClientServerUtils.h" SendAssetTask::SendAssetTask(QSharedPointer message, const SharedNodePointer& sendToNode, const QDir& resourcesDir) : QRunnable(), diff --git a/assignment-client/src/assets/UploadAssetTask.cpp b/assignment-client/src/assets/UploadAssetTask.cpp index e09619a3cc..7e8e94c34d 100644 --- a/assignment-client/src/assets/UploadAssetTask.cpp +++ b/assignment-client/src/assets/UploadAssetTask.cpp @@ -18,6 +18,8 @@ #include #include +#include "ClientServerUtils.h" + UploadAssetTask::UploadAssetTask(QSharedPointer receivedMessage, SharedNodePointer senderNode, const QDir& resourcesDir) : diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index 19ebd4ea87..04acae6f05 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -28,6 +28,7 @@ #include #include +#include "AudioHelpers.h" #include "AudioRingBuffer.h" #include "AudioMixerClientData.h" #include "AvatarAudioStream.h" @@ -35,9 +36,8 @@ #include "AudioMixer.h" -static const float LOUDNESS_TO_DISTANCE_RATIO = 0.00001f; static const float DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE = 0.5f; // attenuation = -6dB * log2(distance) -static const float DEFAULT_NOISE_MUTING_THRESHOLD = 0.003f; +static const float DEFAULT_NOISE_MUTING_THRESHOLD = 1.0f; static const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer"; static const QString AUDIO_ENV_GROUP_KEY = "audio_env"; static const QString AUDIO_BUFFER_GROUP_KEY = "audio_buffer"; @@ -46,9 +46,6 @@ static const QString 
AUDIO_THREADING_GROUP_KEY = "audio_threading"; int AudioMixer::_numStaticJitterFrames{ -1 }; float AudioMixer::_noiseMutingThreshold{ DEFAULT_NOISE_MUTING_THRESHOLD }; float AudioMixer::_attenuationPerDoublingInDistance{ DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE }; -float AudioMixer::_trailingSleepRatio{ 1.0f }; -float AudioMixer::_performanceThrottlingRatio{ 0.0f }; -float AudioMixer::_minAudibilityThreshold{ LOUDNESS_TO_DISTANCE_RATIO / 2.0f }; QHash AudioMixer::_audioZones; QVector AudioMixer::_zoneSettings; QVector AudioMixer::_zoneReverbSettings; @@ -69,6 +66,7 @@ AudioMixer::AudioMixer(ReceivedMessage& message) : packetReceiver.registerListener(PacketType::NodeMuteRequest, this, "handleNodeMuteRequestPacket"); packetReceiver.registerListener(PacketType::RadiusIgnoreRequest, this, "handleRadiusIgnoreRequestPacket"); packetReceiver.registerListener(PacketType::RequestsDomainListData, this, "handleRequestsDomainListDataPacket"); + packetReceiver.registerListener(PacketType::PerAvatarGainSet, this, "handlePerAvatarGainSetDataPacket"); connect(nodeList.data(), &NodeList::nodeKilled, this, &AudioMixer::handleNodeKilled); } @@ -186,7 +184,8 @@ void AudioMixer::handleNodeKilled(SharedNodePointer killedNode) { nodeList->eachNode([&killedNode](const SharedNodePointer& node) { auto clientData = dynamic_cast(node->getLinkedData()); if (clientData) { - clientData->removeHRTFsForNode(killedNode->getUUID()); + QUuid killedUUID = killedNode->getUUID(); + clientData->removeHRTFsForNode(killedUUID); } }); } @@ -240,6 +239,20 @@ void AudioMixer::handleNodeIgnoreRequestPacket(QSharedPointer p sendingNode->parseIgnoreRequestMessage(packet); } +void AudioMixer::handlePerAvatarGainSetDataPacket(QSharedPointer packet, SharedNodePointer sendingNode) { + auto clientData = dynamic_cast(sendingNode->getLinkedData()); + if (clientData) { + QUuid listeningNodeUUID = sendingNode->getUUID(); + // parse the UUID from the packet + QUuid audioSourceUUID = 
QUuid::fromRfc4122(packet->readWithoutCopy(NUM_BYTES_RFC4122_UUID)); + uint8_t packedGain; + packet->readPrimitive(&packedGain); + float gain = unpackFloatGainFromByte(packedGain); + clientData->hrtfForStream(audioSourceUUID, QUuid()).setGainAdjustment(gain); + qDebug() << "Setting gain adjustment for hrtf[" << listeningNodeUUID << "][" << audioSourceUUID << "] to " << gain; + } +} + void AudioMixer::handleRadiusIgnoreRequestPacket(QSharedPointer packet, SharedNodePointer sendingNode) { sendingNode->parseIgnoreRadiusRequestMessage(packet); } @@ -277,35 +290,31 @@ void AudioMixer::sendStatsPacket() { // general stats statsObject["useDynamicJitterBuffers"] = _numStaticJitterFrames == -1; - statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f; - statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio; + + statsObject["threads"] = _slavePool.numThreads(); + + statsObject["trailing_mix_ratio"] = _trailingMixRatio; + statsObject["throttling_ratio"] = _throttlingRatio; statsObject["avg_streams_per_frame"] = (float)_stats.sumStreams / (float)_numStatFrames; statsObject["avg_listeners_per_frame"] = (float)_stats.sumListeners / (float)_numStatFrames; // timing stats QJsonObject timingStats; - uint64_t timing, trailing; - _sleepTiming.get(timing, trailing); - timingStats["us_per_sleep"] = (qint64)(timing / _numStatFrames); - timingStats["us_per_sleep_trailing"] = (qint64)(trailing / _numStatFrames); + auto addTiming = [&](Timer& timer, std::string name) { + uint64_t timing, trailing; + timer.get(timing, trailing); + timingStats[("us_per_" + name).c_str()] = (qint64)(timing / _numStatFrames); + timingStats[("us_per_" + name + "_trailing").c_str()] = (qint64)(trailing / _numStatFrames); + }; - _frameTiming.get(timing, trailing); - timingStats["us_per_frame"] = (qint64)(timing / _numStatFrames); - timingStats["us_per_frame_trailing"] = (qint64)(trailing / _numStatFrames); - - _prepareTiming.get(timing, trailing); - 
timingStats["us_per_prepare"] = (qint64)(timing / _numStatFrames); - timingStats["us_per_prepare_trailing"] = (qint64)(trailing / _numStatFrames); - - _mixTiming.get(timing, trailing); - timingStats["us_per_mix"] = (qint64)(timing / _numStatFrames); - timingStats["us_per_mix_trailing"] = (qint64)(trailing / _numStatFrames); - - _eventsTiming.get(timing, trailing); - timingStats["us_per_events"] = (qint64)(timing / _numStatFrames); - timingStats["us_per_events_trailing"] = (qint64)(trailing / _numStatFrames); + addTiming(_ticTiming, "tic"); + addTiming(_sleepTiming, "sleep"); + addTiming(_frameTiming, "frame"); + addTiming(_prepareTiming, "prepare"); + addTiming(_mixTiming, "mix"); + addTiming(_eventsTiming, "events"); // call it "avg_..." to keep it higher in the display, sorted alphabetically statsObject["avg_timing_stats"] = timingStats; @@ -315,7 +324,7 @@ void AudioMixer::sendStatsPacket() { mixStats["%_hrtf_mixes"] = percentageForMixStats(_stats.hrtfRenders); mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_stats.hrtfSilentRenders); - mixStats["%_hrtf_struggle_mixes"] = percentageForMixStats(_stats.hrtfStruggleRenders); + mixStats["%_hrtf_throttle_mixes"] = percentageForMixStats(_stats.hrtfThrottleRenders); mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_stats.manualStereoMixes); mixStats["%_manual_echo_mixes"] = percentageForMixStats(_stats.manualEchoMixes); @@ -381,7 +390,7 @@ void AudioMixer::start() { auto nodeList = DependencyManager::get(); // prepare the NodeList - nodeList->addNodeTypeToInterestSet(NodeType::Agent); + nodeList->addSetOfNodeTypesToNodeInterestSet({ NodeType::Agent, NodeType::EntityScriptServer }); nodeList->linkedDataCreateCallback = [&](Node* node) { getOrCreateClientData(node); }; // parse out any AudioMixer settings @@ -391,25 +400,25 @@ void AudioMixer::start() { parseSettingsObject(settingsObject); } - // manageLoad state - auto frameTimestamp = p_high_resolution_clock::time_point::min(); - unsigned int 
framesSinceManagement = std::numeric_limits::max(); - // mix state unsigned int frame = 1; + auto frameTimestamp = p_high_resolution_clock::now(); while (!_isFinished) { + auto ticTimer = _ticTiming.timer(); + { auto timer = _sleepTiming.timer(); - manageLoad(frameTimestamp, framesSinceManagement); + auto frameDuration = timeFrame(frameTimestamp); + throttle(frameDuration, frame); } - auto timer = _frameTiming.timer(); + auto frameTimer = _frameTiming.timer(); nodeList->nestedEach([&](NodeList::const_iterator cbegin, NodeList::const_iterator cend) { // prepare frames; pop off any new audio from their streams { - auto timer = _prepareTiming.timer(); + auto prepareTimer = _prepareTiming.timer(); std::for_each(cbegin, cend, [&](const SharedNodePointer& node) { _stats.sumStreams += prepareFrame(node, frame); }); @@ -417,8 +426,8 @@ void AudioMixer::start() { // mix across slave threads { - auto timer = _mixTiming.timer(); - _slavePool.mix(cbegin, cend, frame); + auto mixTimer = _mixTiming.timer(); + _slavePool.mix(cbegin, cend, frame, _throttlingRatio); } }); @@ -433,7 +442,7 @@ void AudioMixer::start() { // play nice with qt event-looping { - auto timer = _eventsTiming.timer(); + auto eventsTimer = _eventsTiming.timer(); // since we're a while loop we need to yield to qt's event processing QCoreApplication::processEvents(); @@ -447,67 +456,68 @@ void AudioMixer::start() { } } -void AudioMixer::manageLoad(p_high_resolution_clock::time_point& frameTimestamp, unsigned int& framesSinceCutoffEvent) { - auto timeToSleep = std::chrono::microseconds(0); +std::chrono::microseconds AudioMixer::timeFrame(p_high_resolution_clock::time_point& timestamp) { + // advance the next frame + auto nextTimestamp = timestamp + std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS); + auto now = p_high_resolution_clock::now(); - // sleep until the next frame, if necessary - { - // advance the next frame - frameTimestamp += 
std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS); - auto now = p_high_resolution_clock::now(); + // compute how long the last frame took + auto duration = std::chrono::duration_cast(now - timestamp); - // calculate sleep - if (frameTimestamp < now) { - frameTimestamp = now; - } else { - timeToSleep = std::chrono::duration_cast(frameTimestamp - now); - std::this_thread::sleep_for(timeToSleep); - } - } + // set the new frame timestamp + timestamp = std::max(now, nextTimestamp); - // manage mixer load - { - const int TRAILING_AVERAGE_FRAMES = 100; - const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES; - const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO; + // sleep until the next frame should start + // WIN32 sleep_until is broken until VS2015 Update 2 + // instead, std::max (above) guarantees that timestamp >= now, so we can sleep_for + std::this_thread::sleep_for(timestamp - now); - const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f; - const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f; + return duration; +} - const float RATIO_BACK_OFF = 0.02f; +void AudioMixer::throttle(std::chrono::microseconds duration, int frame) { + // throttle using a modified proportional-integral controller + const float FRAME_TIME = 10000.0f; + float mixRatio = duration.count() / FRAME_TIME; - _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio) + - // ratio of frame spent sleeping / total frame time - ((CURRENT_FRAME_RATIO * timeToSleep.count()) / (float) AudioConstants::NETWORK_FRAME_USECS); + // constants are determined based on a "regular" 16-CPU EC2 server - bool hasRatioChanged = false; + // target different mix and backoff ratios (they also have different backoff rates) + // this is to prevent oscillation, and encourage throttling to find a steady state + const float TARGET = 0.9f; + // on a "regular" machine with 100 avatars, this is the largest value where + // - overthrottling can be recovered + // - 
oscillations will not occur after the recovery + const float BACKOFF_TARGET = 0.44f; - if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) { - if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) { - qDebug() << "Mixer is struggling"; - // change our min required loudness to reduce some load - _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio)); - hasRatioChanged = true; - } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) { - qDebug() << "Mixer is recovering"; - // back off the required loudness - _performanceThrottlingRatio = std::max(0.0f, _performanceThrottlingRatio - RATIO_BACK_OFF); - hasRatioChanged = true; - } + // the mixer is known to struggle at about 80 on a "regular" machine + // so throttle 2/80 the streams to ensure smooth audio (throttling is linear) + const float THROTTLE_RATE = 2 / 80.0f; + const float BACKOFF_RATE = THROTTLE_RATE / 4; - if (hasRatioChanged) { - // set out min audability threshold from the new ratio - _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio)); - framesSinceCutoffEvent = 0; + // recovery should be bounded so that large changes in user count is a tolerable experience + // throttling is linear, so most cases will not need a full recovery + const int RECOVERY_TIME = 180; - qDebug() << "Sleeping" << _trailingSleepRatio << "of frame"; - qDebug() << "Cutoff is" << _performanceThrottlingRatio; - qDebug() << "Minimum audibility to be mixed is" << _minAudibilityThreshold; - } - } + // weight more recent frames to determine if throttling is necessary, + const int TRAILING_FRAMES = (int)(100 * RECOVERY_TIME * BACKOFF_RATE); + const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_FRAMES; + const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO; + _trailingMixRatio = PREVIOUS_FRAMES_RATIO * _trailingMixRatio + CURRENT_FRAME_RATIO * mixRatio; - 
if (!hasRatioChanged) { - ++framesSinceCutoffEvent; + if (frame % TRAILING_FRAMES == 0) { + if (_trailingMixRatio > TARGET) { + int proportionalTerm = 1 + (_trailingMixRatio - TARGET) / 0.1f; + _throttlingRatio += THROTTLE_RATE * proportionalTerm; + _throttlingRatio = std::min(_throttlingRatio, 1.0f); + qDebug("audio-mixer is struggling (%f mix/sleep) - throttling %f of streams", + (double)_trailingMixRatio, (double)_throttlingRatio); + } else if (_throttlingRatio > 0.0f && _trailingMixRatio <= BACKOFF_TARGET) { + int proportionalTerm = 1 + (TARGET - _trailingMixRatio) / 0.2f; + _throttlingRatio -= BACKOFF_RATE * proportionalTerm; + _throttlingRatio = std::max(_throttlingRatio, 0.0f); + qDebug("audio-mixer is recovering (%f mix/sleep) - throttling %f of streams", + (double)_trailingMixRatio, (double)_throttlingRatio); } } } diff --git a/assignment-client/src/audio/AudioMixer.h b/assignment-client/src/audio/AudioMixer.h index d9759653fb..f9c4252ecf 100644 --- a/assignment-client/src/audio/AudioMixer.h +++ b/assignment-client/src/audio/AudioMixer.h @@ -46,7 +46,6 @@ public: static int getStaticJitterFrames() { return _numStaticJitterFrames; } static bool shouldMute(float quietestFrame) { return quietestFrame > _noiseMutingThreshold; } static float getAttenuationPerDoublingInDistance() { return _attenuationPerDoublingInDistance; } - static float getMinimumAudibilityThreshold() { return _performanceThrottlingRatio > 0.0f ? 
_minAudibilityThreshold : 0.0f; } static const QHash& getAudioZones() { return _audioZones; } static const QVector& getZoneSettings() { return _zoneSettings; } static const QVector& getReverbSettings() { return _zoneReverbSettings; } @@ -66,14 +65,15 @@ private slots: void handleRadiusIgnoreRequestPacket(QSharedPointer packet, SharedNodePointer sendingNode); void handleKillAvatarPacket(QSharedPointer packet, SharedNodePointer sendingNode); void handleNodeMuteRequestPacket(QSharedPointer packet, SharedNodePointer sendingNode); + void handlePerAvatarGainSetDataPacket(QSharedPointer packet, SharedNodePointer sendingNode); void start(); void removeHRTFsForFinishedInjector(const QUuid& streamID); private: // mixing helpers - // check and maybe throttle mixer load by changing audibility threshold - void manageLoad(p_high_resolution_clock::time_point& frameTimestamp, unsigned int& framesSinceManagement); + std::chrono::microseconds timeFrame(p_high_resolution_clock::time_point& timestamp); + void throttle(std::chrono::microseconds frameDuration, int frame); // pop a frame from any streams on the node // returns the number of available streams int prepareFrame(const SharedNodePointer& node, unsigned int frame); @@ -84,6 +84,9 @@ private: void parseSettingsObject(const QJsonObject& settingsObject); + float _trailingMixRatio { 0.0f }; + float _throttlingRatio { 0.0f }; + int _numStatFrames { 0 }; AudioMixerStats _stats; @@ -112,6 +115,7 @@ private: uint64_t _history[TIMER_TRAILING_SECONDS] {}; int _index { 0 }; }; + Timer _ticTiming; Timer _sleepTiming; Timer _frameTiming; Timer _prepareTiming; @@ -121,9 +125,6 @@ private: static int _numStaticJitterFrames; // -1 denotes dynamic jitter buffering static float _noiseMutingThreshold; static float _attenuationPerDoublingInDistance; - static float _trailingSleepRatio; - static float _performanceThrottlingRatio; - static float _minAudibilityThreshold; static QHash _audioZones; static QVector _zoneSettings; static QVector 
_zoneReverbSettings; diff --git a/assignment-client/src/audio/AudioMixerSlave.cpp b/assignment-client/src/audio/AudioMixerSlave.cpp index 28d3358eb5..4b02ca1567 100644 --- a/assignment-client/src/audio/AudioMixerSlave.cpp +++ b/assignment-client/src/audio/AudioMixerSlave.cpp @@ -36,6 +36,292 @@ #include "AudioMixerSlave.h" +using AudioStreamMap = AudioMixerClientData::AudioStreamMap; + +// packet helpers +std::unique_ptr createAudioPacket(PacketType type, int size, quint16 sequence, QString codec); +void sendMixPacket(const SharedNodePointer& node, AudioMixerClientData& data, QByteArray& buffer); +void sendSilentPacket(const SharedNodePointer& node, AudioMixerClientData& data); +void sendMutePacket(const SharedNodePointer& node, AudioMixerClientData&); +void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data); + +// mix helpers +bool shouldIgnoreNode(const SharedNodePointer& listener, const SharedNodePointer& node); +float gainForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd, + const glm::vec3& relativePosition, bool isEcho); +float azimuthForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd, + const glm::vec3& relativePosition); + +void AudioMixerSlave::configure(ConstIter begin, ConstIter end, unsigned int frame, float throttlingRatio) { + _begin = begin; + _end = end; + _frame = frame; + _throttlingRatio = throttlingRatio; +} + +void AudioMixerSlave::mix(const SharedNodePointer& node) { + // check that the node is valid + AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData(); + if (data == nullptr) { + return; + } + + // check that the stream is valid + auto avatarStream = data->getAvatarAudioStream(); + if (avatarStream == nullptr) { + return; + } + + // send mute packet, if necessary + if (AudioMixer::shouldMute(avatarStream->getQuietestFrameLoudness()) || data->shouldMuteClient()) { + sendMutePacket(node, *data); + } + + 
// send audio packets, if necessary + if (node->getType() == NodeType::Agent && node->getActiveSocket()) { + ++stats.sumListeners; + + // mix the audio + bool mixHasAudio = prepareMix(node); + + // send audio packet + if (mixHasAudio || data->shouldFlushEncoder()) { + QByteArray encodedBuffer; + if (mixHasAudio) { + // encode the audio + QByteArray decodedBuffer(reinterpret_cast(_bufferSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO); + data->encode(decodedBuffer, encodedBuffer); + } else { + // time to flush (resets shouldFlush until the next encode) + data->encodeFrameOfZeros(encodedBuffer); + } + + sendMixPacket(node, *data, encodedBuffer); + } else { + sendSilentPacket(node, *data); + } + + // send environment packet + sendEnvironmentPacket(node, *data); + + // send stats packet (about every second) + const unsigned int NUM_FRAMES_PER_SEC = (int)ceil(AudioConstants::NETWORK_FRAMES_PER_SEC); + if (data->shouldSendStats(_frame % NUM_FRAMES_PER_SEC)) { + data->sendAudioStreamStatsPackets(node); + } + } +} + +bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) { + AvatarAudioStream* listenerAudioStream = static_cast(listener->getLinkedData())->getAvatarAudioStream(); + AudioMixerClientData* listenerData = static_cast(listener->getLinkedData()); + + // zero out the mix for this listener + memset(_mixSamples, 0, sizeof(_mixSamples)); + + bool isThrottling = _throttlingRatio > 0.0f; + std::vector> throttledNodes; + + typedef void (AudioMixerSlave::*MixFunctor)( + AudioMixerClientData&, const QUuid&, const AvatarAudioStream&, const PositionalAudioStream&); + auto allStreams = [&](const SharedNodePointer& node, MixFunctor mixFunctor) { + AudioMixerClientData* nodeData = static_cast(node->getLinkedData()); + for (auto& streamPair : nodeData->getAudioStreams()) { + auto nodeStream = streamPair.second; + (this->*mixFunctor)(*listenerData, node->getUUID(), *listenerAudioStream, *nodeStream); + } + }; + + std::for_each(_begin, _end, [&](const 
SharedNodePointer& node) { + if (*node == *listener) { + AudioMixerClientData* nodeData = static_cast(node->getLinkedData()); + + // only mix the echo, if requested + for (auto& streamPair : nodeData->getAudioStreams()) { + auto nodeStream = streamPair.second; + if (nodeStream->shouldLoopbackForNode()) { + mixStream(*listenerData, node->getUUID(), *listenerAudioStream, *nodeStream); + } + } + } else if (!shouldIgnoreNode(listener, node)) { + if (!isThrottling) { + allStreams(node, &AudioMixerSlave::mixStream); + } else { + AudioMixerClientData* nodeData = static_cast(node->getLinkedData()); + + // compute the node's max relative volume + float nodeVolume; + for (auto& streamPair : nodeData->getAudioStreams()) { + auto nodeStream = streamPair.second; + float distance = glm::length(nodeStream->getPosition() - listenerAudioStream->getPosition()); + nodeVolume = std::max(nodeStream->getLastPopOutputTrailingLoudness() / distance, nodeVolume); + } + + // max-heapify the nodes by relative volume + throttledNodes.push_back(std::make_pair(nodeVolume, node)); + if (!throttledNodes.empty()) { + std::push_heap(throttledNodes.begin(), throttledNodes.end()); + } + } + } + }); + + if (isThrottling) { + // pop the loudest nodes off the heap and mix their streams + int numToRetain = (int)(std::distance(_begin, _end) * (1 - _throttlingRatio)); + for (int i = 0; i < numToRetain; i++) { + if (throttledNodes.empty()) { + break; + } + + std::pop_heap(throttledNodes.begin(), throttledNodes.end()); + + auto& node = throttledNodes.back().second; + allStreams(node, &AudioMixerSlave::mixStream); + + throttledNodes.pop_back(); + } + + // throttle the remaining nodes' streams + for (const std::pair& nodePair : throttledNodes) { + auto& node = nodePair.second; + allStreams(node, &AudioMixerSlave::throttleStream); + } + } + + // use the per listener AudioLimiter to render the mixed data... 
+ listenerData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); + + // check for silent audio after the peak limiter has converted the samples + bool hasAudio = false; + for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) { + if (_bufferSamples[i] != 0) { + hasAudio = true; + break; + } + } + return hasAudio; +} + +void AudioMixerSlave::throttleStream(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID, + const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) { + addStream(listenerNodeData, sourceNodeID, listeningNodeStream, streamToAdd, true); +} + +void AudioMixerSlave::mixStream(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID, + const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) { + addStream(listenerNodeData, sourceNodeID, listeningNodeStream, streamToAdd, false); +} + +void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID, + const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd, + bool throttle) { + ++stats.totalMixes; + + // to reduce artifacts we call the HRTF functor for every source, even if throttled or silent + // this ensures the correct tail from last mixed block and the correct spatialization of next first block + + // check if this is a server echo of a source back to itself + bool isEcho = (&streamToAdd == &listeningNodeStream); + + glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition(); + + float distance = glm::max(glm::length(relativePosition), EPSILON); + float gain = gainForSource(listeningNodeStream, streamToAdd, relativePosition, isEcho); + float azimuth = isEcho ? 
0.0f : azimuthForSource(listeningNodeStream, listeningNodeStream, relativePosition); + static const int HRTF_DATASET_INDEX = 1; + + if (!streamToAdd.lastPopSucceeded()) { + bool forceSilentBlock = true; + + if (!streamToAdd.getLastPopOutput().isNull()) { + bool isInjector = dynamic_cast(&streamToAdd); + + // in an injector, just go silent - the injector has likely ended + // in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence + if (!isInjector) { + // calculate its fade factor, which depends on how many times it's already been repeated. + float fadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1); + if (fadeFactor > 0.0f) { + // apply the fadeFactor to the gain + gain *= fadeFactor; + forceSilentBlock = false; + } + } + } + + if (forceSilentBlock) { + // call renderSilent with a forced silent block to reduce artifacts + // (this is not done for stereo streams since they do not go through the HRTF) + if (!streamToAdd.isStereo() && !isEcho) { + // get the existing listener-source HRTF object, or create a new one + auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier()); + + static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {}; + hrtf.renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain, + AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); + + ++stats.hrtfSilentRenders; + } + + return; + } + } + + // grab the stream from the ring buffer + AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput(); + + // stereo sources are not passed through HRTF + if (streamToAdd.isStereo()) { + for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) { + _mixSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE); + } + + ++stats.manualStereoMixes; + return; + } + + // echo sources are not passed through HRTF + if (isEcho) { + for (int i = 0; i 
< AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) { + auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE); + _mixSamples[i] += monoSample; + _mixSamples[i + 1] += monoSample; + } + + ++stats.manualEchoMixes; + return; + } + + // get the existing listener-source HRTF object, or create a new one + auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier()); + + streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); + + if (streamToAdd.getLastPopOutputLoudness() == 0.0f) { + // call renderSilent to reduce artifacts + hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain, + AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); + + ++stats.hrtfSilentRenders; + return; + } + + if (throttle) { + // call renderSilent with actual frame data and a gain of 0.0f to reduce artifacts + hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f, + AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); + + ++stats.hrtfThrottleRenders; + return; + } + + hrtf.render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain, + AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); + + ++stats.hrtfRenders; +} + std::unique_ptr createAudioPacket(PacketType type, int size, quint16 sequence, QString codec) { auto audioPacket = NLPacket::create(type, size); audioPacket->writePrimitive(sequence); @@ -73,6 +359,14 @@ void sendSilentPacket(const SharedNodePointer& node, AudioMixerClientData& data) data.incrementOutgoingMixedAudioSequenceNumber(); } +void sendMutePacket(const SharedNodePointer& node, AudioMixerClientData& data) { + auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0); + DependencyManager::get()->sendPacket(std::move(mutePacket), *node); + + // probably now we just reset the flag, once should do it (?) 
+ data.setShouldMuteClient(false); +} + void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data) { bool hasReverb = false; float reverbTime, wetLevel; @@ -134,285 +428,54 @@ void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& } } -void AudioMixerSlave::configure(ConstIter begin, ConstIter end, unsigned int frame) { - _begin = begin; - _end = end; - _frame = frame; -} - -void AudioMixerSlave::mix(const SharedNodePointer& node) { - // check that the node is valid - AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData(); - if (data == nullptr) { - return; - } - - auto avatarStream = data->getAvatarAudioStream(); - if (avatarStream == nullptr) { - return; - } - - // send mute packet, if necessary - if (AudioMixer::shouldMute(avatarStream->getQuietestFrameLoudness()) || data->shouldMuteClient()) { - auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0); - DependencyManager::get()->sendPacket(std::move(mutePacket), *node); - - // probably now we just reset the flag, once should do it (?) 
- data->setShouldMuteClient(false); - } - - // send audio packets, if necessary - if (node->getType() == NodeType::Agent && node->getActiveSocket()) { - ++stats.sumListeners; - - // mix the audio - bool mixHasAudio = prepareMix(node); - - // send audio packet - if (mixHasAudio || data->shouldFlushEncoder()) { - // encode the audio - QByteArray encodedBuffer; - if (mixHasAudio) { - QByteArray decodedBuffer(reinterpret_cast(_bufferSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO); - data->encode(decodedBuffer, encodedBuffer); - } else { - // time to flush, which resets the shouldFlush until next time we encode something - data->encodeFrameOfZeros(encodedBuffer); - } - - sendMixPacket(node, *data, encodedBuffer); - } else { - sendSilentPacket(node, *data); - } - - // send environment packet - sendEnvironmentPacket(node, *data); - - // send stats packet (about every second) - static const unsigned int NUM_FRAMES_PER_SEC = (int) ceil(AudioConstants::NETWORK_FRAMES_PER_SEC); - if (data->shouldSendStats(_frame % NUM_FRAMES_PER_SEC)) { - data->sendAudioStreamStatsPackets(node); - } - } -} - -bool AudioMixerSlave::prepareMix(const SharedNodePointer& node) { - AvatarAudioStream* nodeAudioStream = static_cast(node->getLinkedData())->getAvatarAudioStream(); +bool shouldIgnoreNode(const SharedNodePointer& listener, const SharedNodePointer& node) { + AudioMixerClientData* listenerData = static_cast(listener->getLinkedData()); AudioMixerClientData* nodeData = static_cast(node->getLinkedData()); - // zero out the client mix for this node - memset(_mixSamples, 0, sizeof(_mixSamples)); + // when this is true, the AudioMixer will send Audio data to a client about avatars that have ignored them + bool getsAnyIgnored = listenerData->getRequestsDomainListData() && listener->getCanKick(); - // loop through all other nodes that have sufficient audio to mix - std::for_each(_begin, _end, [&](const SharedNodePointer& otherNode){ - // make sure that we have audio data for this other node 
- // and that it isn't being ignored by our listening node - // and that it isn't ignoring our listening node - AudioMixerClientData* otherData = static_cast(otherNode->getLinkedData()); + bool ignore = true; - // When this is true, the AudioMixer will send Audio data to a client about avatars that have ignored them - bool getsAnyIgnored = nodeData->getRequestsDomainListData() && node->getCanKick(); + if (nodeData && + // make sure that it isn't being ignored by our listening node + (!listener->isIgnoringNodeWithID(node->getUUID()) || (nodeData->getRequestsDomainListData() && node->getCanKick())) && + // and that it isn't ignoring our listening node + (!node->isIgnoringNodeWithID(listener->getUUID()) || getsAnyIgnored)) { - if (otherData - && (!node->isIgnoringNodeWithID(otherNode->getUUID()) || (otherData->getRequestsDomainListData() && otherNode->getCanKick())) - && (!otherNode->isIgnoringNodeWithID(node->getUUID()) || getsAnyIgnored)) { + // is either node enabling the space bubble / ignore radius? + if ((listener->isIgnoreRadiusEnabled() || node->isIgnoreRadiusEnabled())) { + // define the minimum bubble size + static const glm::vec3 minBubbleSize = glm::vec3(0.3f, 1.3f, 0.3f); - // check to see if we're ignoring in radius - bool insideIgnoreRadius = false; - // If the otherNode equals the node, we're doing a comparison on ourselves - if (*otherNode == *node) { - // We'll always be inside the radius in that case. 
- insideIgnoreRadius = true; - // Check to see if the space bubble is enabled - } else if ((node->isIgnoreRadiusEnabled() || otherNode->isIgnoreRadiusEnabled())) { - // Define the minimum bubble size - static const glm::vec3 minBubbleSize = glm::vec3(0.3f, 1.3f, 0.3f); - AudioMixerClientData* nodeData = reinterpret_cast(node->getLinkedData()); - // Set up the bounding box for the current node - AABox nodeBox(nodeData->getAvatarBoundingBoxCorner(), nodeData->getAvatarBoundingBoxScale()); - // Clamp the size of the bounding box to a minimum scale - if (glm::any(glm::lessThan(nodeData->getAvatarBoundingBoxScale(), minBubbleSize))) { - nodeBox.setScaleStayCentered(minBubbleSize); - } - // Set up the bounding box for the current other node - AABox otherNodeBox(otherData->getAvatarBoundingBoxCorner(), otherData->getAvatarBoundingBoxScale()); - // Clamp the size of the bounding box to a minimum scale - if (glm::any(glm::lessThan(otherData->getAvatarBoundingBoxScale(), minBubbleSize))) { - otherNodeBox.setScaleStayCentered(minBubbleSize); - } - // Quadruple the scale of both bounding boxes - nodeBox.embiggen(4.0f); - otherNodeBox.embiggen(4.0f); - - // Perform the collision check between the two bounding boxes - if (nodeBox.touches(otherNodeBox)) { - insideIgnoreRadius = true; - } + // set up the bounding box for the listener + AABox listenerBox(listenerData->getAvatarBoundingBoxCorner(), listenerData->getAvatarBoundingBoxScale()); + if (glm::any(glm::lessThan(listenerData->getAvatarBoundingBoxScale(), minBubbleSize))) { + listenerBox.setScaleStayCentered(minBubbleSize); } - // Enumerate the audio streams attached to the otherNode - auto streamsCopy = otherData->getAudioStreams(); - for (auto& streamPair : streamsCopy) { - auto otherNodeStream = streamPair.second; - bool isSelfWithEcho = ((*otherNode == *node) && (otherNodeStream->shouldLoopbackForNode())); - // Add all audio streams that should be added to the mix - if (isSelfWithEcho || (!isSelfWithEcho && 
!insideIgnoreRadius)) { - addStreamToMix(*nodeData, otherNode->getUUID(), *nodeAudioStream, *otherNodeStream); - } - } - } - }); - - // use the per listener AudioLimiter to render the mixed data... - nodeData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); - - // check for silent audio after the peak limiter has converted the samples - bool hasAudio = false; - for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) { - if (_bufferSamples[i] != 0) { - hasAudio = true; - break; - } - } - return hasAudio; -} - -void AudioMixerSlave::addStreamToMix(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID, - const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) { - // to reduce artifacts we calculate the gain and azimuth for every source for this listener - // even if we are not going to end up mixing in this source - - ++stats.totalMixes; - - // this ensures that the tail of any previously mixed audio or the first block of new audio sounds correct - - // check if this is a server echo of a source back to itself - bool isEcho = (&streamToAdd == &listeningNodeStream); - - glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition(); - - // figure out the distance between source and listener - float distance = glm::max(glm::length(relativePosition), EPSILON); - - // figure out the gain for this source at the listener - float gain = gainForSource(listeningNodeStream, streamToAdd, relativePosition, isEcho); - - // figure out the azimuth to this source at the listener - float azimuth = isEcho ? 
0.0f : azimuthForSource(listeningNodeStream, listeningNodeStream, relativePosition); - - float repeatedFrameFadeFactor = 1.0f; - - static const int HRTF_DATASET_INDEX = 1; - - if (!streamToAdd.lastPopSucceeded()) { - bool forceSilentBlock = true; - - if (!streamToAdd.getLastPopOutput().isNull()) { - bool isInjector = dynamic_cast(&streamToAdd); - - // in an injector, just go silent - the injector has likely ended - // in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence - - // we'll repeat the last block until it has a block to mix - // and we'll gradually fade that repeated block into silence. - - // calculate its fade factor, which depends on how many times it's already been repeated. - repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1); - if (!isInjector && repeatedFrameFadeFactor > 0.0f) { - // apply the repeatedFrameFadeFactor to the gain - gain *= repeatedFrameFadeFactor; - - forceSilentBlock = false; - } - } - - if (forceSilentBlock) { - // we're deciding not to repeat either since we've already done it enough times or repetition with fade is disabled - // in this case we will call renderSilent with a forced silent block - // this ensures the correct tail from the previously mixed block and the correct spatialization of first block - // of any upcoming audio - - if (!streamToAdd.isStereo() && !isEcho) { - // get the existing listener-source HRTF object, or create a new one - auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier()); - - // this is not done for stereo streams since they do not go through the HRTF - static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {}; - hrtf.renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain, - AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); - - ++stats.hrtfSilentRenders; + // set up the bounding box for the node + AABox 
nodeBox(nodeData->getAvatarBoundingBoxCorner(), nodeData->getAvatarBoundingBoxScale()); + // Clamp the size of the bounding box to a minimum scale + if (glm::any(glm::lessThan(nodeData->getAvatarBoundingBoxScale(), minBubbleSize))) { + nodeBox.setScaleStayCentered(minBubbleSize); } - return; - } - } + // quadruple the scale of both bounding boxes + listenerBox.embiggen(4.0f); + nodeBox.embiggen(4.0f); - // grab the stream from the ring buffer - AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput(); - - if (streamToAdd.isStereo() || isEcho) { - // this is a stereo source or server echo so we do not pass it through the HRTF - // simply apply our calculated gain to each sample - if (streamToAdd.isStereo()) { - for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) { - _mixSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE); - } - - ++stats.manualStereoMixes; + // perform the collision check between the two bounding boxes + ignore = listenerBox.touches(nodeBox); } else { - for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) { - auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE); - _mixSamples[i] += monoSample; - _mixSamples[i + 1] += monoSample; - } - - ++stats.manualEchoMixes; + ignore = false; } - - return; } - // get the existing listener-source HRTF object, or create a new one - auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier()); - - streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); - - // if the frame we're about to mix is silent, simply call render silent and move on - if (streamToAdd.getLastPopOutputLoudness() == 0.0f) { - // silent frame from source - - // we still need to call renderSilent via the HRTF for mono source - hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain, - 
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); - - ++stats.hrtfSilentRenders; - - return; - } - - float audibilityThreshold = AudioMixer::getMinimumAudibilityThreshold(); - if (audibilityThreshold > 0.0f && - streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= audibilityThreshold) { - // the mixer is struggling so we're going to drop off some streams - - // we call renderSilent via the HRTF with the actual frame data and a gain of 0.0 - hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f, - AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); - - ++stats.hrtfStruggleRenders; - - return; - } - - ++stats.hrtfRenders; - - // mono stream, call the HRTF with our block and calculated azimuth and gain - hrtf.render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain, - AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL); + return ignore; } -float AudioMixerSlave::gainForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd, +float gainForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, bool isEcho) { float gain = 1.0f; @@ -472,7 +535,7 @@ float AudioMixerSlave::gainForSource(const AvatarAudioStream& listeningNodeStrea return gain; } -float AudioMixerSlave::azimuthForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd, +float azimuthForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition) { glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation()); @@ -482,7 +545,7 @@ float AudioMixerSlave::azimuthForSource(const AvatarAudioStream& listeningNodeSt // project the rotated source position vector onto the XZ plane rotatedSourcePosition.y = 0.0f; - static const float SOURCE_DISTANCE_THRESHOLD = 1e-30f; + const float 
SOURCE_DISTANCE_THRESHOLD = 1e-30f; if (glm::length2(rotatedSourcePosition) > SOURCE_DISTANCE_THRESHOLD) { // produce an oriented angle about the y-axis diff --git a/assignment-client/src/audio/AudioMixerSlave.h b/assignment-client/src/audio/AudioMixerSlave.h index c4aabfbb4a..7b59500629 100644 --- a/assignment-client/src/audio/AudioMixerSlave.h +++ b/assignment-client/src/audio/AudioMixerSlave.h @@ -30,7 +30,7 @@ class AudioMixerSlave { public: using ConstIter = NodeList::const_iterator; - void configure(ConstIter begin, ConstIter end, unsigned int frame); + void configure(ConstIter begin, ConstIter end, unsigned int frame, float throttlingRatio); // mix and broadcast non-ignored streams to the node // returns true if a mixed packet was sent to the node @@ -40,15 +40,14 @@ public: private: // create mix, returns true if mix has audio - bool prepareMix(const SharedNodePointer& node); - // add a stream to the mix - void addStreamToMix(AudioMixerClientData& listenerData, const QUuid& streamerID, + bool prepareMix(const SharedNodePointer& listener); + void throttleStream(AudioMixerClientData& listenerData, const QUuid& streamerID, const AvatarAudioStream& listenerStream, const PositionalAudioStream& streamer); - - float gainForSource(const AvatarAudioStream& listener, const PositionalAudioStream& streamer, - const glm::vec3& relativePosition, bool isEcho); - float azimuthForSource(const AvatarAudioStream& listener, const PositionalAudioStream& streamer, - const glm::vec3& relativePosition); + void mixStream(AudioMixerClientData& listenerData, const QUuid& streamerID, + const AvatarAudioStream& listenerStream, const PositionalAudioStream& streamer); + void addStream(AudioMixerClientData& listenerData, const QUuid& streamerID, + const AvatarAudioStream& listenerStream, const PositionalAudioStream& streamer, + bool throttle); // mixing buffers float _mixSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO]; @@ -58,6 +57,7 @@ private: ConstIter _begin; ConstIter _end; 
unsigned int _frame { 0 }; + float _throttlingRatio { 0.0f }; }; #endif // hifi_AudioMixerSlave_h diff --git a/assignment-client/src/audio/AudioMixerSlavePool.cpp b/assignment-client/src/audio/AudioMixerSlavePool.cpp index 6446092448..9b20572b84 100644 --- a/assignment-client/src/audio/AudioMixerSlavePool.cpp +++ b/assignment-client/src/audio/AudioMixerSlavePool.cpp @@ -41,7 +41,7 @@ void AudioMixerSlaveThread::wait() { }); ++_pool._numStarted; } - configure(_pool._begin, _pool._end, _pool._frame); + configure(_pool._begin, _pool._end, _pool._frame, _pool._throttlingRatio); } void AudioMixerSlaveThread::notify(bool stopping) { @@ -64,13 +64,14 @@ bool AudioMixerSlaveThread::try_pop(SharedNodePointer& node) { static AudioMixerSlave slave; #endif -void AudioMixerSlavePool::mix(ConstIter begin, ConstIter end, unsigned int frame) { +void AudioMixerSlavePool::mix(ConstIter begin, ConstIter end, unsigned int frame, float throttlingRatio) { _begin = begin; _end = end; _frame = frame; + _throttlingRatio = throttlingRatio; #ifdef AUDIO_SINGLE_THREADED - slave.configure(_begin, _end, frame); + slave.configure(_begin, _end, frame, throttlingRatio); std::for_each(begin, end, [&](const SharedNodePointer& node) { slave.mix(node); }); @@ -131,7 +132,7 @@ void AudioMixerSlavePool::setNumThreads(int numThreads) { } void AudioMixerSlavePool::resize(int numThreads) { - assert(_numThreads == _slaves.size()); + assert(_numThreads == (int)_slaves.size()); #ifdef AUDIO_SINGLE_THREADED qDebug("%s: running single threaded", __FUNCTION__, numThreads); @@ -182,6 +183,6 @@ void AudioMixerSlavePool::resize(int numThreads) { } _numThreads = _numStarted = _numFinished = numThreads; - assert(_numThreads == _slaves.size()); + assert(_numThreads == (int)_slaves.size()); #endif } diff --git a/assignment-client/src/audio/AudioMixerSlavePool.h b/assignment-client/src/audio/AudioMixerSlavePool.h index e8781950f3..19d2315d12 100644 --- a/assignment-client/src/audio/AudioMixerSlavePool.h +++ 
b/assignment-client/src/audio/AudioMixerSlavePool.h @@ -61,7 +61,7 @@ public: ~AudioMixerSlavePool() { resize(0); } // mix on slave threads - void mix(ConstIter begin, ConstIter end, unsigned int frame); + void mix(ConstIter begin, ConstIter end, unsigned int frame, float throttlingRatio); // iterate over all slaves void each(std::function functor); @@ -90,6 +90,7 @@ private: // frame state Queue _queue; unsigned int _frame { 0 }; + float _throttlingRatio { 0.0f }; ConstIter _begin; ConstIter _end; }; diff --git a/assignment-client/src/audio/AudioMixerStats.cpp b/assignment-client/src/audio/AudioMixerStats.cpp index 94115ad5ff..a50c0d26c1 100644 --- a/assignment-client/src/audio/AudioMixerStats.cpp +++ b/assignment-client/src/audio/AudioMixerStats.cpp @@ -17,7 +17,7 @@ void AudioMixerStats::reset() { totalMixes = 0; hrtfRenders = 0; hrtfSilentRenders = 0; - hrtfStruggleRenders = 0; + hrtfThrottleRenders = 0; manualStereoMixes = 0; manualEchoMixes = 0; } @@ -28,7 +28,7 @@ void AudioMixerStats::accumulate(const AudioMixerStats& otherStats) { totalMixes += otherStats.totalMixes; hrtfRenders += otherStats.hrtfRenders; hrtfSilentRenders += otherStats.hrtfSilentRenders; - hrtfStruggleRenders += otherStats.hrtfStruggleRenders; + hrtfThrottleRenders += otherStats.hrtfThrottleRenders; manualStereoMixes += otherStats.manualStereoMixes; manualEchoMixes += otherStats.manualEchoMixes; } diff --git a/assignment-client/src/audio/AudioMixerStats.h b/assignment-client/src/audio/AudioMixerStats.h index 5aefe611f0..cb85006061 100644 --- a/assignment-client/src/audio/AudioMixerStats.h +++ b/assignment-client/src/audio/AudioMixerStats.h @@ -20,7 +20,7 @@ struct AudioMixerStats { int hrtfRenders { 0 }; int hrtfSilentRenders { 0 }; - int hrtfStruggleRenders { 0 }; + int hrtfThrottleRenders { 0 }; int manualStereoMixes { 0 }; int manualEchoMixes { 0 }; diff --git a/assignment-client/src/avatars/AvatarMixer.cpp b/assignment-client/src/avatars/AvatarMixer.cpp index 11cbd73970..61164ee8d7 
100644 --- a/assignment-client/src/avatars/AvatarMixer.cpp +++ b/assignment-client/src/avatars/AvatarMixer.cpp @@ -262,8 +262,12 @@ void AvatarMixer::broadcastAvatarData() { // setup a PacketList for the avatarPackets auto avatarPacketList = NLPacketList::create(PacketType::BulkAvatarData); - if (avatar.getSessionDisplayName().isEmpty() && // We haven't set it yet... - nodeData->getReceivedIdentity()) { // ... but we have processed identity (with possible displayName). + if (nodeData->getAvatarSessionDisplayNameMustChange()) { + const QString& existingBaseDisplayName = nodeData->getBaseDisplayName(); + if (--_sessionDisplayNames[existingBaseDisplayName].second <= 0) { + _sessionDisplayNames.remove(existingBaseDisplayName); + } + QString baseName = avatar.getDisplayName().trimmed(); const QRegularExpression curses{ "fuck|shit|damn|cock|cunt" }; // POC. We may eventually want something much more elaborate (subscription?). baseName = baseName.replace(curses, "*"); // Replace rather than remove, so that people have a clue that the person's a jerk. @@ -276,11 +280,14 @@ void AvatarMixer::broadcastAvatarData() { QPair& soFar = _sessionDisplayNames[baseName]; // Inserts and answers 0, 0 if not already present, which is what we want. int& highWater = soFar.first; nodeData->setBaseDisplayName(baseName); - avatar.setSessionDisplayName((highWater > 0) ? baseName + "_" + QString::number(highWater) : baseName); + QString sessionDisplayName = (highWater > 0) ? baseName + "_" + QString::number(highWater) : baseName; + avatar.setSessionDisplayName(sessionDisplayName); highWater++; soFar.second++; // refcount nodeData->flagIdentityChange(); - sendIdentityPacket(nodeData, node); // Tell new node about its sessionUUID. Others will find out below. + nodeData->setAvatarSessionDisplayNameMustChange(false); + sendIdentityPacket(nodeData, node); // Tell node whose name changed about its new session display name. Others will find out below. 
+ qDebug() << "Giving session display name" << sessionDisplayName << "to node with ID" << node->getUUID(); } // this is an AGENT we have received head data from @@ -416,12 +423,17 @@ void AvatarMixer::broadcastAvatarData() { nodeData->incrementAvatarOutOfView(); } else { detail = distribution(generator) < AVATAR_SEND_FULL_UPDATE_RATIO - ? AvatarData::SendAllData : AvatarData::IncludeSmallData; + ? AvatarData::SendAllData : AvatarData::CullSmallData; nodeData->incrementAvatarInView(); } numAvatarDataBytes += avatarPacketList->write(otherNode->getUUID().toRfc4122()); - numAvatarDataBytes += avatarPacketList->write(otherAvatar.toByteArray(detail)); + auto lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID()); + QVector& lastSentJointsForOther = nodeData->getLastOtherAvatarSentJoints(otherNode->getUUID()); + bool distanceAdjust = true; + glm::vec3 viewerPosition = nodeData->getPosition(); + auto bytes = otherAvatar.toByteArray(detail, lastEncodeForOther, lastSentJointsForOther, distanceAdjust, viewerPosition, &lastSentJointsForOther); + numAvatarDataBytes += avatarPacketList->write(bytes); avatarPacketList->endSegment(); }); @@ -581,10 +593,15 @@ void AvatarMixer::handleAvatarIdentityPacket(QSharedPointer mes // parse the identity packet and update the change timestamp if appropriate AvatarData::Identity identity; AvatarData::parseAvatarIdentityPacket(message->getMessage(), identity); - if (avatar.processAvatarIdentity(identity)) { + bool identityChanged = false; + bool displayNameChanged = false; + avatar.processAvatarIdentity(identity, identityChanged, displayNameChanged); + if (identityChanged) { QMutexLocker nodeDataLocker(&nodeData->getMutex()); nodeData->flagIdentityChange(); - nodeData->setReceivedIdentity(); + if (displayNameChanged) { + nodeData->setAvatarSessionDisplayNameMustChange(true); + } } } } @@ -674,8 +691,8 @@ void AvatarMixer::run() { void AvatarMixer::domainSettingsRequestComplete() { auto nodeList = 
DependencyManager::get(); - nodeList->addNodeTypeToInterestSet(NodeType::Agent); - + nodeList->addSetOfNodeTypesToNodeInterestSet({ NodeType::Agent, NodeType::EntityScriptServer }); + // parse the settings to pull out the values we need parseDomainServerSettings(nodeList->getDomainHandler().getSettingsObject()); diff --git a/assignment-client/src/avatars/AvatarMixerClientData.h b/assignment-client/src/avatars/AvatarMixerClientData.h index f18cfdde1b..aa011f8baf 100644 --- a/assignment-client/src/avatars/AvatarMixerClientData.h +++ b/assignment-client/src/avatars/AvatarMixerClientData.h @@ -53,8 +53,8 @@ public: HRCTime getIdentityChangeTimestamp() const { return _identityChangeTimestamp; } void flagIdentityChange() { _identityChangeTimestamp = p_high_resolution_clock::now(); } - bool getReceivedIdentity() const { return _gotIdentity; } - void setReceivedIdentity() { _gotIdentity = true; } + bool getAvatarSessionDisplayNameMustChange() const { return _avatarSessionDisplayNameMustChange; } + void setAvatarSessionDisplayNameMustChange(bool set = true) { _avatarSessionDisplayNameMustChange = set; } void setFullRateDistance(float fullRateDistance) { _fullRateDistance = fullRateDistance; } float getFullRateDistance() const { return _fullRateDistance; } @@ -104,6 +104,22 @@ public: bool getRequestsDomainListData() { return _requestsDomainListData; } void setRequestsDomainListData(bool requesting) { _requestsDomainListData = requesting; } + quint64 getLastOtherAvatarEncodeTime(QUuid otherAvatar) { + quint64 result = 0; + if (_lastOtherAvatarEncodeTime.find(otherAvatar) != _lastOtherAvatarEncodeTime.end()) { + result = _lastOtherAvatarEncodeTime[otherAvatar]; + } + _lastOtherAvatarEncodeTime[otherAvatar] = usecTimestampNow(); + return result; + } + + QVector& getLastOtherAvatarSentJoints(QUuid otherAvatar) { + _lastOtherAvatarSentJoints[otherAvatar].resize(_avatar->getJointCount()); + return _lastOtherAvatarSentJoints[otherAvatar]; + } + + + private: AvatarSharedPointer 
_avatar { new AvatarData() }; @@ -111,8 +127,13 @@ private: std::unordered_map _lastBroadcastSequenceNumbers; std::unordered_set _hasReceivedFirstPacketsFrom; + // this is a map of the last time we encoded an "other" avatar for + // sending to "this" node + std::unordered_map _lastOtherAvatarEncodeTime; + std::unordered_map> _lastOtherAvatarSentJoints; + HRCTime _identityChangeTimestamp; - bool _gotIdentity { false }; + bool _avatarSessionDisplayNameMustChange{ false }; float _fullRateDistance = FLT_MAX; float _maxAvatarDistance = FLT_MAX; diff --git a/assignment-client/src/avatars/ScriptableAvatar.cpp b/assignment-client/src/avatars/ScriptableAvatar.cpp index b4c9a8e89d..95bcbb587e 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.cpp +++ b/assignment-client/src/avatars/ScriptableAvatar.cpp @@ -14,6 +14,13 @@ #include #include "ScriptableAvatar.h" +QByteArray ScriptableAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition, QVector* sentJointDataOut) { + _globalPosition = getPosition(); + return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut); +} + + // hold and priority unused but kept so that client side JS can run. 
void ScriptableAvatar::startAnimation(const QString& url, float fps, float priority, bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) { diff --git a/assignment-client/src/avatars/ScriptableAvatar.h b/assignment-client/src/avatars/ScriptableAvatar.h index 18d64f4ac5..be7a90adf9 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.h +++ b/assignment-client/src/avatars/ScriptableAvatar.h @@ -27,6 +27,10 @@ public: Q_INVOKABLE void stopAnimation(); Q_INVOKABLE AnimationDetails getAnimationDetails(); virtual void setSkeletonModelURL(const QUrl& skeletonModelURL) override; + + virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector& lastSentJointData, + bool distanceAdjust = false, glm::vec3 viewerPosition = glm::vec3(0), QVector* sentJointDataOut = nullptr) override; + private slots: void update(float deltatime); diff --git a/assignment-client/src/entities/AssignmentParentFinder.cpp b/assignment-client/src/entities/AssignmentParentFinder.cpp index a0232daff4..b8737bdc63 100644 --- a/assignment-client/src/entities/AssignmentParentFinder.cpp +++ b/assignment-client/src/entities/AssignmentParentFinder.cpp @@ -11,6 +11,8 @@ #include "AssignmentParentFinder.h" +#include + SpatiallyNestableWeakPointer AssignmentParentFinder::find(QUuid parentID, bool& success, SpatialParentTree* entityTree) const { SpatiallyNestableWeakPointer parent; @@ -25,10 +27,21 @@ SpatiallyNestableWeakPointer AssignmentParentFinder::find(QUuid parentID, bool& } else { parent = _tree->findEntityByEntityItemID(parentID); } - if (parent.expired()) { - success = false; - } else { + if (!parent.expired()) { success = true; + return parent; } + + // search avatars + if (DependencyManager::isSet()) { + auto avatarHashMap = DependencyManager::get(); + parent = avatarHashMap->getAvatarBySessionID(parentID); + if (!parent.expired()) { + success = true; + return parent; + } + } + + success = false; return parent; } diff --git 
a/assignment-client/src/entities/EntityServer.cpp b/assignment-client/src/entities/EntityServer.cpp index 23eec6197c..dc1a693590 100644 --- a/assignment-client/src/entities/EntityServer.cpp +++ b/assignment-client/src/entities/EntityServer.cpp @@ -9,9 +9,12 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // +#include #include #include #include +#include +#include #include "EntityServer.h" #include "EntityServerConsts.h" @@ -26,6 +29,10 @@ EntityServer::EntityServer(ReceivedMessage& message) : OctreeServer(message), _entitySimulation(NULL) { + ResourceManager::init(); + DependencyManager::set(); + DependencyManager::set(); + auto& packetReceiver = DependencyManager::get()->getPacketReceiver(); packetReceiver.registerListenerForTypes({ PacketType::EntityAdd, PacketType::EntityEdit, PacketType::EntityErase }, this, "handleEntityPacket"); @@ -285,6 +292,96 @@ void EntityServer::readAdditionalConfiguration(const QJsonObject& settingsSectio } else { tree->setEntityScriptSourceWhitelist(""); } + + if (readOptionString("entityEditFilter", settingsSectionObject, _entityEditFilter) && !_entityEditFilter.isEmpty()) { + // Tell the tree that we have a filter, so that it doesn't accept edits until we have a filter function set up. + std::static_pointer_cast(_tree)->setHasEntityFilter(true); + // Now fetch script from file asynchronously. 
+ QUrl scriptURL(_entityEditFilter); + + // The following should be abstracted out for use in Agent.cpp (and maybe later AvatarMixer.cpp) + if (scriptURL.scheme().isEmpty() || (scriptURL.scheme() == URL_SCHEME_FILE)) { + qWarning() << "Cannot load script from local filesystem, because assignment may be on a different computer."; + scriptRequestFinished(); + return; + } + auto scriptRequest = ResourceManager::createResourceRequest(this, scriptURL); + if (!scriptRequest) { + qWarning() << "Could not create ResourceRequest for Agent script at" << scriptURL.toString(); + scriptRequestFinished(); + return; + } + // Agent.cpp sets up a timeout here, but that is unnecessary, as ResourceRequest has its own. + connect(scriptRequest, &ResourceRequest::finished, this, &EntityServer::scriptRequestFinished); + // FIXME: handle atp rquests setup here. See Agent::requestScript() + qInfo() << "Requesting script at URL" << qPrintable(scriptRequest->getUrl().toString()); + scriptRequest->send(); + qDebug() << "script request sent"; + } +} + +// Copied from ScriptEngine.cpp. We should make this a class method for reuse. +// Note: I've deliberately stopped short of using ScriptEngine instead of QScriptEngine, as that is out of project scope at this point. 
+static bool hasCorrectSyntax(const QScriptProgram& program) { + const auto syntaxCheck = QScriptEngine::checkSyntax(program.sourceCode()); + if (syntaxCheck.state() != QScriptSyntaxCheckResult::Valid) { + const auto error = syntaxCheck.errorMessage(); + const auto line = QString::number(syntaxCheck.errorLineNumber()); + const auto column = QString::number(syntaxCheck.errorColumnNumber()); + const auto message = QString("[SyntaxError] %1 in %2:%3(%4)").arg(error, program.fileName(), line, column); + qCritical() << qPrintable(message); + return false; + } + return true; +} +static bool hadUncaughtExceptions(QScriptEngine& engine, const QString& fileName) { + if (engine.hasUncaughtException()) { + const auto backtrace = engine.uncaughtExceptionBacktrace(); + const auto exception = engine.uncaughtException().toString(); + const auto line = QString::number(engine.uncaughtExceptionLineNumber()); + engine.clearExceptions(); + + static const QString SCRIPT_EXCEPTION_FORMAT = "[UncaughtException] %1 in %2:%3"; + auto message = QString(SCRIPT_EXCEPTION_FORMAT).arg(exception, fileName, line); + if (!backtrace.empty()) { + static const auto lineSeparator = "\n "; + message += QString("\n[Backtrace]%1%2").arg(lineSeparator, backtrace.join(lineSeparator)); + } + qCritical() << qPrintable(message); + return true; + } + return false; +} +void EntityServer::scriptRequestFinished() { + qDebug() << "script request completed"; + auto scriptRequest = qobject_cast(sender()); + const QString urlString = scriptRequest->getUrl().toString(); + if (scriptRequest && scriptRequest->getResult() == ResourceRequest::Success) { + auto scriptContents = scriptRequest->getData(); + qInfo() << "Downloaded script:" << scriptContents; + QScriptProgram program(scriptContents, urlString); + if (hasCorrectSyntax(program)) { + _entityEditFilterEngine.evaluate(scriptContents); + if (!hadUncaughtExceptions(_entityEditFilterEngine, urlString)) { + 
std::static_pointer_cast(_tree)->initEntityEditFilterEngine(&_entityEditFilterEngine, [this]() { + return hadUncaughtExceptions(_entityEditFilterEngine, _entityEditFilter); + }); + scriptRequest->deleteLater(); + qDebug() << "script request filter processed"; + return; + } + } + } else if (scriptRequest) { + qCritical() << "Failed to download script at" << urlString; + // See HTTPResourceRequest::onRequestFinished for interpretation of codes. For example, a 404 is code 6 and 403 is 3. A timeout is 2. Go figure. + qCritical() << "ResourceRequest error was" << scriptRequest->getResult(); + } else { + qCritical() << "Failed to create script request."; + } + // Hard stop of the assignment client on failure. We don't want anyone to think they have a filter in place when they don't. + // Alas, only indications will be the above logging with assignment client restarting repeatedly, and clients will not see any entities. + qDebug() << "script request failure causing stop"; + stop(); } void EntityServer::nodeAdded(SharedNodePointer node) { diff --git a/assignment-client/src/entities/EntityServer.h b/assignment-client/src/entities/EntityServer.h index 0486a97ede..f142145d5f 100644 --- a/assignment-client/src/entities/EntityServer.h +++ b/assignment-client/src/entities/EntityServer.h @@ -69,6 +69,7 @@ protected: private slots: void handleEntityPacket(QSharedPointer message, SharedNodePointer senderNode); + void scriptRequestFinished(); private: SimpleEntitySimulationPointer _entitySimulation; @@ -76,6 +77,9 @@ private: QReadWriteLock _viewerSendingStatsLock; QMap> _viewerSendingStats; + + QString _entityEditFilter{}; + QScriptEngine _entityEditFilterEngine{}; }; #endif // hifi_EntityServer_h diff --git a/assignment-client/src/messages/MessagesMixer.cpp b/assignment-client/src/messages/MessagesMixer.cpp index 3baea67486..7622c78f35 100644 --- a/assignment-client/src/messages/MessagesMixer.cpp +++ b/assignment-client/src/messages/MessagesMixer.cpp @@ -44,8 +44,7 @@ void 
MessagesMixer::handleMessages(QSharedPointer receivedMessa nodeList->eachMatchingNode( [&](const SharedNodePointer& node)->bool { - return node->getType() == NodeType::Agent && node->getActiveSocket() && - _channelSubscribers[channel].contains(node->getUUID()); + return node->getActiveSocket() && _channelSubscribers[channel].contains(node->getUUID()); }, [&](const SharedNodePointer& node) { auto packetList = MessagesClient::encodeMessagesPacket(channel, message, senderID); @@ -83,5 +82,6 @@ void MessagesMixer::sendStatsPacket() { void MessagesMixer::run() { ThreadedAssignment::commonInit(MESSAGES_MIXER_LOGGING_NAME, NodeType::MessagesMixer); - DependencyManager::get()->addNodeTypeToInterestSet(NodeType::Agent); -} \ No newline at end of file + auto nodeList = DependencyManager::get(); + nodeList->addSetOfNodeTypesToNodeInterestSet({ NodeType::Agent, NodeType::EntityScriptServer }); +} diff --git a/assignment-client/src/octree/OctreeSendThread.cpp b/assignment-client/src/octree/OctreeSendThread.cpp index 0fbaf978e2..afc17d71aa 100644 --- a/assignment-client/src/octree/OctreeSendThread.cpp +++ b/assignment-client/src/octree/OctreeSendThread.cpp @@ -316,8 +316,9 @@ int OctreeSendThread::packetDistributor(SharedNodePointer node, OctreeQueryNode* int truePacketsSent = 0; int trueBytesSent = 0; int packetsSentThisInterval = 0; - bool isFullScene = ((!viewFrustumChanged) && nodeData->getViewFrustumJustStoppedChanging()) - || nodeData->hasLodChanged(); + bool isFullScene = nodeData->haveJSONParametersChanged() || + (nodeData->getUsesFrustum() + && ((!viewFrustumChanged && nodeData->getViewFrustumJustStoppedChanging()) || nodeData->hasLodChanged())); bool somethingToSend = true; // assume we have something @@ -432,7 +433,9 @@ int OctreeSendThread::packetDistributor(SharedNodePointer node, OctreeQueryNode* boundaryLevelAdjust, octreeSizeScale, nodeData->getLastTimeBagEmpty(), isFullScene, &nodeData->stats, _myServer->getJurisdiction(), - &nodeData->extraEncodeData); + 
&nodeData->extraEncodeData, + nodeData->getUsesFrustum(), + nodeData); nodeData->copyCurrentViewFrustum(params.viewFrustum); if (viewFrustumChanged) { nodeData->copyLastKnownViewFrustum(params.lastViewFrustum); diff --git a/assignment-client/src/octree/OctreeServer.cpp b/assignment-client/src/octree/OctreeServer.cpp index 3e36250a82..c36a9be050 100644 --- a/assignment-client/src/octree/OctreeServer.cpp +++ b/assignment-client/src/octree/OctreeServer.cpp @@ -660,6 +660,7 @@ bool OctreeServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url quint64 averageUpdateTime = _tree->getAverageUpdateTime(); quint64 averageCreateTime = _tree->getAverageCreateTime(); quint64 averageLoggingTime = _tree->getAverageLoggingTime(); + quint64 averageFilterTime = _tree->getAverageFilterTime(); int FLOAT_PRECISION = 3; @@ -699,6 +700,8 @@ bool OctreeServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url .arg(locale.toString((uint)averageCreateTime).rightJustified(COLUMN_WIDTH, ' ')); statsString += QString(" Average Logging Time: %1 usecs\r\n") .arg(locale.toString((uint)averageLoggingTime).rightJustified(COLUMN_WIDTH, ' ')); + statsString += QString(" Average Filter Time: %1 usecs\r\n") + .arg(locale.toString((uint)averageFilterTime).rightJustified(COLUMN_WIDTH, ' ')); int senderNumber = 0; @@ -1136,8 +1139,8 @@ void OctreeServer::domainSettingsRequestComplete() { auto nodeList = DependencyManager::get(); // we need to ask the DS about agents so we can ping/reply with them - nodeList->addNodeTypeToInterestSet(NodeType::Agent); - + nodeList->addSetOfNodeTypesToNodeInterestSet({ NodeType::Agent, NodeType::EntityScriptServer }); + auto& packetReceiver = DependencyManager::get()->getPacketReceiver(); packetReceiver.registerListener(getMyQueryMessageType(), this, "handleOctreeQueryPacket"); packetReceiver.registerListener(PacketType::OctreeDataNack, this, "handleOctreeDataNackPacket"); diff --git a/assignment-client/src/scripts/EntityScriptServer.cpp 
b/assignment-client/src/scripts/EntityScriptServer.cpp new file mode 100644 index 0000000000..7f4593910e --- /dev/null +++ b/assignment-client/src/scripts/EntityScriptServer.cpp @@ -0,0 +1,372 @@ +// +// EntityScriptServer.cpp +// assignment-client/src/scripts +// +// Created by Clément Brisset on 1/5/17. +// Copyright 2013 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include "EntityScriptServer.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ClientServerUtils.h" +#include "../entities/AssignmentParentFinder.h" + +int EntityScriptServer::_entitiesScriptEngineCount = 0; + +EntityScriptServer::EntityScriptServer(ReceivedMessage& message) : ThreadedAssignment(message) { + DependencyManager::get()->setPacketSender(&_entityEditSender); + + ResourceManager::init(); + + DependencyManager::registerInheritance(); + + DependencyManager::set(); + DependencyManager::set(); + DependencyManager::set(); + + DependencyManager::set(); + DependencyManager::set(ScriptEngine::ENTITY_SERVER_SCRIPT); + + + auto& packetReceiver = DependencyManager::get()->getPacketReceiver(); + packetReceiver.registerListenerForTypes({ PacketType::OctreeStats, PacketType::EntityData, PacketType::EntityErase }, + this, "handleOctreePacket"); + packetReceiver.registerListener(PacketType::Jurisdiction, this, "handleJurisdictionPacket"); + packetReceiver.registerListener(PacketType::SelectedAudioFormat, this, "handleSelectedAudioFormat"); + + auto avatarHashMap = DependencyManager::set(); + packetReceiver.registerListener(PacketType::BulkAvatarData, avatarHashMap.data(), "processAvatarDataPacket"); + packetReceiver.registerListener(PacketType::KillAvatar, avatarHashMap.data(), "processKillAvatar"); + packetReceiver.registerListener(PacketType::AvatarIdentity, avatarHashMap.data(), 
"processAvatarIdentityPacket"); + + packetReceiver.registerListener(PacketType::ReloadEntityServerScript, this, "handleReloadEntityServerScriptPacket"); + packetReceiver.registerListener(PacketType::EntityScriptGetStatus, this, "handleEntityScriptGetStatusPacket"); +} + +static const QString ENTITY_SCRIPT_SERVER_LOGGING_NAME = "entity-script-server"; + +void EntityScriptServer::handleReloadEntityServerScriptPacket(QSharedPointer message, SharedNodePointer senderNode) { + // These are temporary checks until we can ensure that nodes eventually disconnect if the Domain Server stops telling them + // about each other. + if (senderNode->getCanRez() || senderNode->getCanRezTmp()) { + auto entityID = QUuid::fromRfc4122(message->read(NUM_BYTES_RFC4122_UUID)); + + if (_entityViewer.getTree() && !_shuttingDown) { + qDebug() << "Reloading: " << entityID; + _entitiesScriptEngine->unloadEntityScript(entityID); + checkAndCallPreload(entityID, true); + } + } +} + +void EntityScriptServer::handleEntityScriptGetStatusPacket(QSharedPointer message, SharedNodePointer senderNode) { + // These are temporary checks until we can ensure that nodes eventually disconnect if the Domain Server stops telling them + // about each other. 
+ if (senderNode->getCanRez() || senderNode->getCanRezTmp()) { + MessageID messageID; + message->readPrimitive(&messageID); + auto entityID = QUuid::fromRfc4122(message->read(NUM_BYTES_RFC4122_UUID)); + + auto replyPacketList = NLPacketList::create(PacketType::EntityScriptGetStatusReply, QByteArray(), true, true); + replyPacketList->writePrimitive(messageID); + + EntityScriptDetails details; + if (_entitiesScriptEngine->getEntityScriptDetails(entityID, details)) { + replyPacketList->writePrimitive(true); + replyPacketList->writePrimitive(details.status); + replyPacketList->writeString(details.errorInfo); + } else { + replyPacketList->writePrimitive(false); + } + + auto nodeList = DependencyManager::get(); + nodeList->sendPacketList(std::move(replyPacketList), *senderNode); + } +} + +void EntityScriptServer::run() { + // make sure we request our script once the agent connects to the domain + auto nodeList = DependencyManager::get(); + + ThreadedAssignment::commonInit(ENTITY_SCRIPT_SERVER_LOGGING_NAME, NodeType::EntityScriptServer); + + // Setup MessagesClient + auto messagesClient = DependencyManager::set(); + QThread* messagesThread = new QThread; + messagesThread->setObjectName("Messages Client Thread"); + messagesClient->moveToThread(messagesThread); + connect(messagesThread, &QThread::started, messagesClient.data(), &MessagesClient::init); + messagesThread->start(); + + // make sure we hear about connected nodes so we can grab an ATP script if a request is pending + connect(nodeList.data(), &LimitedNodeList::nodeActivated, this, &EntityScriptServer::nodeActivated); + connect(nodeList.data(), &LimitedNodeList::nodeKilled, this, &EntityScriptServer::nodeKilled); + + nodeList->addSetOfNodeTypesToNodeInterestSet({ + NodeType::Agent, NodeType::AudioMixer, NodeType::AvatarMixer, + NodeType::EntityServer, NodeType::MessagesMixer, NodeType::AssetServer + }); + + // Setup Script Engine + resetEntitiesScriptEngine(); + + // we need to make sure that init has been called 
for our EntityScriptingInterface + // so that it actually has a jurisdiction listener when we ask it for it next + auto entityScriptingInterface = DependencyManager::get(); + entityScriptingInterface->init(); + _entityViewer.setJurisdictionListener(entityScriptingInterface->getJurisdictionListener()); + + _entityViewer.init(); + + // setup the JSON filter that asks for entities with a non-default serverScripts property + QJsonObject queryJSONParameters; + static const QString SERVER_SCRIPTS_PROPERTY = "serverScripts"; + queryJSONParameters[SERVER_SCRIPTS_PROPERTY] = EntityQueryFilterSymbol::NonDefault; + + // setup the JSON parameters so that OctreeQuery does not use a frustum and uses our JSON filter + _entityViewer.getOctreeQuery().setUsesFrustum(false); + _entityViewer.getOctreeQuery().setJSONParameters(queryJSONParameters); + + entityScriptingInterface->setEntityTree(_entityViewer.getTree()); + + DependencyManager::set(_entityViewer.getTree()); + + + auto tree = _entityViewer.getTree().get(); + connect(tree, &EntityTree::deletingEntity, this, &EntityScriptServer::deletingEntity, Qt::QueuedConnection); + connect(tree, &EntityTree::addingEntity, this, &EntityScriptServer::addingEntity, Qt::QueuedConnection); + connect(tree, &EntityTree::entityServerScriptChanging, this, &EntityScriptServer::entityServerScriptChanging, Qt::QueuedConnection); +} + +void EntityScriptServer::nodeActivated(SharedNodePointer activatedNode) { + if (activatedNode->getType() == NodeType::AudioMixer) { + negotiateAudioFormat(); + } +} + +void EntityScriptServer::negotiateAudioFormat() { + auto nodeList = DependencyManager::get(); + auto negotiateFormatPacket = NLPacket::create(PacketType::NegotiateAudioFormat); + auto codecPlugins = PluginManager::getInstance()->getCodecPlugins(); + quint8 numberOfCodecs = (quint8)codecPlugins.size(); + negotiateFormatPacket->writePrimitive(numberOfCodecs); + for (auto& plugin : codecPlugins) { + auto codecName = plugin->getName(); + 
negotiateFormatPacket->writeString(codecName); + } + + // grab our audio mixer from the NodeList, if it exists + SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer); + + if (audioMixer) { + // send off this mute packet + nodeList->sendPacket(std::move(negotiateFormatPacket), *audioMixer); + } +} + +void EntityScriptServer::handleSelectedAudioFormat(QSharedPointer message) { + QString selectedCodecName = message->readString(); + selectAudioFormat(selectedCodecName); +} + +void EntityScriptServer::selectAudioFormat(const QString& selectedCodecName) { + _selectedCodecName = selectedCodecName; + + qDebug() << "Selected Codec:" << _selectedCodecName; + + // release any old codec encoder/decoder first... + if (_codec && _encoder) { + _codec->releaseEncoder(_encoder); + _encoder = nullptr; + _codec = nullptr; + } + + auto codecPlugins = PluginManager::getInstance()->getCodecPlugins(); + for (auto& plugin : codecPlugins) { + if (_selectedCodecName == plugin->getName()) { + _codec = plugin; + _encoder = plugin->createEncoder(AudioConstants::SAMPLE_RATE, AudioConstants::MONO); + qDebug() << "Selected Codec Plugin:" << _codec.get(); + break; + } + } +} + +void EntityScriptServer::resetEntitiesScriptEngine() { + auto engineName = QString("Entities %1").arg(++_entitiesScriptEngineCount); + auto newEngine = QSharedPointer(new ScriptEngine(ScriptEngine::ENTITY_SERVER_SCRIPT, NO_SCRIPT, engineName)); + + auto webSocketServerConstructorValue = newEngine->newFunction(WebSocketServerClass::constructor); + newEngine->globalObject().setProperty("WebSocketServer", webSocketServerConstructorValue); + + newEngine->registerGlobalObject("SoundCache", DependencyManager::get().data()); + + // connect this script engines printedMessage signal to the global ScriptEngines these various messages + auto scriptEngines = DependencyManager::get().data(); + connect(newEngine.data(), &ScriptEngine::printedMessage, scriptEngines, &ScriptEngines::onPrintedMessage); + 
connect(newEngine.data(), &ScriptEngine::errorMessage, scriptEngines, &ScriptEngines::onErrorMessage); + connect(newEngine.data(), &ScriptEngine::warningMessage, scriptEngines, &ScriptEngines::onWarningMessage); + connect(newEngine.data(), &ScriptEngine::infoMessage, scriptEngines, &ScriptEngines::onInfoMessage); + + connect(newEngine.data(), &ScriptEngine::update, this, [this] { + _entityViewer.queryOctree(); + }); + + + newEngine->runInThread(); + DependencyManager::get()->setEntitiesScriptEngine(newEngine.data()); + + _entitiesScriptEngine.swap(newEngine); +} + + +void EntityScriptServer::clear() { + // unload and stop the engine + if (_entitiesScriptEngine) { + // do this here (instead of in deleter) to avoid marshalling unload signals back to this thread + _entitiesScriptEngine->unloadAllEntityScripts(); + _entitiesScriptEngine->stop(); + } + + // reset the engine + if (!_shuttingDown) { + resetEntitiesScriptEngine(); + } + + _entityViewer.clear(); +} + +void EntityScriptServer::shutdownScriptEngine() { + if (_entitiesScriptEngine) { + _entitiesScriptEngine->disconnectNonEssentialSignals(); // disconnect all slots/signals from the script engine, except essential + } + _shuttingDown = true; + + clear(); // always clear() on shutdown +} + +void EntityScriptServer::addingEntity(const EntityItemID& entityID) { + checkAndCallPreload(entityID); +} + +void EntityScriptServer::deletingEntity(const EntityItemID& entityID) { + if (_entityViewer.getTree() && !_shuttingDown && _entitiesScriptEngine) { + _entitiesScriptEngine->unloadEntityScript(entityID); + } +} + +void EntityScriptServer::entityServerScriptChanging(const EntityItemID& entityID, const bool reload) { + if (_entityViewer.getTree() && !_shuttingDown) { + _entitiesScriptEngine->unloadEntityScript(entityID); + checkAndCallPreload(entityID, reload); + } +} + +void EntityScriptServer::checkAndCallPreload(const EntityItemID& entityID, const bool reload) { + if (_entityViewer.getTree() && !_shuttingDown && 
_entitiesScriptEngine) { + + EntityItemPointer entity = _entityViewer.getTree()->findEntityByEntityItemID(entityID); + EntityScriptDetails details; + bool notRunning = !_entitiesScriptEngine->getEntityScriptDetails(entityID, details); + if (entity && (reload || notRunning || details.scriptText != entity->getServerScripts())) { + QString scriptUrl = entity->getServerScripts(); + if (!scriptUrl.isEmpty()) { + scriptUrl = ResourceManager::normalizeURL(scriptUrl); + qDebug() << "Loading entity server script" << scriptUrl << "for" << entityID; + ScriptEngine::loadEntityScript(_entitiesScriptEngine, entityID, scriptUrl, reload); + } + } + } +} + +void EntityScriptServer::nodeKilled(SharedNodePointer killedNode) { + if (!_shuttingDown && killedNode->getType() == NodeType::EntityServer) { + if (_entitiesScriptEngine) { + _entitiesScriptEngine->unloadAllEntityScripts(); + _entitiesScriptEngine->stop(); + } + + resetEntitiesScriptEngine(); + + _entityViewer.clear(); + } +} + +void EntityScriptServer::sendStatsPacket() { + +} + +void EntityScriptServer::handleOctreePacket(QSharedPointer message, SharedNodePointer senderNode) { + auto packetType = message->getType(); + + if (packetType == PacketType::OctreeStats) { + + int statsMessageLength = OctreeHeadlessViewer::parseOctreeStats(message, senderNode); + if (message->getSize() > statsMessageLength) { + // pull out the piggybacked packet and create a new QSharedPointer for it + int piggyBackedSizeWithHeader = message->getSize() - statsMessageLength; + + auto buffer = std::unique_ptr(new char[piggyBackedSizeWithHeader]); + memcpy(buffer.get(), message->getRawMessage() + statsMessageLength, piggyBackedSizeWithHeader); + + auto newPacket = NLPacket::fromReceivedPacket(std::move(buffer), piggyBackedSizeWithHeader, message->getSenderSockAddr()); + message = QSharedPointer::create(*newPacket); + } else { + return; // bail since no piggyback data + } + + packetType = message->getType(); + } // fall through to piggyback message + + if 
(packetType == PacketType::EntityData) { + _entityViewer.processDatagram(*message, senderNode); + } else if (packetType == PacketType::EntityErase) { + _entityViewer.processEraseMessage(*message, senderNode); + } +} + +void EntityScriptServer::handleJurisdictionPacket(QSharedPointer message, SharedNodePointer senderNode) { + NodeType_t nodeType; + message->peekPrimitive(&nodeType); + + // PacketType_JURISDICTION, first byte is the node type... + if (nodeType == NodeType::EntityServer) { + DependencyManager::get()->getJurisdictionListener()-> + queueReceivedPacket(message, senderNode); + } +} + +void EntityScriptServer::aboutToFinish() { + shutdownScriptEngine(); + + // our entity tree is going to go away so tell that to the EntityScriptingInterface + DependencyManager::get()->setEntityTree(nullptr); + + ResourceManager::cleanup(); + + // cleanup the AudioInjectorManager (and any still running injectors) + DependencyManager::destroy(); + DependencyManager::destroy(); + + // cleanup codec & encoder + if (_codec && _encoder) { + _codec->releaseEncoder(_encoder); + _encoder = nullptr; + } +} diff --git a/assignment-client/src/scripts/EntityScriptServer.h b/assignment-client/src/scripts/EntityScriptServer.h new file mode 100644 index 0000000000..9ea2d43aec --- /dev/null +++ b/assignment-client/src/scripts/EntityScriptServer.h @@ -0,0 +1,70 @@ +// +// EntityScriptServer.h +// assignment-client/src/scripts +// +// Created by Clément Brisset on 1/5/17. +// Copyright 2013 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_EntityScriptServer_h +#define hifi_EntityScriptServer_h + +#include + +#include +#include +#include +#include +#include + +class EntityScriptServer : public ThreadedAssignment { + Q_OBJECT + +public: + EntityScriptServer(ReceivedMessage& message); + + virtual void aboutToFinish() override; + +public slots: + void run() override; + void nodeActivated(SharedNodePointer activatedNode); + void nodeKilled(SharedNodePointer killedNode); + void sendStatsPacket() override; + +private slots: + void handleOctreePacket(QSharedPointer message, SharedNodePointer senderNode); + void handleJurisdictionPacket(QSharedPointer message, SharedNodePointer senderNode); + void handleSelectedAudioFormat(QSharedPointer message); + + void handleReloadEntityServerScriptPacket(QSharedPointer message, SharedNodePointer senderNode); + void handleEntityScriptGetStatusPacket(QSharedPointer message, SharedNodePointer senderNode); + +private: + void negotiateAudioFormat(); + void selectAudioFormat(const QString& selectedCodecName); + + void resetEntitiesScriptEngine(); + void clear(); + void shutdownScriptEngine(); + + void addingEntity(const EntityItemID& entityID); + void deletingEntity(const EntityItemID& entityID); + void entityServerScriptChanging(const EntityItemID& entityID, const bool reload); + void checkAndCallPreload(const EntityItemID& entityID, const bool reload = false); + + bool _shuttingDown { false }; + + static int _entitiesScriptEngineCount; + QSharedPointer _entitiesScriptEngine; + EntityEditPacketSender _entityEditSender; + EntityTreeHeadlessViewer _entityViewer; + + QString _selectedCodecName; + CodecPluginPointer _codec; + Encoder* _encoder { nullptr }; +}; + +#endif // hifi_EntityScriptServer_h diff --git a/cmake/externals/LibOVRPlatform/CMakeLists.txt b/cmake/externals/LibOVRPlatform/CMakeLists.txt new file mode 100644 index 0000000000..3622972a13 --- /dev/null +++ 
b/cmake/externals/LibOVRPlatform/CMakeLists.txt @@ -0,0 +1,32 @@ +include(ExternalProject) +include(SelectLibraryConfigurations) + +set(EXTERNAL_NAME LibOVRPlatform) + +string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER) + +if (WIN32) + + ExternalProject_Add( + ${EXTERNAL_NAME} + URL http://hifi-public.s3.amazonaws.com/dependencies/OVRPlatformSDK_v1.10.0.zip + URL_MD5 e6c8264af16d904e6506acd5172fa0a9 + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + LOG_DOWNLOAD 1 + ) + + ExternalProject_Get_Property(${EXTERNAL_NAME} SOURCE_DIR) + + if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8") + set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${SOURCE_DIR}/Windows/LibOVRPlatform64_1.lib CACHE TYPE INTERNAL) + else() + set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${SOURCE_DIR}/Windows/LibOVRPlatform32_1.lib CACHE TYPE INTERNAL) + endif() + + set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${SOURCE_DIR}/Include CACHE TYPE INTERNAL) +endif () + +# Hide this external target (for ide users) +set_target_properties(${EXTERNAL_NAME} PROPERTIES FOLDER "hidden/externals") diff --git a/cmake/externals/quazip/CMakeLists.txt b/cmake/externals/quazip/CMakeLists.txt index b8b3fe43d8..3a86852d76 100644 --- a/cmake/externals/quazip/CMakeLists.txt +++ b/cmake/externals/quazip/CMakeLists.txt @@ -38,10 +38,10 @@ set(${EXTERNAL_NAME_UPPER}_DLL_PATH ${INSTALL_DIR}/lib CACHE FILEPATH "Location if (APPLE) set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${INSTALL_DIR}/lib/libquazip5.1.0.0.dylib CACHE FILEPATH "Location of QuaZip release library") - set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG ${INSTALL_DIR}/lib/libquazip5.1.0.0.dylib CACHE FILEPATH "Location of QuaZip release library") + set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG ${INSTALL_DIR}/lib/libquazip5d.1.0.0.dylib CACHE FILEPATH "Location of QuaZip release library") elseif (WIN32) set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${INSTALL_DIR}/lib/quazip5.lib CACHE FILEPATH "Location of QuaZip release library") - set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG 
${INSTALL_DIR}/lib/quazip5.lib CACHE FILEPATH "Location of QuaZip release library") + set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG ${INSTALL_DIR}/lib/quazip5d.lib CACHE FILEPATH "Location of QuaZip release library") else () set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${INSTALL_DIR}/lib/libquazip5.so CACHE FILEPATH "Location of QuaZip release library") set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG ${INSTALL_DIR}/lib/libquazip5.so CACHE FILEPATH "Location of QuaZip release library") diff --git a/cmake/externals/wasapi/CMakeLists.txt b/cmake/externals/wasapi/CMakeLists.txt index 7cfca4f3ba..d4d4b42e10 100644 --- a/cmake/externals/wasapi/CMakeLists.txt +++ b/cmake/externals/wasapi/CMakeLists.txt @@ -6,8 +6,8 @@ if (WIN32) include(ExternalProject) ExternalProject_Add( ${EXTERNAL_NAME} - URL http://hifi-public.s3.amazonaws.com/dependencies/qtaudio_wasapi5.zip - URL_MD5 0530753e855ffc00232cc969bf1c84a8 + URL http://hifi-public.s3.amazonaws.com/dependencies/qtaudio_wasapi7.zip + URL_MD5 bc2861e50852dd590cdc773a14a041a7 CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" diff --git a/cmake/modules/FindLibOVRPlatform.cmake b/cmake/modules/FindLibOVRPlatform.cmake new file mode 100644 index 0000000000..e9caa2cb98 --- /dev/null +++ b/cmake/modules/FindLibOVRPlatform.cmake @@ -0,0 +1,44 @@ +# +# FindLibOVRPlatform.cmake +# +# Try to find the LibOVRPlatform library to use the Oculus Platform SDK +# +# You must provide a LIBOVRPLATFORM_ROOT_DIR which contains Windows and Include directories +# +# Once done this will define +# +# LIBOVRPLATFORM_FOUND - system found Oculus Platform SDK +# LIBOVRPLATFORM_INCLUDE_DIRS - the Oculus Platform include directory +# LIBOVRPLATFORM_LIBRARIES - Link this to use Oculus Platform +# +# Created on December 16, 2016 by Stephen Birarda +# Copyright 2016 High Fidelity, Inc. +# +# Distributed under the Apache License, Version 2.0. 
+# See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +# + + +if (WIN32) + # setup hints for LIBOVRPLATFORM search + include("${MACRO_DIR}/HifiLibrarySearchHints.cmake") + hifi_library_search_hints("LibOVRPlatform") + + find_path(LIBOVRPLATFORM_INCLUDE_DIRS OVR_Platform.h PATH_SUFFIXES Include HINTS ${LIBOVRPLATFORM_SEARCH_DIRS}) + + if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8") + set(_LIB_NAME LibOVRPlatform64_1.lib) + else() + set(_LIB_NAME LibOVRPlatform32_1.lib) + endif() + + find_library(LIBOVRPLATFORM_LIBRARY_RELEASE NAMES ${_LIB_NAME} PATH_SUFFIXES Windows HINTS ${LIBOVRPLATFORM_SEARCH_DIRS}) + + include(SelectLibraryConfigurations) + select_library_configurations(LIBOVRPLATFORM) + + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(LIBOVRPLATFORM DEFAULT_MSG LIBOVRPLATFORM_INCLUDE_DIRS LIBOVRPLATFORM_LIBRARIES) + + mark_as_advanced(LIBOVRPLATFORM_INCLUDE_DIRS LIBOVRPLATFORM_LIBRARIES LIBOVRPLATFORM_SEARCH_DIRS) +endif () diff --git a/domain-server/resources/describe-settings.json b/domain-server/resources/describe-settings.json index dd0e4ad4a1..20d2711743 100644 --- a/domain-server/resources/describe-settings.json +++ b/domain-server/resources/describe-settings.json @@ -40,7 +40,7 @@ { "name": "local_port", "label": "Local UDP Port", - "help": "This is the local port your domain-server binds to for UDP connections.
Depending on your router, this may need to be changed to run multiple full automatic networking domain-servers in the same network.", + "help": "This is the local port your domain-server binds to for UDP connections.
Depending on your router, this may need to be changed to unique values for each domain-server in order to run multiple full automatic networking domain-servers in the same network. You can use the value 0 to have the domain-server select a random port, which will help in preventing port collisions.", "default": "40102", "type": "int", "advanced": true @@ -372,6 +372,13 @@ "help": "Password used for basic HTTP authentication. Leave this blank if you do not want to change it.", "value-hidden": true }, + { + "name": "verify_http_password", + "label": "Verify HTTP Password", + "type": "password", + "help": "Must match the password entered above for change to be saved.", + "value-hidden": true + }, { "name": "maximum_user_capacity", "label": "Maximum User Capacity", @@ -1089,9 +1096,9 @@ { "name": "noise_muting_threshold", "label": "Noise Muting Threshold", - "help": "Loudness value for noise background between 0 and 1.0 (0: mute everyone, 1.0: never mute)", - "placeholder": "0.003", - "default": "0.003", + "help": "Loudness value for noise background between 0 and 1.0 (0: mute everyone, 1.0: never mute). 0.003 is a typical setting to mute loud people.", + "placeholder": "1.0", + "default": "1.0", "advanced": false }, { @@ -1285,11 +1292,19 @@ { "name": "entityScriptSourceWhitelist", "label": "Entity Scripts Allowed from:", - "help": "The domains that entity scripts are allowed from. A comma separated list of domains that entity scripts are allowed from, if someone attempts to create and entity or edit an entity to have a different domain, it will be rejected. If left blank, any domain is allowed.", + "help": "Comma separated list of URLs (with optional paths) that entity scripts are allowed from. If someone attempts to create an entity or edit an entity to have a different domain, it will be rejected. 
If left blank, any domain is allowed.", "placeholder": "", "default": "", "advanced": true }, + { + "name": "entityEditFilter", + "label": "Filter Entity Edits", + "help": "Check all entity edits against this filter function.", + "placeholder": "url whose content is like: function filter(properties) { return properties; }", + "default": "", + "advanced": true + }, { "name": "persistFilePath", "label": "Entities File Path", diff --git a/domain-server/resources/web/css/style.css b/domain-server/resources/web/css/style.css index ad426671a4..553f408e15 100644 --- a/domain-server/resources/web/css/style.css +++ b/domain-server/resources/web/css/style.css @@ -125,6 +125,10 @@ tr.new-row { background-color: #dff0d8; } +tr.invalid-input { + background-color: #f2dede; +} + .graphable-stat { text-align: center; color: #5286BC; diff --git a/domain-server/resources/web/settings/index.shtml b/domain-server/resources/web/settings/index.shtml index 7af21fe84b..3eb7a53726 100644 --- a/domain-server/resources/web/settings/index.shtml +++ b/domain-server/resources/web/settings/index.shtml @@ -12,7 +12,7 @@