diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp
index b638c51342..bbce49b941 100644
--- a/interface/src/Application.cpp
+++ b/interface/src/Application.cpp
@@ -1372,6 +1372,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
         });
         connect(this, &Application::activeDisplayPluginChanged, reinterpret_cast<scripting::Audio*>(audioScriptingInterface.data()), &scripting::Audio::onContextChanged);
+        connect(this, &Application::interstitialModeChanged, audioIO.data(), &AudioClient::setInterstitialStatus);
     }
 
     // Create the rendering engine. This can be slow on some machines due to lots of
@@ -2252,6 +2253,25 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
     // Preload Tablet sounds
     DependencyManager::get<TabletScriptingInterface>()->preloadSounds();
 
+    connect(this, &Application::interstitialModeChanged, this, [this] (bool interstitialMode) {
+        if (!interstitialMode) {
+            DependencyManager::get<AudioClient>()->negotiateAudioFormat();
+            _queryExpiry = SteadyClock::now();
+            if (_avatarOverrideUrl.isValid()) {
+                getMyAvatar()->useFullAvatarURL(_avatarOverrideUrl);
+            }
+            static const QUrl empty{};
+            if (getMyAvatar()->getFullAvatarURLFromPreferences() != getMyAvatar()->cannonicalSkeletonModelURL(empty)) {
+                getMyAvatar()->resetFullAvatarURL();
+            }
+            getMyAvatar()->markIdentityDataChanged();
+            getMyAvatar()->resetLastSent();
+
+            // transmit a "sendAll" packet to the AvatarMixer we just connected to.
+            getMyAvatar()->sendAvatarDataPacket(true);
+        }
+    });
+
     _pendingIdleEvent = false;
     _pendingRenderEvent = false;
 
@@ -3412,13 +3432,14 @@ bool Application::isServerlessMode() const {
     return false;
 }
 
-bool Application::isInterstitialPage() {
+bool Application::isInterstitialMode() const {
     return _interstitialMode;
 }
 
-void Application::setInterstitialMode(bool interstitialMode) {
+void Application::setIsInterstitialMode(bool interstitialMode) {
     if (_interstitialMode != interstitialMode) {
         _interstitialMode = interstitialMode;
+        emit interstitialModeChanged(_interstitialMode);
     }
 }
 
@@ -5481,8 +5502,6 @@ static bool domainLoadingInProgress = false;
 void Application::update(float deltaTime) {
     PROFILE_RANGE_EX(app, __FUNCTION__, 0xffff0000, (uint64_t)_renderFrameCount + 1);
 
-    auto audioClient = DependencyManager::get<AudioClient>();
-    audioClient->setMuted(true);
     if (!_physicsEnabled) {
         if (!domainLoadingInProgress) {
             PROFILE_ASYNC_BEGIN(app, "Scene Loading", "");
@@ -5504,6 +5523,7 @@ void Application::update(float deltaTime) {
                 // scene is ready to compute its collision shape.
                 if (nearbyEntitiesAreReadyForPhysics() && getMyAvatar()->isReadyForPhysics()) {
                     _physicsEnabled = true;
+                    setIsInterstitialMode(false);
                     getMyAvatar()->updateMotionBehaviorFromMenu();
                 }
             }
@@ -5909,7 +5929,7 @@ void Application::update(float deltaTime) {
         // send packet containing downstream audio stats to the AudioMixer
         {
             quint64 sinceLastNack = now - _lastSendDownstreamAudioStats;
-            if (sinceLastNack > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS) {
+            if (sinceLastNack > TOO_LONG_SINCE_LAST_SEND_DOWNSTREAM_AUDIO_STATS && !isInterstitialMode()) {
                 _lastSendDownstreamAudioStats = now;
 
                 QMetaObject::invokeMethod(DependencyManager::get<AudioClient>().data(), "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
@@ -6072,21 +6092,23 @@ void Application::updateRenderArgs(float deltaTime) {
 }
 
 void Application::queryAvatars() {
-    auto avatarPacket = NLPacket::create(PacketType::AvatarQuery);
-    auto destinationBuffer = reinterpret_cast<unsigned char*>(avatarPacket->getPayload());
-    unsigned char* bufferStart = destinationBuffer;
+    if (!isInterstitialMode()) {
+        auto avatarPacket = NLPacket::create(PacketType::AvatarQuery);
+        auto destinationBuffer = reinterpret_cast<unsigned char*>(avatarPacket->getPayload());
+        unsigned char* bufferStart = destinationBuffer;
 
-    uint8_t numFrustums = (uint8_t)_conicalViews.size();
-    memcpy(destinationBuffer, &numFrustums, sizeof(numFrustums));
-    destinationBuffer += sizeof(numFrustums);
+        uint8_t numFrustums = (uint8_t)_conicalViews.size();
+        memcpy(destinationBuffer, &numFrustums, sizeof(numFrustums));
+        destinationBuffer += sizeof(numFrustums);
 
-    for (const auto& view : _conicalViews) {
-        destinationBuffer += view.serialize(destinationBuffer);
+        for (const auto& view : _conicalViews) {
+            destinationBuffer += view.serialize(destinationBuffer);
+        }
+
+        avatarPacket->setPayloadSize(destinationBuffer - bufferStart);
+
+        DependencyManager::get<NodeList>()->broadcastToNodes(std::move(avatarPacket), NodeSet() << NodeType::AvatarMixer);
     }
-
-    avatarPacket->setPayloadSize(destinationBuffer - bufferStart);
-
-    DependencyManager::get<NodeList>()->broadcastToNodes(std::move(avatarPacket), NodeSet() << NodeType::AvatarMixer);
 }
 
@@ -6293,6 +6315,7 @@ void Application::clearDomainOctreeDetails() {
     qCDebug(interfaceapp) << "Clearing domain octree details...";
 
     resetPhysicsReadyInformation();
+    setIsInterstitialMode(true);
 
     _octreeServerSceneStats.withWriteLock([&] {
         _octreeServerSceneStats.clear();
@@ -6367,11 +6390,11 @@ void Application::nodeActivated(SharedNodePointer node) {
         _octreeQuery.incrementConnectionID();
     }
 
-    if (node->getType() == NodeType::AudioMixer) {
+    if (node->getType() == NodeType::AudioMixer && !isInterstitialMode()) {
         DependencyManager::get<AudioClient>()->negotiateAudioFormat();
     }
 
-    if (node->getType() == NodeType::AvatarMixer) {
+    if (node->getType() == NodeType::AvatarMixer && !isInterstitialMode()) {
         _queryExpiry = SteadyClock::now();
 
         // new avatar mixer, send off our identity packet on next update loop
diff --git a/interface/src/Application.h b/interface/src/Application.h
index c084c0033f..6bdfef78e1 100644
--- a/interface/src/Application.h
+++ b/interface/src/Application.h
@@ -328,6 +328,7 @@ signals:
     void activeDisplayPluginChanged();
 
     void uploadRequest(QString path);
+    void interstitialModeChanged(bool interstitialMode);
 
 public slots:
     QVector<EntityItemID> pasteEntities(float x, float y, float z);
diff --git a/interface/src/avatar/AvatarManager.cpp b/interface/src/avatar/AvatarManager.cpp
index fab512f787..0fcc253f53 100644
--- a/interface/src/avatar/AvatarManager.cpp
+++ b/interface/src/avatar/AvatarManager.cpp
@@ -121,7 +121,7 @@ void AvatarManager::updateMyAvatar(float deltaTime) {
     quint64 now = usecTimestampNow();
     quint64 dt = now - _lastSendAvatarDataTime;
 
-    if (dt > MIN_TIME_BETWEEN_MY_AVATAR_DATA_SENDS) {
+    if (dt > MIN_TIME_BETWEEN_MY_AVATAR_DATA_SENDS && !qApp->isInterstitialMode()) {
         // send head/hand data to the avatar mixer and voxel server
         PerformanceTimer perfTimer("send");
         _myAvatar->sendAvatarDataPacket();
@@ -755,13 +755,13 @@ void AvatarManager::setAvatarSortCoefficient(const QString& name, const QScriptV
         QString currentSessionUUID = avatar->getSessionUUID().toString();
         if (specificAvatarIdentifiers.isEmpty() || specificAvatarIdentifiers.contains(currentSessionUUID)) {
             QJsonObject thisAvatarPalData;
-            
+
             auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
 
             if (currentSessionUUID == myAvatar->getSessionUUID().toString()) {
                 currentSessionUUID = "";
             }
-            
+
             thisAvatarPalData.insert("sessionUUID", currentSessionUUID);
             thisAvatarPalData.insert("sessionDisplayName", avatar->getSessionDisplayName());
             thisAvatarPalData.insert("audioLoudness", avatar->getAudioLoudness());
diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp
index 98fbd8fea2..ec5ca903a0 100755
--- a/interface/src/avatar/MyAvatar.cpp
+++ b/interface/src/avatar/MyAvatar.cpp
@@ -2212,6 +2212,7 @@ void MyAvatar::setHasScriptedBlendshapes(bool hasScriptedBlendshapes) {
         // send a forced avatarData update to make sure the script can send neutal blendshapes on unload
         // without having to wait for the update loop, make sure _hasScriptedBlendShapes is still true
         // before sending the update, or else it won't send the neutal blendshapes to the receiving clients
+        sendAvatarDataPacket(true);
     }
     _hasScriptedBlendShapes = hasScriptedBlendshapes;
diff --git a/libraries/audio-client/src/AudioClient.cpp b/libraries/audio-client/src/AudioClient.cpp
index 0eadcc5b66..a1487fa3ec 100644
--- a/libraries/audio-client/src/AudioClient.cpp
+++ b/libraries/audio-client/src/AudioClient.cpp
@@ -667,8 +667,7 @@ void AudioClient::stop() {
 }
 
 void AudioClient::handleAudioEnvironmentDataPacket(QSharedPointer<ReceivedMessage> message) {
-
-    /*char bitset;
+    char bitset;
     message->readPrimitive(&bitset);
 
     bool hasReverb = oneAtBit(bitset, HAS_REVERB_BIT);
@@ -680,11 +679,10 @@ void AudioClient::handleAudioEnvironmentDataPacket(QSharedPointer<ReceivedMessage> message) {
-    /*
     if (message->getType() == PacketType::SilentAudioFrame) {
         _silentInbound.increment();
     } else {
@@ -709,7 +707,7 @@ void AudioClient::handleAudioDataPacket(QSharedPointer<ReceivedMessage> message)
         // Audio output must exist and be correctly set up if we're going to process received audio
         _receivedAudioStream.parseData(*message);
 #endif
-}*/
+    }
 }
 
 AudioClient::Gate::Gate(AudioClient* audioClient) :
@@ -1042,80 +1040,82 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
 }
 
 void AudioClient::handleAudioInput(QByteArray& audioBuffer) {
-    if (_muted) {
-        _lastInputLoudness = 0.0f;
-        _timeSinceLastClip = 0.0f;
-    } else {
-        int16_t* samples = reinterpret_cast<int16_t*>(audioBuffer.data());
-        int numSamples = audioBuffer.size() / AudioConstants::SAMPLE_SIZE;
-        int numFrames = numSamples / (_isStereoInput ? AudioConstants::STEREO : AudioConstants::MONO);
-
-        if (_isNoiseGateEnabled) {
-            // The audio gate includes DC removal
-            _audioGate->render(samples, samples, numFrames);
-        } else {
-            _audioGate->removeDC(samples, samples, numFrames);
-        }
-
-        int32_t loudness = 0;
-        assert(numSamples < 65536); // int32_t loudness cannot overflow
-        bool didClip = false;
-        for (int i = 0; i < numSamples; ++i) {
-            const int32_t CLIPPING_THRESHOLD = (int32_t)(AudioConstants::MAX_SAMPLE_VALUE * 0.9f);
-            int32_t sample = std::abs((int32_t)samples[i]);
-            loudness += sample;
-            didClip |= (sample > CLIPPING_THRESHOLD);
-        }
-        _lastInputLoudness = (float)loudness / numSamples;
-
-        if (didClip) {
+    if (!_interstitialMode) {
+        if (_muted) {
+            _lastInputLoudness = 0.0f;
             _timeSinceLastClip = 0.0f;
-        } else if (_timeSinceLastClip >= 0.0f) {
-            _timeSinceLastClip += (float)numSamples / (float)AudioConstants::SAMPLE_RATE;
+        } else {
+            int16_t* samples = reinterpret_cast<int16_t*>(audioBuffer.data());
+            int numSamples = audioBuffer.size() / AudioConstants::SAMPLE_SIZE;
+            int numFrames = numSamples / (_isStereoInput ? AudioConstants::STEREO : AudioConstants::MONO);
+
+            if (_isNoiseGateEnabled) {
+                // The audio gate includes DC removal
+                _audioGate->render(samples, samples, numFrames);
+            } else {
+                _audioGate->removeDC(samples, samples, numFrames);
+            }
+
+            int32_t loudness = 0;
+            assert(numSamples < 65536); // int32_t loudness cannot overflow
+            bool didClip = false;
+            for (int i = 0; i < numSamples; ++i) {
+                const int32_t CLIPPING_THRESHOLD = (int32_t)(AudioConstants::MAX_SAMPLE_VALUE * 0.9f);
+                int32_t sample = std::abs((int32_t)samples[i]);
+                loudness += sample;
+                didClip |= (sample > CLIPPING_THRESHOLD);
+            }
+            _lastInputLoudness = (float)loudness / numSamples;
+
+            if (didClip) {
+                _timeSinceLastClip = 0.0f;
+            } else if (_timeSinceLastClip >= 0.0f) {
+                _timeSinceLastClip += (float)numSamples / (float)AudioConstants::SAMPLE_RATE;
+            }
+
+            emit inputReceived(audioBuffer);
         }
 
-        emit inputReceived(audioBuffer);
+        emit inputLoudnessChanged(_lastInputLoudness);
+
+        // state machine to detect gate opening and closing
+        bool audioGateOpen = (_lastInputLoudness != 0.0f);
+        bool openedInLastBlock = !_audioGateOpen && audioGateOpen;  // the gate just opened
+        bool closedInLastBlock = _audioGateOpen && !audioGateOpen;  // the gate just closed
+        _audioGateOpen = audioGateOpen;
+
+        if (openedInLastBlock) {
+            emit noiseGateOpened();
+        } else if (closedInLastBlock) {
+            emit noiseGateClosed();
+        }
+
+        // the codec must be flushed to silence before sending silent packets,
+        // so delay the transition to silent packets by one packet after becoming silent.
+        auto packetType = _shouldEchoToServer ? PacketType::MicrophoneAudioWithEcho : PacketType::MicrophoneAudioNoEcho;
+        if (!audioGateOpen && !closedInLastBlock) {
+            packetType = PacketType::SilentAudioFrame;
+            _silentOutbound.increment();
+        } else {
+            _audioOutbound.increment();
+        }
+
+        Transform audioTransform;
+        audioTransform.setTranslation(_positionGetter());
+        audioTransform.setRotation(_orientationGetter());
+
+        QByteArray encodedBuffer;
+        if (_encoder) {
+            _encoder->encode(audioBuffer, encodedBuffer);
+        } else {
+            encodedBuffer = audioBuffer;
+        }
+
+        emitAudioPacket(encodedBuffer.data(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, _isStereoInput,
+                        audioTransform, avatarBoundingBoxCorner, avatarBoundingBoxScale,
+                        packetType, _selectedCodecName);
+        _stats.sentPacket();
     }
-
-    emit inputLoudnessChanged(_lastInputLoudness);
-
-    // state machine to detect gate opening and closing
-    bool audioGateOpen = (_lastInputLoudness != 0.0f);
-    bool openedInLastBlock = !_audioGateOpen && audioGateOpen;  // the gate just opened
-    bool closedInLastBlock = _audioGateOpen && !audioGateOpen;  // the gate just closed
-    _audioGateOpen = audioGateOpen;
-
-    if (openedInLastBlock) {
-        emit noiseGateOpened();
-    } else if (closedInLastBlock) {
-        emit noiseGateClosed();
-    }
-
-    // the codec must be flushed to silence before sending silent packets,
-    // so delay the transition to silent packets by one packet after becoming silent.
-    auto packetType = _shouldEchoToServer ? PacketType::MicrophoneAudioWithEcho : PacketType::MicrophoneAudioNoEcho;
-    if (!audioGateOpen && !closedInLastBlock) {
-        packetType = PacketType::SilentAudioFrame;
-        _silentOutbound.increment();
-    } else {
-        _audioOutbound.increment();
-    }
-
-    Transform audioTransform;
-    audioTransform.setTranslation(_positionGetter());
-    audioTransform.setRotation(_orientationGetter());
-
-    QByteArray encodedBuffer;
-    if (_encoder) {
-        _encoder->encode(audioBuffer, encodedBuffer);
-    } else {
-        encodedBuffer = audioBuffer;
-    }
-
-    emitAudioPacket(encodedBuffer.data(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, _isStereoInput,
-        audioTransform, avatarBoundingBoxCorner, avatarBoundingBoxScale,
-        packetType, _selectedCodecName);
-    _stats.sentPacket();
 }
 
 void AudioClient::handleMicAudioInput() {
@@ -2017,7 +2017,7 @@ void AudioClient::loadSettings() {
     _receivedAudioStream.setDynamicJitterBufferEnabled(dynamicJitterBufferEnabled.get());
     _receivedAudioStream.setStaticJitterBufferFrames(staticJitterBufferFrames.get());
 
-    qCDebug(audioclient) << "---- Initializing Audio Client ----";
+
     auto codecPlugins = PluginManager::getInstance()->getCodecPlugins();
     for (auto& plugin : codecPlugins) {
         qCDebug(audioclient) << "Codec available:" << plugin->getName();
diff --git a/libraries/audio-client/src/AudioClient.h b/libraries/audio-client/src/AudioClient.h
index 9ee7bcfeba..b665f85a13 100644
--- a/libraries/audio-client/src/AudioClient.h
+++ b/libraries/audio-client/src/AudioClient.h
@@ -188,6 +188,7 @@ public slots:
     void handleRecordedAudioInput(const QByteArray& audio);
     void reset();
    void audioMixerKilled();
+    void setInterstitialStatus(bool interstitialMode) { _interstitialMode = interstitialMode; }
 
     void setMuted(bool muted, bool emitSignal = true);
     bool isMuted() { return _muted; }
@@ -417,6 +418,7 @@ private:
 
     QVector<AudioInjectorPointer> _activeLocalAudioInjectors;
     bool _isPlayingBackRecording { false };
+    bool _interstitialMode { true };
 
     CodecPluginPointer _codec;
     QString _selectedCodecName;