Mirror of https://github.com/overte-org/overte.git

Commit 81db8f7e08: Merge branch 'master' into out-of-body-experience
53 changed files with 501 additions and 199 deletions
Agent.cpp

@@ -15,6 +15,7 @@
 #include <QtNetwork/QNetworkDiskCache>
 #include <QtNetwork/QNetworkRequest>
 #include <QtNetwork/QNetworkReply>
+#include <QThread>

 #include <AssetClient.h>
 #include <AvatarHashMap.h>
@@ -33,6 +34,9 @@
 #include <recording/Recorder.h>
 #include <recording/Frame.h>

+#include <plugins/CodecPlugin.h>
+#include <plugins/PluginManager.h>
+
 #include <WebSocketServerClass.h>
 #include <EntityScriptingInterface.h> // TODO: consider moving to scriptengine.h

@@ -42,6 +46,7 @@
 #include "AbstractAudioInterface.h"

 #include "Agent.h"
+#include "AvatarAudioTimer.h"

 static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 10;

@@ -72,6 +77,17 @@ Agent::Agent(ReceivedMessage& message) :
         { PacketType::OctreeStats, PacketType::EntityData, PacketType::EntityErase },
         this, "handleOctreePacket");
     packetReceiver.registerListener(PacketType::Jurisdiction, this, "handleJurisdictionPacket");
+    packetReceiver.registerListener(PacketType::SelectedAudioFormat, this, "handleSelectedAudioFormat");
 }

+void Agent::playAvatarSound(SharedSoundPointer sound) {
+    // this must happen on Agent's main thread
+    if (QThread::currentThread() != thread()) {
+        QMetaObject::invokeMethod(this, "playAvatarSound", Q_ARG(SharedSoundPointer, sound));
+        return;
+    } else {
+        setAvatarSound(sound);
+    }
+}
+
 void Agent::handleOctreePacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode) {
@@ -118,7 +134,6 @@ void Agent::handleAudioPacket(QSharedPointer<ReceivedMessage> message) {
     _receivedAudioStream.parseData(*message);

-    _lastReceivedAudioLoudness = _receivedAudioStream.getNextOutputFrameLoudness();
     _receivedAudioStream.clearBuffer();
 }

@@ -214,6 +229,59 @@ void Agent::nodeActivated(SharedNodePointer activatedNode) {

         _pendingScriptRequest = nullptr;
     }
+    if (activatedNode->getType() == NodeType::AudioMixer) {
+        negotiateAudioFormat();
+    }
 }

+void Agent::negotiateAudioFormat() {
+    auto nodeList = DependencyManager::get<NodeList>();
+    auto negotiateFormatPacket = NLPacket::create(PacketType::NegotiateAudioFormat);
+    auto codecPlugins = PluginManager::getInstance()->getCodecPlugins();
+    quint8 numberOfCodecs = (quint8)codecPlugins.size();
+    negotiateFormatPacket->writePrimitive(numberOfCodecs);
+    for (auto& plugin : codecPlugins) {
+        auto codecName = plugin->getName();
+        negotiateFormatPacket->writeString(codecName);
+    }
+
+    // grab our audio mixer from the NodeList, if it exists
+    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
+
+    if (audioMixer) {
+        // send off this negotiate format packet
+        nodeList->sendPacket(std::move(negotiateFormatPacket), *audioMixer);
+    }
+}
+
+void Agent::handleSelectedAudioFormat(QSharedPointer<ReceivedMessage> message) {
+    QString selectedCodecName = message->readString();
+    selectAudioFormat(selectedCodecName);
+}
+
+void Agent::selectAudioFormat(const QString& selectedCodecName) {
+    _selectedCodecName = selectedCodecName;
+
+    qDebug() << "Selected Codec:" << _selectedCodecName;
+
+    // release any old codec encoder/decoder first...
+    if (_codec && _encoder) {
+        _codec->releaseEncoder(_encoder);
+        _encoder = nullptr;
+        _codec = nullptr;
+    }
+    _receivedAudioStream.cleanupCodec();
+
+    auto codecPlugins = PluginManager::getInstance()->getCodecPlugins();
+    for (auto& plugin : codecPlugins) {
+        if (_selectedCodecName == plugin->getName()) {
+            _codec = plugin;
+            _receivedAudioStream.setupCodec(plugin, _selectedCodecName, AudioConstants::STEREO);
+            _encoder = plugin->createEncoder(AudioConstants::SAMPLE_RATE, AudioConstants::MONO);
+            qDebug() << "Selected Codec Plugin:" << _codec.get();
+            break;
+        }
+    }
+}
+
 void Agent::scriptRequestFinished() {
@@ -314,10 +382,18 @@ void Agent::executeScript() {
     entityScriptingInterface->setEntityTree(_entityViewer.getTree());

     DependencyManager::set<AssignmentParentFinder>(_entityViewer.getTree());

-    // wire up our additional agent related processing to the update signal
-    QObject::connect(_scriptEngine.get(), &ScriptEngine::update, this, &Agent::processAgentAvatarAndAudio);
+    // 100Hz timer for audio
+    AvatarAudioTimer* audioTimerWorker = new AvatarAudioTimer();
+    audioTimerWorker->moveToThread(&_avatarAudioTimerThread);
+    connect(audioTimerWorker, &AvatarAudioTimer::avatarTick, this, &Agent::processAgentAvatarAudio);
+    connect(this, &Agent::startAvatarAudioTimer, audioTimerWorker, &AvatarAudioTimer::start);
+    connect(this, &Agent::stopAvatarAudioTimer, audioTimerWorker, &AvatarAudioTimer::stop);
+    connect(&_avatarAudioTimerThread, &QThread::finished, audioTimerWorker, &QObject::deleteLater);
+    _avatarAudioTimerThread.start();
+
+    // 60Hz timer for avatar
+    QObject::connect(_scriptEngine.get(), &ScriptEngine::update, this, &Agent::processAgentAvatar);
     _scriptEngine->run();

     Frame::clearFrameHandler(AUDIO_FRAME_TYPE);

@@ -343,6 +419,10 @@ void Agent::setIsAvatar(bool isAvatar) {

         // start the timers
         _avatarIdentityTimer->start(AVATAR_IDENTITY_PACKET_SEND_INTERVAL_MSECS);
+
+        // tell the avatarAudioTimer to start ticking
+        emit startAvatarAudioTimer();
+
     }

     if (!_isAvatar) {

@@ -367,6 +447,7 @@ void Agent::setIsAvatar(bool isAvatar) {
                 nodeList->sendPacketList(std::move(packetList), *node);
             });
         }
+        emit stopAvatarAudioTimer();
     }
 }

@@ -377,11 +458,9 @@ void Agent::sendAvatarIdentityPacket() {
     }
 }

-void Agent::processAgentAvatarAndAudio(float deltaTime) {
+void Agent::processAgentAvatar() {
     if (!_scriptEngine->isFinished() && _isAvatar) {
         auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
-        const int SCRIPT_AUDIO_BUFFER_SAMPLES = AudioConstants::SAMPLE_RATE / SCRIPT_FPS + 0.5;
-        const int SCRIPT_AUDIO_BUFFER_BYTES = SCRIPT_AUDIO_BUFFER_SAMPLES * sizeof(int16_t);

         QByteArray avatarByteArray = scriptedAvatar->toByteArray(true, randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO);
         scriptedAvatar->doneEncoding(true);
@@ -395,95 +474,106 @@ void Agent::processAgentAvatarAndAudio(float deltaTime) {
         auto nodeList = DependencyManager::get<NodeList>();
         nodeList->broadcastToNodes(std::move(avatarPacket), NodeSet() << NodeType::AvatarMixer);
     }
 }

-    if (_isListeningToAudioStream || _avatarSound) {
-        // if we have an avatar audio stream then send it out to our audio-mixer
-        bool silentFrame = true;
-
-        int16_t numAvailableSamples = SCRIPT_AUDIO_BUFFER_SAMPLES;
-        const int16_t* nextSoundOutput = NULL;
-
-        if (_avatarSound) {
-            const QByteArray& soundByteArray = _avatarSound->getByteArray();
-            nextSoundOutput = reinterpret_cast<const int16_t*>(soundByteArray.data()
-                + _numAvatarSoundSentBytes);
-
-            int numAvailableBytes = (soundByteArray.size() - _numAvatarSoundSentBytes) > SCRIPT_AUDIO_BUFFER_BYTES
-                ? SCRIPT_AUDIO_BUFFER_BYTES
-                : soundByteArray.size() - _numAvatarSoundSentBytes;
-            numAvailableSamples = (int16_t)numAvailableBytes / sizeof(int16_t);
-
-            // check if the all of the _numAvatarAudioBufferSamples to be sent are silence
-            for (int i = 0; i < numAvailableSamples; ++i) {
-                if (nextSoundOutput[i] != 0) {
-                    silentFrame = false;
-                    break;
-                }
-            }
-
-            _numAvatarSoundSentBytes += numAvailableBytes;
-            if (_numAvatarSoundSentBytes == soundByteArray.size()) {
-                // we're done with this sound object - so set our pointer back to NULL
-                // and our sent bytes back to zero
-                _avatarSound.clear();
-                _numAvatarSoundSentBytes = 0;
-            }
-        }
-
-        auto audioPacket = NLPacket::create(silentFrame
-            ? PacketType::SilentAudioFrame
-            : PacketType::MicrophoneAudioNoEcho);
-
-        // seek past the sequence number, will be packed when destination node is known
-        audioPacket->seek(sizeof(quint16));
-
-        if (silentFrame) {
-            if (!_isListeningToAudioStream) {
-                // if we have a silent frame and we're not listening then just send nothing and break out of here
-                return;
-            }
-
-            // write the number of silent samples so the audio-mixer can uphold timing
-            audioPacket->writePrimitive(SCRIPT_AUDIO_BUFFER_SAMPLES);
-
-            // use the orientation and position of this avatar for the source of this audio
-            audioPacket->writePrimitive(scriptedAvatar->getPosition());
-            glm::quat headOrientation = scriptedAvatar->getHeadOrientation();
-            audioPacket->writePrimitive(headOrientation);
-
-        } else if (nextSoundOutput) {
-            // write the codec
-            QString codecName;
-            audioPacket->writeString(codecName);
-
-            // assume scripted avatar audio is mono and set channel flag to zero
-            audioPacket->writePrimitive((quint8)0);
-
-            // use the orientation and position of this avatar for the source of this audio
-            audioPacket->writePrimitive(scriptedAvatar->getPosition());
-            glm::quat headOrientation = scriptedAvatar->getHeadOrientation();
-            audioPacket->writePrimitive(headOrientation);
-
-            // write the raw audio data
-            audioPacket->write(reinterpret_cast<const char*>(nextSoundOutput), numAvailableSamples * sizeof(int16_t));
-        }
-
-        // write audio packet to AudioMixer nodes
-        auto nodeList = DependencyManager::get<NodeList>();
-        nodeList->eachNode([this, &nodeList, &audioPacket](const SharedNodePointer& node){
-            // only send to nodes of type AudioMixer
-            if (node->getType() == NodeType::AudioMixer) {
-                // pack sequence number
-                quint16 sequence = _outgoingScriptAudioSequenceNumbers[node->getUUID()]++;
-                audioPacket->seek(0);
-                audioPacket->writePrimitive(sequence);
-
-                // send audio packet
-                nodeList->sendUnreliablePacket(*audioPacket, *node);
-            }
-        });
-    }
-}
+void Agent::processAgentAvatarAudio() {
+    if (_isAvatar && (_isListeningToAudioStream || _avatarSound)) {
+        // if we have an avatar audio stream then send it out to our audio-mixer
+        auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
+        bool silentFrame = true;
+
+        int16_t numAvailableSamples = AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
+        const int16_t* nextSoundOutput = NULL;
+
+        if (_avatarSound) {
+            const QByteArray& soundByteArray = _avatarSound->getByteArray();
+            nextSoundOutput = reinterpret_cast<const int16_t*>(soundByteArray.data()
+                + _numAvatarSoundSentBytes);
+
+            int numAvailableBytes = (soundByteArray.size() - _numAvatarSoundSentBytes) > AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL
+                ? AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL
+                : soundByteArray.size() - _numAvatarSoundSentBytes;
+            numAvailableSamples = (int16_t)numAvailableBytes / sizeof(int16_t);
+
+            // check if the all of the _numAvatarAudioBufferSamples to be sent are silence
+            for (int i = 0; i < numAvailableSamples; ++i) {
+                if (nextSoundOutput[i] != 0) {
+                    silentFrame = false;
+                    break;
+                }
+            }
+
+            _numAvatarSoundSentBytes += numAvailableBytes;
+            if (_numAvatarSoundSentBytes == soundByteArray.size()) {
+                // we're done with this sound object - so set our pointer back to NULL
+                // and our sent bytes back to zero
+                _avatarSound.clear();
+                _numAvatarSoundSentBytes = 0;
+            }
+        }
+
+        auto audioPacket = NLPacket::create(silentFrame
+            ? PacketType::SilentAudioFrame
+            : PacketType::MicrophoneAudioNoEcho);
+
+        // seek past the sequence number, will be packed when destination node is known
+        audioPacket->seek(sizeof(quint16));
+
+        if (silentFrame) {
+            if (!_isListeningToAudioStream) {
+                // if we have a silent frame and we're not listening then just send nothing and break out of here
+                return;
+            }
+
+            // write the number of silent samples so the audio-mixer can uphold timing
+            audioPacket->writePrimitive(AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
+
+            // use the orientation and position of this avatar for the source of this audio
+            audioPacket->writePrimitive(scriptedAvatar->getPosition());
+            glm::quat headOrientation = scriptedAvatar->getHeadOrientation();
+            audioPacket->writePrimitive(headOrientation);
+
+        } else if (nextSoundOutput) {
+
+            // write the codec
+            audioPacket->writeString(_selectedCodecName);
+
+            // assume scripted avatar audio is mono and set channel flag to zero
+            audioPacket->writePrimitive((quint8)0);
+
+            // use the orientation and position of this avatar for the source of this audio
+            audioPacket->writePrimitive(scriptedAvatar->getPosition());
+            glm::quat headOrientation = scriptedAvatar->getHeadOrientation();
+            audioPacket->writePrimitive(headOrientation);
+
+            // encode it
+            if (_encoder) {
+                QByteArray decodedBuffer(reinterpret_cast<const char*>(nextSoundOutput), numAvailableSamples * sizeof(int16_t));
+                QByteArray encodedBuffer;
+                _encoder->encode(decodedBuffer, encodedBuffer);
+                audioPacket->write(encodedBuffer.data(), encodedBuffer.size());
+            } else {
+                audioPacket->write(reinterpret_cast<const char*>(nextSoundOutput), numAvailableSamples * sizeof(int16_t));
+            }
+        }
+
+        // write audio packet to AudioMixer nodes
+        auto nodeList = DependencyManager::get<NodeList>();
+        nodeList->eachNode([this, &nodeList, &audioPacket](const SharedNodePointer& node) {
+            // only send to nodes of type AudioMixer
+            if (node->getType() == NodeType::AudioMixer) {
+                // pack sequence number
+                quint16 sequence = _outgoingScriptAudioSequenceNumbers[node->getUUID()]++;
+                audioPacket->seek(0);
+                audioPacket->writePrimitive(sequence);
+
+                // send audio packet
+                nodeList->sendUnreliablePacket(*audioPacket, *node);
+            }
+        });
+    }
+}
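A note on the packet the new function builds: the writes imply the following layout for a non-silent MicrophoneAudioNoEcho packet. This is a sketch inferred from the code in this hunk, not an authoritative wire specification; SilentAudioFrame packets carry a sample count in place of the codec, channel flag, and payload fields, but keep the position and orientation.

    // quint16  sequence number   (seeked past here, packed later per destination node)
    // QString  codec name        (_selectedCodecName, as written by writeString)
    // quint8   channel flag      (0 = mono for scripted avatar audio)
    // vec3     avatar position
    // quat     head orientation
    // bytes    audio payload     (codec-encoded when _encoder is set, otherwise raw int16 PCM)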
@@ -501,4 +591,13 @@ void Agent::aboutToFinish() {

     // cleanup the AudioInjectorManager (and any still running injectors)
     DependencyManager::destroy<AudioInjectorManager>();
+
+    emit stopAvatarAudioTimer();
+    _avatarAudioTimerThread.quit();
+
+    // cleanup codec & encoder
+    if (_codec && _encoder) {
+        _codec->releaseEncoder(_encoder);
+        _encoder = nullptr;
+    }
 }
Agent.h

@@ -18,6 +18,7 @@
 #include <QtScript/QScriptEngine>
 #include <QtCore/QObject>
 #include <QtCore/QUrl>
 #include <QtCore/QTimer>
+#include <QUuid>

 #include <EntityEditPacketSender.h>
@@ -26,8 +27,9 @@
 #include <ScriptEngine.h>
 #include <ThreadedAssignment.h>

+#include <plugins/CodecPlugin.h>
+
 #include "MixedAudioStream.h"

 class Agent : public ThreadedAssignment {
     Q_OBJECT
@@ -56,7 +58,7 @@ public:

 public slots:
     void run() override;
-    void playAvatarSound(SharedSoundPointer avatarSound) { setAvatarSound(avatarSound); }
+    void playAvatarSound(SharedSoundPointer avatarSound);

 private slots:
     void requestScript();
@@ -66,12 +68,20 @@ private slots:
     void handleAudioPacket(QSharedPointer<ReceivedMessage> message);
     void handleOctreePacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode);
     void handleJurisdictionPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode);

-    void processAgentAvatarAndAudio(float deltaTime);
+    void handleSelectedAudioFormat(QSharedPointer<ReceivedMessage> message);

     void nodeActivated(SharedNodePointer activatedNode);

+    void processAgentAvatar();
+    void processAgentAvatarAudio();
+
+signals:
+    void startAvatarAudioTimer();
+    void stopAvatarAudioTimer();
+
 private:
+    void negotiateAudioFormat();
+    void selectAudioFormat(const QString& selectedCodecName);
+
     std::unique_ptr<ScriptEngine> _scriptEngine;
     EntityEditPacketSender _entityEditSender;
     EntityTreeHeadlessViewer _entityViewer;

@@ -92,7 +102,11 @@ private:
     bool _isAvatar = false;
     QTimer* _avatarIdentityTimer = nullptr;
     QHash<QUuid, quint16> _outgoingScriptAudioSequenceNumbers;

+    CodecPluginPointer _codec;
+    QString _selectedCodecName;
+    Encoder* _encoder { nullptr };
+    QThread _avatarAudioTimerThread;
 };

 #endif // hifi_Agent_h
assignment-client/src/AvatarAudioTimer.cpp (new file, 33 lines)

@@ -0,0 +1,33 @@
+//
+//  AvatarAudioTimer.cpp
+//  assignment-client/src
+//
+//  Created by David Kelly on 10/12/13.
+//  Copyright 2016 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+#include <QDebug>
+#include <SharedUtil.h>
+#include "AvatarAudioTimer.h"
+
+// this should send a signal every 10ms, with pretty good precision.  Hardcoding
+// to 10ms since that's what you'd want for audio.
+void AvatarAudioTimer::start() {
+    qDebug() << "AvatarAudioTimer::start called";
+    auto startTime = usecTimestampNow();
+    quint64 frameCounter = 0;
+    const int TARGET_INTERVAL_USEC = 10000; // 10ms
+    while (!_quit) {
+        frameCounter++;
+        // simplest possible timer
+        quint64 targetTime = startTime + frameCounter * TARGET_INTERVAL_USEC;
+        quint64 interval = std::max((quint64)0, targetTime - usecTimestampNow());
+        usleep(interval);
+        emit avatarTick();
+    }
+    qDebug() << "AvatarAudioTimer is finished";
+}
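Two details in the loop above are worth noting. Scheduling each tick against the absolute start time (startTime + frameCounter * TARGET_INTERVAL_USEC) keeps long-run timing honest: a late wakeup shortens the next sleep instead of letting error accumulate, as a bare usleep(10000) per iteration would. However, because the arithmetic is unsigned, the std::max((quint64)0, ...) guard cannot actually clamp anything; when the timer falls behind, targetTime - usecTimestampNow() wraps around to a huge value. A minimal underflow-safe variant, assuming the same names as the file above:

    // compare before subtracting: (targetTime - now) wraps around on quint64
    // whenever the timer has fallen behind schedule
    quint64 now = usecTimestampNow();
    quint64 interval = (targetTime > now) ? (targetTime - now) : 0;
    usleep(interval);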
assignment-client/src/AvatarAudioTimer.h (new file, 31 lines)

@@ -0,0 +1,31 @@
+//
+//  AvatarAudioTimer.h
+//  assignment-client/src
+//
+//  Created by David Kelly on 10/12/13.
+//  Copyright 2016 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_AvatarAudioTimer_h
+#define hifi_AvatarAudioTimer_h
+
+#include <QtCore/QObject>
+
+class AvatarAudioTimer : public QObject {
+    Q_OBJECT
+
+signals:
+    void avatarTick();
+
+public slots:
+    void start();
+    void stop() { _quit = true; }
+
+private:
+    bool _quit { false };
+};
+
+#endif //hifi_AvatarAudioTimer_h
ScriptableAvatar.cpp

@@ -46,14 +46,21 @@ AnimationDetails ScriptableAvatar::getAnimationDetails() {
     return _animationDetails;
 }

+void ScriptableAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) {
+    _bind.reset();
+    _animSkeleton.reset();
+    AvatarData::setSkeletonModelURL(skeletonModelURL);
+}
+
 void ScriptableAvatar::update(float deltatime) {
     if (_bind.isNull() && !_skeletonFBXURL.isEmpty()) { // AvatarData will parse the .fst, but not get the .fbx skeleton.
         _bind = DependencyManager::get<AnimationCache>()->getAnimation(_skeletonFBXURL);
     }

     // Run animation
-    if (_animation && _animation->isLoaded() && _animation->getFrames().size() > 0 && _bind->isLoaded()) {
+    if (_animation && _animation->isLoaded() && _animation->getFrames().size() > 0 && !_bind.isNull() && _bind->isLoaded()) {
+        if (!_animSkeleton) {
+            _animSkeleton = std::make_shared<AnimSkeleton>(_bind->getGeometry());
+        }
         float currentFrame = _animationDetails.currentFrame + deltatime * _animationDetails.fps;
         if (_animationDetails.loop || currentFrame < _animationDetails.lastFrame) {
             while (currentFrame >= _animationDetails.lastFrame) {
@@ -64,14 +71,16 @@ void ScriptableAvatar::update(float deltatime) {
         const QVector<FBXJoint>& modelJoints = _bind->getGeometry().joints;
         QStringList animationJointNames = _animation->getJointNames();

-        if (_jointData.size() != modelJoints.size()) {
-            _jointData.resize(modelJoints.size());
+        const int nJoints = modelJoints.size();
+        if (_jointData.size() != nJoints) {
+            _jointData.resize(nJoints);
         }

         const int frameCount = _animation->getFrames().size();
         const FBXAnimationFrame& floorFrame = _animation->getFrames().at((int)glm::floor(currentFrame) % frameCount);
         const FBXAnimationFrame& ceilFrame = _animation->getFrames().at((int)glm::ceil(currentFrame) % frameCount);
         const float frameFraction = glm::fract(currentFrame);
+        std::vector<AnimPose> poses = _animSkeleton->getRelativeDefaultPoses();

         for (int i = 0; i < animationJointNames.size(); i++) {
             const QString& name = animationJointNames[i];

@@ -79,18 +88,21 @@ void ScriptableAvatar::update(float deltatime) {
             // trusting the .fst (which is sometimes not updated to match changes to .fbx).
             int mapping = _bind->getGeometry().getJointIndex(name);
             if (mapping != -1 && !_maskedJoints.contains(name)) {
-                JointData& data = _jointData[mapping];
-
-                auto newRotation = modelJoints[mapping].preRotation *
-                    safeMix(floorFrame.rotations.at(i), ceilFrame.rotations.at(i), frameFraction);
                 // We could probably do translations as in interpolation in model space (rather than the parent space that each frame is in),
                 // but we don't do so for MyAvatar yet, so let's not be different here.
-                if (data.rotation != newRotation) {
-                    data.rotation = newRotation;
-                    data.rotationSet = true;
-                }
+                // Eventually, this should probably deal with post rotations and translations, too.
+                poses[mapping].rot = modelJoints[mapping].preRotation *
+                    safeMix(floorFrame.rotations.at(i), ceilFrame.rotations.at(i), frameFraction);
             }
         }
+        _animSkeleton->convertRelativePosesToAbsolute(poses);
+        for (int i = 0; i < nJoints; i++) {
+            JointData& data = _jointData[i];
+            AnimPose& pose = poses[i];
+            if (data.rotation != pose.rot) {
+                data.rotation = pose.rot;
+                data.rotationSet = true;
+            }
+        }

     } else {
         _animation.clear();
     }
ScriptableAvatar.h

@@ -13,6 +13,7 @@
 #define hifi_ScriptableAvatar_h

 #include <AnimationCache.h>
+#include <AnimSkeleton.h>
 #include <AvatarData.h>
 #include <ScriptEngine.h>

@@ -25,6 +26,7 @@ public:
         bool hold = false, float firstFrame = 0.0f, float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList());
     Q_INVOKABLE void stopAnimation();
     Q_INVOKABLE AnimationDetails getAnimationDetails();
+    virtual void setSkeletonModelURL(const QUrl& skeletonModelURL) override;

 private slots:
     void update(float deltatime);

@@ -34,6 +36,7 @@ private:
     AnimationDetails _animationDetails;
     QStringList _maskedJoints;
     AnimationPointer _bind; // a sleazy way to get the skeleton, given the various library/cmake dependencies
+    std::shared_ptr<AnimSkeleton> _animSkeleton;
 };

 #endif // hifi_ScriptableAvatar_h
@@ -20,7 +20,7 @@ macro(SET_PACKAGING_PARAMETERS)
   set(RELEASE_NUMBER $ENV{RELEASE_NUMBER})
   string(TOLOWER "$ENV{BRANCH}" BUILD_BRANCH)
   set(BUILD_GLOBAL_SERVICES "DEVELOPMENT")
-  set(USE_STABLE_GLOBAL_SERVICES FALSE)
+  set(USE_STABLE_GLOBAL_SERVICES 0)

   message(STATUS "The BUILD_BRANCH variable is: ${BUILD_BRANCH}")
   message(STATUS "The BRANCH environment variable is: $ENV{BRANCH}")

@@ -43,7 +43,7 @@ macro(SET_PACKAGING_PARAMETERS)
     if (BUILD_BRANCH STREQUAL "stable")
       message(STATUS "The RELEASE_TYPE is PRODUCTION and the BUILD_BRANCH is stable...")
       set(BUILD_GLOBAL_SERVICES "STABLE")
-      set(USE_STABLE_GLOBAL_SERVICES TRUE)
+      set(USE_STABLE_GLOBAL_SERVICES 1)
     endif()

   elseif (RELEASE_TYPE STREQUAL "PR")
DomainServer.cpp

@@ -2267,7 +2267,7 @@ void DomainServer::processPathQueryPacket(QSharedPointer<ReceivedMessage> messag
     QByteArray viewpointUTF8 = responseViewpoint.toUtf8();

     // prepare a packet for the response
-    auto pathResponsePacket = NLPacket::create(PacketType::DomainServerPathResponse);
+    auto pathResponsePacket = NLPacket::create(PacketType::DomainServerPathResponse, -1, true);

     // check the number of bytes the viewpoint is
     quint16 numViewpointBytes = viewpointUTF8.size();
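Assuming the NLPacket::create(type, size, isReliable) parameter order suggested by this call site, the added arguments keep the default payload sizing (-1) and mark the path response as reliable; the matching change to NodeList::sendDSPathQuery later in this commit does the same for the query side.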
OculusTouch controller mapping (JSON)

@@ -11,42 +11,34 @@
     { "from": "OculusTouch.LY", "to": "Standard.LY",
         "filters": [
-            { "type": "deadZone", "min": 0.05 },
+            { "type": "deadZone", "min": 0.3 },
             "invert"
         ]
     },
-    { "from": "OculusTouch.LX", "filters": { "type": "deadZone", "min": 0.05 }, "to": "Standard.LX" },
+    { "from": "OculusTouch.LX", "filters": { "type": "deadZone", "min": 0.3 }, "to": "Standard.LX" },
     { "from": "OculusTouch.LT", "to": "Standard.LTClick",
         "peek": true,
         "filters": [ { "type": "hysteresis", "min": 0.85, "max": 0.9 } ]
     },
     { "from": "OculusTouch.LT", "to": "Standard.LT" },
     { "from": "OculusTouch.LS", "to": "Standard.LS" },
-    { "from": "OculusTouch.LeftGrip", "to": "Standard.LTClick",
-        "peek": true,
-        "filters": [ { "type": "hysteresis", "min": 0.85, "max": 0.9 } ]
-    },
-    { "from": "OculusTouch.LeftGrip", "to": "Standard.LT" },
+    { "from": "OculusTouch.LeftGrip", "filters": { "type": "deadZone", "min": 0.5 }, "to": "Standard.LeftGrip" },
     { "from": "OculusTouch.LeftHand", "to": "Standard.LeftHand" },

     { "from": "OculusTouch.RY", "to": "Standard.RY",
         "filters": [
-            { "type": "deadZone", "min": 0.05 },
+            { "type": "deadZone", "min": 0.3 },
             "invert"
         ]
     },
-    { "from": "OculusTouch.RX", "filters": { "type": "deadZone", "min": 0.05 }, "to": "Standard.RX" },
+    { "from": "OculusTouch.RX", "filters": { "type": "deadZone", "min": 0.3 }, "to": "Standard.RX" },
     { "from": "OculusTouch.RT", "to": "Standard.RTClick",
         "peek": true,
         "filters": [ { "type": "hysteresis", "min": 0.85, "max": 0.9 } ]
     },
     { "from": "OculusTouch.RT", "to": "Standard.RT" },
     { "from": "OculusTouch.RS", "to": "Standard.RS" },
-    { "from": "OculusTouch.RightGrip", "to": "Standard.LTClick",
-        "peek": true,
-        "filters": [ { "type": "hysteresis", "min": 0.85, "max": 0.9 } ]
-    },
-    { "from": "OculusTouch.RightGrip", "to": "Standard.RT" },
+    { "from": "OculusTouch.RightGrip", "filters": { "type": "deadZone", "min": 0.5 }, "to": "Standard.RightGrip" },
     { "from": "OculusTouch.RightHand", "to": "Standard.RightHand" },

     { "from": "OculusTouch.LeftApplicationMenu", "to": "Standard.Back" },
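For readers unfamiliar with these mapping filters, here is a minimal sketch of the semantics the parameters above imply. This is hypothetical illustration code, not the engine's implementation:

    #include <cmath>

    // deadZone: zero out stick values whose magnitude is below min, rescaling
    // the remainder so the output still reaches 1.0 at full deflection.
    float deadZone(float value, float min) {
        float magnitude = std::fabs(value);
        if (magnitude < min) {
            return 0.0f;
        }
        return std::copysign((magnitude - min) / (1.0f - min), value);
    }

    // hysteresis: the click engages above max (0.9) and only releases below
    // min (0.85), so a value jittering between the two cannot rapid-fire.
    bool hysteresis(float value, float min, float max, bool& engaged) {
        if (value >= max) {
            engaged = true;
        } else if (value <= min) {
            engaged = false;
        }
        return engaged;
    }

Raising the stick dead zone from 0.05 to 0.3 means roughly the first 30% of deflection is ignored, which masks stick drift at the cost of fine control near center.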
Application.cpp

@@ -668,6 +668,10 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
     connect(&domainHandler, SIGNAL(disconnectedFromDomain()), SLOT(clearDomainOctreeDetails()));
     connect(&domainHandler, &DomainHandler::domainConnectionRefused, this, &Application::domainConnectionRefused);

+    // We could clear ATP assets only when changing domains, but it's possible that the domain you are connected
+    // to has gone down and switched to a new content set, so when you reconnect the cached ATP assets will no longer be valid.
+    connect(&domainHandler, &DomainHandler::disconnectedFromDomain, DependencyManager::get<ScriptCache>().data(), &ScriptCache::clearATPScriptsFromCache);
+
     // update our location every 5 seconds in the metaverse server, assuming that we are authenticated with one
     const qint64 DATA_SERVER_LOCATION_CHANGE_UPDATE_MSECS = 5 * MSECS_PER_SECOND;

@@ -1498,6 +1502,9 @@ void Application::cleanupBeforeQuit() {
     DependencyManager::get<ScriptEngines>()->shutdownScripting(); // stop all currently running global scripts
     DependencyManager::destroy<ScriptEngines>();

+    _displayPlugin.reset();
+    PluginManager::getInstance()->shutdown();
+
     // Cleanup all overlays after the scripts, as scripts might add more
     _overlays.cleanupAllOverlays();

AvatarActionHold.cpp

@@ -37,9 +37,13 @@ AvatarActionHold::AvatarActionHold(const QUuid& id, EntityItemPointer ownerEntit
 }

 AvatarActionHold::~AvatarActionHold() {
-    auto myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
-    if (myAvatar) {
-        myAvatar->removeHoldAction(this);
+    // Sometimes actions are destroyed after the AvatarManager is destroyed by the Application.
+    auto avatarManager = DependencyManager::get<AvatarManager>();
+    if (avatarManager) {
+        auto myAvatar = avatarManager->getMyAvatar();
+        if (myAvatar) {
+            myAvatar->removeHoldAction(this);
+        }
     }

 #if WANT_DEBUG
MyAvatar.cpp

@@ -2427,12 +2427,13 @@ void MyAvatar::updateHoldActions(const AnimPose& prePhysicsPose, const AnimPose&
     EntityTreeRenderer* entityTreeRenderer = qApp->getEntities();
     EntityTreePointer entityTree = entityTreeRenderer ? entityTreeRenderer->getTree() : nullptr;
     if (entityTree) {
+        // to prevent actions from adding or removing themselves from the _holdActions vector
+        // while we are iterating, we need to enter a critical section.
+        std::lock_guard<std::mutex> guard(_holdActionsMutex);
+
         // lateAvatarUpdate will modify entity position & orientation, so we need an entity write lock
         entityTree->withWriteLock([&] {
-
-            // to prevent actions from adding or removing themselves from the _holdActions vector
-            // while we are iterating, we need to enter a critical section.
-            std::lock_guard<std::mutex> guard(_holdActionsMutex);
-
             for (auto& holdAction : _holdActions) {
                 holdAction->lateAvatarUpdate(prePhysicsPose, postUpdatePose);
             }
ApplicationOverlay.cpp

@@ -258,7 +258,7 @@ void ApplicationOverlay::buildFramebufferObject() {
     auto uiSize = qApp->getUiSize();
     if (!_overlayFramebuffer || uiSize != _overlayFramebuffer->getSize()) {
-        _overlayFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+        _overlayFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("ApplicationOverlay"));
     }

     auto width = uiSize.x;
OpenGLDisplayPlugin.cpp

@@ -356,6 +356,7 @@ void OpenGLDisplayPlugin::customizeContext() {
                 gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA),
                 image.width(), image.height(),
                 gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
+            cursorData.texture->setSource("cursor texture");
             auto usage = gpu::Texture::Usage::Builder().withColor().withAlpha();
             cursorData.texture->setUsage(usage.build());
             cursorData.texture->assignStoredMip(0, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA), image.byteCount(), image.constBits());

@@ -413,8 +414,6 @@ void OpenGLDisplayPlugin::customizeContext() {
         _cursorPipeline = gpu::Pipeline::create(program, state);
     }
 }
-    auto renderSize = getRecommendedRenderSize();
-    _compositeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create(gpu::Element::COLOR_RGBA_32, renderSize.x, renderSize.y));
-}

 void OpenGLDisplayPlugin::uncustomizeContext() {

@@ -424,6 +423,7 @@ void OpenGLDisplayPlugin::uncustomizeContext() {
     _compositeFramebuffer.reset();
     withPresentThreadLock([&] {
         _currentFrame.reset();
+        _lastFrame = nullptr;
         while (!_newFrameQueue.empty()) {
             _gpuContext->consumeFrameUpdates(_newFrameQueue.front());
             _newFrameQueue.pop();

@@ -559,7 +559,7 @@ void OpenGLDisplayPlugin::compositeScene() {
 void OpenGLDisplayPlugin::compositeLayers() {
     auto renderSize = getRecommendedRenderSize();
     if (!_compositeFramebuffer || _compositeFramebuffer->getSize() != renderSize) {
-        _compositeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create(gpu::Element::COLOR_RGBA_32, renderSize.x, renderSize.y));
+        _compositeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("displayPlugin::composite", gpu::Element::COLOR_RGBA_32, renderSize.x, renderSize.y));
     }

     {

@@ -611,10 +611,10 @@ void OpenGLDisplayPlugin::present() {
     {
         withPresentThreadLock([&] {
             _renderRate.increment();
-            if (_currentFrame != _lastFrame) {
+            if (_currentFrame.get() != _lastFrame) {
                 _newFrameRate.increment();
             }
-            _lastFrame = _currentFrame;
+            _lastFrame = _currentFrame.get();
         });
         // Execute the frame rendering commands
         PROFILE_RANGE_EX("execute", 0xff00ff00, (uint64_t)presentCount())

@@ -755,3 +755,8 @@ void OpenGLDisplayPlugin::render(std::function<void(gpu::Batch& batch)> f) {
     f(batch);
     _gpuContext->executeBatch(batch);
 }
+
+
+OpenGLDisplayPlugin::~OpenGLDisplayPlugin() {
+    qDebug() << "Destroying OpenGLDisplayPlugin";
+}
OpenGLDisplayPlugin.h

@@ -36,6 +36,7 @@ protected:
     using Lock = std::unique_lock<Mutex>;
     using Condition = std::condition_variable;
 public:
+    ~OpenGLDisplayPlugin();
     // These must be final to ensure proper ordering of operations
     // between the main thread and the presentation thread
     bool activate() override final;

@@ -115,7 +116,7 @@ protected:
     RateCounter<> _renderRate;

     gpu::FramePointer _currentFrame;
-    gpu::FramePointer _lastFrame;
+    gpu::Frame* _lastFrame { nullptr };
     gpu::FramebufferPointer _compositeFramebuffer;
     gpu::PipelinePointer _overlayPipeline;
     gpu::PipelinePointer _simplePipeline;
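A note on the _lastFrame type change: the member is only compared against _currentFrame for identity and never dereferenced on this path, so holding it as a raw gpu::Frame* rather than a gpu::FramePointer avoids keeping the previous frame and its GPU resources alive for an extra present cycle. That appears to be the motivation for the .get() changes in present() above.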
HmdDisplayPlugin.cpp

@@ -125,6 +125,7 @@ void HmdDisplayPlugin::uncustomizeContext() {
         batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
     });
     _overlayRenderer = OverlayRenderer();
+    _previewTexture.reset();
     Parent::uncustomizeContext();
 }

@@ -265,6 +266,7 @@ void HmdDisplayPlugin::internalPresent() {
                 gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA),
                 image.width(), image.height(),
                 gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
+            _previewTexture->setSource("HMD Preview Texture");
             _previewTexture->setUsage(gpu::Texture::Usage::Builder().withColor().build());
             _previewTexture->assignStoredMip(0, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA), image.byteCount(), image.constBits());
             _previewTexture->autoGenerateMips(-1);

@@ -634,3 +636,6 @@ void HmdDisplayPlugin::compositeExtra() {
     });
 }
+
+HmdDisplayPlugin::~HmdDisplayPlugin() {
+    qDebug() << "Destroying HmdDisplayPlugin";
+}
HmdDisplayPlugin.h

@@ -23,6 +23,7 @@
 class HmdDisplayPlugin : public OpenGLDisplayPlugin {
     using Parent = OpenGLDisplayPlugin;
 public:
+    ~HmdDisplayPlugin();
     bool isHmd() const override final { return true; }
     float getIPD() const override final { return _ipd; }
     glm::mat4 getEyeToHeadTransform(Eye eye) const override final { return _eyeOffsets[eye]; }
Framebuffer.cpp

@@ -20,16 +20,17 @@ using namespace gpu;
 Framebuffer::~Framebuffer() {
 }

-Framebuffer* Framebuffer::create() {
+Framebuffer* Framebuffer::create(const std::string& name) {
     auto framebuffer = new Framebuffer();
+    framebuffer->setName(name);
     framebuffer->_renderBuffers.resize(MAX_NUM_RENDER_BUFFERS);
     framebuffer->_colorStamps.resize(MAX_NUM_RENDER_BUFFERS, 0);
     return framebuffer;
 }

-Framebuffer* Framebuffer::create( const Format& colorBufferFormat, uint16 width, uint16 height) {
-    auto framebuffer = Framebuffer::create();
+Framebuffer* Framebuffer::create(const std::string& name, const Format& colorBufferFormat, uint16 width, uint16 height) {
+    auto framebuffer = Framebuffer::create(name);

     auto colorTexture = TexturePointer(Texture::create2D(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
     colorTexture->setSource("Framebuffer::colorTexture");

@@ -39,13 +40,11 @@ Framebuffer* Framebuffer::create( const Format& colorBufferFormat, uint16 width,
     return framebuffer;
 }

-Framebuffer* Framebuffer::create( const Format& colorBufferFormat, const Format& depthStencilBufferFormat, uint16 width, uint16 height) {
-    auto framebuffer = Framebuffer::create();
+Framebuffer* Framebuffer::create(const std::string& name, const Format& colorBufferFormat, const Format& depthStencilBufferFormat, uint16 width, uint16 height) {
+    auto framebuffer = Framebuffer::create(name);

     auto colorTexture = TexturePointer(Texture::create2D(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
-    colorTexture->setSource("Framebuffer::colorTexture");
     auto depthTexture = TexturePointer(Texture::create2D(depthStencilBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
-    depthTexture->setSource("Framebuffer::depthTexture");
     framebuffer->setRenderBuffer(0, colorTexture);
     framebuffer->setDepthStencilBuffer(depthTexture, depthStencilBufferFormat);

@@ -53,11 +52,10 @@ Framebuffer* Framebuffer::create( const Format& colorBufferFormat, const Format&
 }

 Framebuffer* Framebuffer::createShadowmap(uint16 width) {
-    auto framebuffer = Framebuffer::create();
+    auto framebuffer = Framebuffer::create("Shadowmap");

     auto depthFormat = Element(gpu::SCALAR, gpu::FLOAT, gpu::DEPTH); // Depth32 texel format
     auto depthTexture = TexturePointer(Texture::create2D(depthFormat, width, width));
-    depthTexture->setSource("Framebuffer::shadowMap");
     Sampler::Desc samplerDesc;
     samplerDesc._borderColor = glm::vec4(1.0f);
     samplerDesc._wrapModeU = Sampler::WRAP_BORDER;

@@ -155,6 +153,10 @@ int Framebuffer::setRenderBuffer(uint32 slot, const TexturePointer& texture, uin
         if (!validateTargetCompatibility(*texture, subresource)) {
             return -1;
         }
+
+        if (texture->source().empty()) {
+            texture->setSource(_name + "::color::" + std::to_string(slot));
+        }
     }

     ++_colorStamps[slot];

@@ -216,7 +218,6 @@ uint32 Framebuffer::getRenderBufferSubresource(uint32 slot) const {
 }

 bool Framebuffer::setDepthStencilBuffer(const TexturePointer& texture, const Format& format, uint32 subresource) {
-    ++_depthStamp;
     if (isSwapchain()) {
         return false;
     }

@@ -226,8 +227,13 @@ bool Framebuffer::setDepthStencilBuffer(const TexturePointer& texture, const For
         if (!validateTargetCompatibility(*texture)) {
             return false;
         }
+
+        if (texture->source().empty()) {
+            texture->setSource(_name + "::depthStencil");
+        }
     }

+    ++_depthStamp;
     updateSize(texture);

     // assign the new one
Framebuffer.h

@@ -88,9 +88,9 @@ public:
     ~Framebuffer();

     static Framebuffer* create(const SwapchainPointer& swapchain);
-    static Framebuffer* create();
-    static Framebuffer* create(const Format& colorBufferFormat, uint16 width, uint16 height);
-    static Framebuffer* create(const Format& colorBufferFormat, const Format& depthStencilBufferFormat, uint16 width, uint16 height);
+    static Framebuffer* create(const std::string& name);
+    static Framebuffer* create(const std::string& name, const Format& colorBufferFormat, uint16 width, uint16 height);
+    static Framebuffer* create(const std::string& name, const Format& colorBufferFormat, const Format& depthStencilBufferFormat, uint16 width, uint16 height);
     static Framebuffer* createShadowmap(uint16 width);

     bool isSwapchain() const;

@@ -127,6 +127,8 @@ public:
     uint16 getWidth() const;
     uint16 getHeight() const;
     uint16 getNumSamples() const;
+    const std::string& getName() const { return _name; }
+    void setName(const std::string& name) { _name = name; }

     float getAspectRatio() const { return getWidth() / (float) getHeight() ; }

@@ -145,6 +147,7 @@ public:
     static Transform evalSubregionTexcoordTransform(const glm::ivec2& sourceSurface, const glm::ivec4& destViewport);

 protected:
+    std::string _name;
     SwapchainPointer _swapchain;

     Stamp _depthStamp { 0 };
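Taken together with the Framebuffer.cpp changes, the named create() overloads let every attachment inherit a debug label. A small usage sketch follows; the name and sizes here are arbitrary:

    auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
    auto framebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("myPass"));
    auto color = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_SRGBA_32, 512, 512, defaultSampler));
    // the texture has no source string yet, so setRenderBuffer() names it
    // "myPass::color::0"; a depth attachment would become "myPass::depthStencil"
    framebuffer->setRenderBuffer(0, color);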
@@ -23,7 +23,7 @@
 #include "AccountManager.h"

 const QString HIFI_URL_SCHEME = "hifi";
-const QString DEFAULT_HIFI_ADDRESS = "hifi://dev-welcome";
+const QString DEFAULT_HIFI_ADDRESS = "hifi://welcome";
 const QString SANDBOX_HIFI_ADDRESS = "hifi://localhost";
 const QString SANDBOX_STATUS_URL = "http://localhost:60332/status";
 const QString INDEX_PATH = "/";
NodeList.cpp

@@ -389,7 +389,7 @@ void NodeList::sendDSPathQuery(const QString& newPath) {
     // only send a path query if we know who our DS is or is going to be
     if (_domainHandler.isSocketKnown()) {
         // construct the path query packet
-        auto pathQueryPacket = NLPacket::create(PacketType::DomainServerPathQuery);
+        auto pathQueryPacket = NLPacket::create(PacketType::DomainServerPathQuery, -1, true);

         // get the UTF8 representation of path query
         QByteArray pathQueryUTF8 = newPath.toUtf8();
PluginManager.cpp

@@ -276,4 +276,30 @@ void PluginManager::saveSettings() {
     saveInputPluginSettings(getInputPlugins());
 }

+void PluginManager::shutdown() {
+    for (auto inputPlugin : getInputPlugins()) {
+        if (inputPlugin->isActive()) {
+            inputPlugin->deactivate();
+        }
+    }
+
+    for (auto displayPlugins : getDisplayPlugins()) {
+        if (displayPlugins->isActive()) {
+            displayPlugins->deactivate();
+        }
+    }
+
+    auto loadedPlugins = getLoadedPlugins();
+    // Now grab the dynamic plugins
+    for (auto loader : getLoadedPlugins()) {
+        InputProvider* inputProvider = qobject_cast<InputProvider*>(loader->instance());
+        if (inputProvider) {
+            inputProvider->destroyInputPlugins();
+        }
+        DisplayProvider* displayProvider = qobject_cast<DisplayProvider*>(loader->instance());
+        if (displayProvider) {
+            displayProvider->destroyDisplayPlugins();
+        }
+    }
+}
 #endif
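The ordering in shutdown() mirrors the teardown added to Application::cleanupBeforeQuit() earlier in this commit: built-in input and display plugins are deactivated first, and only then are the dynamically loaded providers asked to destroy the plugin instances they own, so nothing is torn down while still active.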
PluginManager.h

@@ -28,6 +28,8 @@ public:
     void disableInputs(const QStringList& inputs);
     void saveSettings();
     void setContainer(PluginContainer* container) { _container = container; }
+
+    void shutdown();
 private:
     PluginContainer* _container { nullptr };
 };
@@ -19,6 +19,7 @@ public:
     virtual ~DisplayProvider() {}

     virtual DisplayPluginList getDisplayPlugins() = 0;
+    virtual void destroyDisplayPlugins() = 0;
 };

 #define DisplayProvider_iid "com.highfidelity.plugins.display"

@@ -29,6 +30,7 @@ class InputProvider {
 public:
     virtual ~InputProvider() {}
     virtual InputPluginList getInputPlugins() = 0;
+    virtual void destroyInputPlugins() = 0;
 };

 #define InputProvider_iid "com.highfidelity.plugins.input"
@@ -75,11 +75,11 @@ void AmbientOcclusionFramebuffer::allocate() {
     auto height = _frameSize.y;

     _occlusionTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
-    _occlusionFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+    _occlusionFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("occlusion"));
     _occlusionFramebuffer->setRenderBuffer(0, _occlusionTexture);

     _occlusionBlurredTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
-    _occlusionBlurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+    _occlusionBlurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("occlusionBlurred"));
     _occlusionBlurredFramebuffer->setRenderBuffer(0, _occlusionBlurredTexture);
 }

@@ -41,11 +41,10 @@ const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {

     if (!_antialiasingBuffer) {
         // Link the antialiasing FBO to texture
-        _antialiasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+        _antialiasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("antialiasing"));
         auto format = gpu::Element::COLOR_SRGBA_32; // DependencyManager::get<FramebufferCache>()->getLightingTexture()->getTexelFormat();
         auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
         _antialiasingTexture = gpu::TexturePointer(gpu::Texture::create2D(format, width, height, defaultSampler));
-        _antialiasingTexture->setSource("Antialiasing::_antialiasingTexture");
         _antialiasingBuffer->setRenderBuffer(0, _antialiasingTexture);
     }

@@ -43,8 +43,8 @@ void DeferredFramebuffer::updatePrimaryDepth(const gpu::TexturePointer& depthBuf

 void DeferredFramebuffer::allocate() {

-    _deferredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
-    _deferredFramebufferDepthColor = gpu::FramebufferPointer(gpu::Framebuffer::create());
+    _deferredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("deferred"));
+    _deferredFramebufferDepthColor = gpu::FramebufferPointer(gpu::Framebuffer::create("deferredDepthColor"));

     auto colorFormat = gpu::Element::COLOR_SRGBA_32;
     auto linearFormat = gpu::Element::COLOR_RGBA_32;

@@ -54,11 +54,8 @@ void DeferredFramebuffer::allocate() {
     auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);

     _deferredColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
-    _deferredColorTexture->setSource("DeferredFramebuffer::_deferredColorTexture");
     _deferredNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(linearFormat, width, height, defaultSampler));
-    _deferredNormalTexture->setSource("DeferredFramebuffer::_deferredNormalTexture");
     _deferredSpecularTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
-    _deferredSpecularTexture->setSource("DeferredFramebuffer::_deferredSpecularTexture");

     _deferredFramebuffer->setRenderBuffer(0, _deferredColorTexture);
     _deferredFramebuffer->setRenderBuffer(1, _deferredNormalTexture);

@@ -69,7 +66,6 @@ void DeferredFramebuffer::allocate() {
     auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
     if (!_primaryDepthTexture) {
         _primaryDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, width, height, defaultSampler));
-        _primaryDepthTexture->setSource("DeferredFramebuffer::_primaryDepthTexture");
     }

     _deferredFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);

@@ -80,8 +76,7 @@ void DeferredFramebuffer::allocate() {
     auto smoothSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR);

     _lightingTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10), width, height, defaultSampler));
-    _lightingTexture->setSource("DeferredFramebuffer::_lightingTexture");
-    _lightingFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+    _lightingFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("lighting"));
     _lightingFramebuffer->setRenderBuffer(0, _lightingTexture);
     _lightingFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);

@@ -346,12 +346,11 @@ void PreparePrimaryFramebuffer::run(const SceneContextPointer& sceneContext, con
     }

     if (!_primaryFramebuffer) {
-        _primaryFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+        _primaryFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("deferredPrimary"));
         auto colorFormat = gpu::Element::COLOR_SRGBA_32;

         auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
         auto primaryColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, frameSize.x, frameSize.y, defaultSampler));
-        primaryColorTexture->setSource("PreparePrimaryFramebuffer::primaryColorTexture");

         _primaryFramebuffer->setRenderBuffer(0, primaryColorTexture);

@@ -359,7 +358,6 @@ void PreparePrimaryFramebuffer::run(const SceneContextPointer& sceneContext, con

         auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
         auto primaryDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, frameSize.x, frameSize.y, defaultSampler));
-        primaryDepthTexture->setSource("PreparePrimaryFramebuffer::primaryDepthTexture");

         _primaryFramebuffer->setDepthStencilBuffer(primaryDepthTexture, depthFormat);
     }

FramebufferCache.cpp

@@ -36,7 +36,7 @@ void FramebufferCache::createPrimaryFramebuffer() {

     auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);

-    _selfieFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+    _selfieFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("selfie"));
     auto tex = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width * 0.5, height * 0.5, defaultSampler));
     _selfieFramebuffer->setRenderBuffer(0, tex);

@@ -47,7 +47,7 @@ void FramebufferCache::createPrimaryFramebuffer() {
 gpu::FramebufferPointer FramebufferCache::getFramebuffer() {
     std::unique_lock<std::mutex> lock(_mutex);
     if (_cachedFramebuffers.empty()) {
-        _cachedFramebuffers.push_back(gpu::FramebufferPointer(gpu::Framebuffer::create(gpu::Element::COLOR_SRGBA_32, _frameBufferSize.width(), _frameBufferSize.height())));
+        _cachedFramebuffers.push_back(gpu::FramebufferPointer(gpu::Framebuffer::create("cached", gpu::Element::COLOR_SRGBA_32, _frameBufferSize.width(), _frameBufferSize.height())));
     }
     gpu::FramebufferPointer result = _cachedFramebuffers.front();
     _cachedFramebuffers.pop_front();
@@ -319,7 +319,7 @@ void diffuseProfileGPU(gpu::TexturePointer& profileMap, RenderArgs* args) {
         makePipeline = gpu::Pipeline::create(program, state);
     }

-    auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+    auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("diffuseProfile"));
     makeFramebuffer->setRenderBuffer(0, profileMap);

     gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {

@@ -356,7 +356,7 @@ void diffuseScatterGPU(const gpu::TexturePointer& profileMap, gpu::TexturePointe
         makePipeline = gpu::Pipeline::create(program, state);
     }

-    auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+    auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("diffuseScatter"));
     makeFramebuffer->setRenderBuffer(0, lut);

     gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {

@@ -393,7 +393,7 @@ void computeSpecularBeckmannGPU(gpu::TexturePointer& beckmannMap, RenderArgs* ar
         makePipeline = gpu::Pipeline::create(program, state);
     }

-    auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
+    auto makeFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("computeSpecularBeckmann"));
    makeFramebuffer->setRenderBuffer(0, beckmannMap);

     gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {

@ -74,22 +74,19 @@ void LinearDepthFramebuffer::allocate() {
|
|||
// For Linear Depth:
|
||||
_linearDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), width, height,
|
||||
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
|
||||
_linearDepthTexture->setSource("LinearDepthFramebuffer::_linearDepthTexture");
|
||||
_linearDepthFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_linearDepthFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("linearDepth"));
|
||||
_linearDepthFramebuffer->setRenderBuffer(0, _linearDepthTexture);
|
||||
_linearDepthFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, _primaryDepthTexture->getTexelFormat());
|
||||
|
||||
// For Downsampling:
|
||||
_halfLinearDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
|
||||
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
|
||||
_halfLinearDepthTexture->setSource("LinearDepthFramebuffer::_halfLinearDepthTexture");
|
||||
_halfLinearDepthTexture->autoGenerateMips(5);
|
||||
|
||||
_halfNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
|
||||
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
|
||||
_halfNormalTexture->setSource("LinearDepthFramebuffer::_halfNormalTexture");
|
||||
|
||||
_downsampleFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
|
||||
_downsampleFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("halfLinearDepth"));
|
||||
_downsampleFramebuffer->setRenderBuffer(0, _halfLinearDepthTexture);
|
||||
_downsampleFramebuffer->setRenderBuffer(1, _halfNormalTexture);
|
||||
}
|
||||
|
@ -304,18 +301,15 @@ void SurfaceGeometryFramebuffer::allocate() {
    auto height = _frameSize.y;

    _curvatureTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
    _curvatureTexture->setSource("SurfaceGeometryFramebuffer::_curvatureTexture");
    _curvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
    _curvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::curvature"));
    _curvatureFramebuffer->setRenderBuffer(0, _curvatureTexture);

    _lowCurvatureTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
    _lowCurvatureTexture->setSource("SurfaceGeometryFramebuffer::_lowCurvatureTexture");
    _lowCurvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
    _lowCurvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::lowCurvature"));
    _lowCurvatureFramebuffer->setRenderBuffer(0, _lowCurvatureTexture);

    _blurringTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
    _blurringTexture->setSource("SurfaceGeometryFramebuffer::_blurringTexture");
    _blurringFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
    _blurringFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::blurring"));
    _blurringFramebuffer->setRenderBuffer(0, _blurringTexture);
}

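The render-pipeline hunks above all make the same one-line change: each gpu::Framebuffer is now created with a human-readable debug name ("computeSpecularBeckmann", "linearDepth", "surfaceGeometry::curvature", and so on) instead of anonymously. A minimal sketch of the idea, using hypothetical stand-in types rather than the engine's actual gpu API:

    // Hypothetical sketch: tag each render target with a name at creation
    // time so GPU capture tools and leak reports can identify it.
    #include <memory>
    #include <string>
    #include <utility>

    class Framebuffer {
    public:
        explicit Framebuffer(std::string name) : _name(std::move(name)) {}
        const std::string& name() const { return _name; }
    private:
        std::string _name;   // surfaces in debuggers instead of "FBO #7"
    };

    using FramebufferPointer = std::shared_ptr<Framebuffer>;

    // A real backend would also forward the label to the driver, for
    // example via glObjectLabel, so capture tools such as RenderDoc show it.
    FramebufferPointer createNamed(const std::string& name) {
        return std::make_shared<Framebuffer>(name);
    }

The payoff is purely diagnostic: behavior is unchanged, but every target in a frame capture becomes self-identifying.
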
@ -101,7 +101,7 @@ bool BlurInOutResource::updateResources(const gpu::FramebufferPointer& sourceFra
    }

    if (!_blurredFramebuffer) {
        _blurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
        _blurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("blur"));

        // attach depthStencil if present in source
        //if (sourceFramebuffer->hasDepthStencil()) {

@ -124,7 +124,7 @@ bool BlurInOutResource::updateResources(const gpu::FramebufferPointer& sourceFra
    // The job output the blur result in a new Framebuffer spawning here.
    // Let's make sure it's ready for this
    if (!_outputFramebuffer) {
        _outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
        _outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("blurOutput"));

        // attach depthStencil if present in source
        /* if (sourceFramebuffer->hasDepthStencil()) {

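Both BlurInOutResource hunks keep the existing lazy-allocation guard: the target is created only on first use inside updateResources(), then reused on every later frame. A simplified sketch of that guard, with stand-in types:

    // Sketch of the if (!_framebuffer) { create } lazy-allocation pattern.
    #include <memory>

    struct Framebuffer { int width = 0; int height = 0; };

    class BlurResource {
    public:
        Framebuffer& blurred(int width, int height) {
            if (!_blurredFramebuffer) {   // first call: allocate once
                _blurredFramebuffer = std::make_unique<Framebuffer>();
                _blurredFramebuffer->width = width;
                _blurredFramebuffer->height = height;
            }
            return *_blurredFramebuffer;  // later calls: reuse
        }
    private:
        std::unique_ptr<Framebuffer> _blurredFramebuffer;
    };
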
@ -36,6 +36,19 @@ void ScriptCache::clearCache() {
    _scriptCache.clear();
}

void ScriptCache::clearATPScriptsFromCache() {
    Lock lock(_containerLock);
    qCDebug(scriptengine) << "Clearing ATP scripts from ScriptCache";
    for (auto it = _scriptCache.begin(); it != _scriptCache.end();) {
        if (it.key().scheme() == "atp") {
            qCDebug(scriptengine) << "Removing: " << it.key();
            it = _scriptCache.erase(it);
        } else {
            ++it;
        }
    }
}

QString ScriptCache::getScript(const QUrl& unnormalizedURL, ScriptUser* scriptUser, bool& isPending, bool reload) {
    QUrl url = ResourceManager::normalizeURL(unnormalizedURL);
    QString scriptContents;

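clearATPScriptsFromCache() relies on the erase-while-iterating idiom: erasing through the container's erase(iterator) overload returns the next valid iterator, so the loop never touches an invalidated one. A self-contained sketch of the same idiom, assuming a QMap<QUrl, QString> as a stand-in for the real cache type:

    #include <QMap>
    #include <QString>
    #include <QUrl>

    // Remove every entry whose key uses the given URL scheme.
    void clearScheme(QMap<QUrl, QString>& cache, const QString& scheme) {
        for (auto it = cache.begin(); it != cache.end();) {
            if (it.key().scheme() == scheme) {
                it = cache.erase(it);   // erase() hands back the next iterator
            } else {
                ++it;
            }
        }
    }

Calling cache.erase(it) and then ++it would be the classic bug; the assign-and-continue form is the safe version.
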
@ -39,6 +39,7 @@ class ScriptCache : public QObject, public Dependency {
public:
    void clearCache();
    Q_INVOKABLE void clearATPScriptsFromCache();
    void getScriptContents(const QString& scriptOrURL, contentAvailableCallback contentAvailable, bool forceDownload = false);

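The Q_INVOKABLE marker on the new declaration is what makes the method reachable from outside C++: it registers the method with Qt's meta-object system, so script engines and QMetaObject::invokeMethod() can call it by name. A generic Qt sketch (not the actual ScriptCache class; requires moc, as usual for Q_OBJECT types):

    #include <QMetaObject>
    #include <QObject>

    class Cache : public QObject {
        Q_OBJECT
    public:
        Q_INVOKABLE void clearEntries() { /* ... */ }
    };

    // Elsewhere, possibly from a script binding or another thread:
    //     QMetaObject::invokeMethod(cachePtr, "clearEntries");

Without the marker, the method compiles fine but is invisible to the meta-object system.
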
@ -38,6 +38,10 @@ public:
        return _inputPlugins;
    }

    virtual void destroyInputPlugins() override {
        _inputPlugins.clear();
    }

private:
    InputPluginList _inputPlugins;
};

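This destroyInputPlugins() hook, repeated across the plugin providers in the following hunks, gives the host an explicit teardown point: clearing the shared-pointer list drops the last references while the libraries and rendering contexts the plugins depend on are still alive, instead of leaving destruction to static-teardown order. A simplified sketch with stand-in types:

    #include <memory>
    #include <vector>

    struct InputPlugin { virtual ~InputPlugin() = default; };
    using InputPluginList = std::vector<std::shared_ptr<InputPlugin>>;

    class Provider {
    public:
        void destroyInputPlugins() {
            _inputPlugins.clear();   // plugins destroyed here, deterministically,
        }                            // not during static destruction at exit
    private:
        InputPluginList _inputPlugins;
    };
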
@ -37,6 +37,10 @@ public:
        return _inputPlugins;
    }

    virtual void destroyInputPlugins() override {
        _inputPlugins.clear();
    }

private:
    InputPluginList _inputPlugins;
};

@ -38,6 +38,9 @@ public:
        return _inputPlugins;
    }

    virtual void destroyInputPlugins() override {
        _inputPlugins.clear();
    }
private:
    InputPluginList _inputPlugins;
};

@ -126,3 +126,7 @@ void OculusBaseDisplayPlugin::updatePresentPose() {
    //_currentPresentFrameInfo.presentPose = toGlm(trackingState.HeadPose.ThePose);
    _currentPresentFrameInfo.presentPose = _currentPresentFrameInfo.renderPose;
}

OculusBaseDisplayPlugin::~OculusBaseDisplayPlugin() {
    qDebug() << "Destroying OculusBaseDisplayPlugin";
}

@ -16,6 +16,7 @@
class OculusBaseDisplayPlugin : public HmdDisplayPlugin {
    using Parent = HmdDisplayPlugin;
public:
    ~OculusBaseDisplayPlugin();
    bool isSupported() const override;

    // Stereo specific methods

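The using Parent = HmdDisplayPlugin; alias visible in this header is a small idiom worth noting: overrides can call Parent::method() without hard-coding the base-class name, so call sites stay correct if the inheritance chain changes. A generic illustration, not the plugin classes themselves:

    struct Base {
        virtual void customizeContext() {}
        virtual ~Base() = default;
    };

    struct Derived : Base {
        using Parent = Base;
        void customizeContext() override {
            Parent::customizeContext();   // no need to name Base twice
        }
    };
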
@ -46,7 +46,7 @@ void OculusDisplayPlugin::cycleDebugOutput() {
void OculusDisplayPlugin::customizeContext() {
    Parent::customizeContext();
    _outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create(gpu::Element::COLOR_SRGBA_32, _renderTargetSize.x, _renderTargetSize.y));
    _outputFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("OculusOutput", gpu::Element::COLOR_SRGBA_32, _renderTargetSize.x, _renderTargetSize.y));
    ovrTextureSwapChainDesc desc = { };
    desc.Type = ovrTexture_2D;
    desc.ArraySize = 1;

@ -97,6 +97,7 @@ void OculusDisplayPlugin::uncustomizeContext() {
    ovr_DestroyTextureSwapChain(_session, _textureSwapChain);
    _textureSwapChain = nullptr;
    _outputFramebuffer.reset();
    Parent::uncustomizeContext();
}

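The uncustomizeContext() change restores symmetry: _outputFramebuffer is created in customizeContext(), so it is now released in reverse order, while the GL context is still current and before the parent tears its own state down. A schematic sketch of the pairing, not the plugin itself:

    #include <memory>

    struct Framebuffer {};

    class DisplayPlugin {
    public:
        virtual ~DisplayPlugin() = default;
        virtual void customizeContext() {
            _outputFramebuffer = std::make_shared<Framebuffer>();
        }
        virtual void uncustomizeContext() {
            _outputFramebuffer.reset();   // release before the context goes away
        }
    private:
        std::shared_ptr<Framebuffer> _outputFramebuffer;
    };
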
@ -163,3 +164,7 @@ QString OculusDisplayPlugin::getPreferredAudioOutDevice() const {
    }
    return AudioClient::friendlyNameForAudioDevice(buffer);
}

OculusDisplayPlugin::~OculusDisplayPlugin() {
    qDebug() << "Destroying OculusDisplayPlugin";
}

@ -12,6 +12,7 @@
class OculusDisplayPlugin : public OculusBaseDisplayPlugin {
    using Parent = OculusBaseDisplayPlugin;
public:
    ~OculusDisplayPlugin();
    const QString& getName() const override { return NAME; }

    void init() override;

|
@ -62,6 +62,14 @@ public:
|
|||
return _inputPlugins;
|
||||
}
|
||||
|
||||
virtual void destroyInputPlugins() override {
|
||||
_inputPlugins.clear();
|
||||
}
|
||||
|
||||
virtual void destroyDisplayPlugins() override {
|
||||
_displayPlugins.clear();
|
||||
}
|
||||
|
||||
private:
|
||||
DisplayPluginList _displayPlugins;
|
||||
InputPluginList _inputPlugins;
|
||||
|
|
|
@ -38,6 +38,10 @@ public:
|
|||
return _displayPlugins;
|
||||
}
|
||||
|
||||
virtual void destroyDisplayPlugins() override {
|
||||
_displayPlugins.clear();
|
||||
}
|
||||
|
||||
private:
|
||||
DisplayPluginList _displayPlugins;
|
||||
};
|
||||
|
|
|
@ -445,8 +445,9 @@ void OpenVrDisplayPlugin::internalDeactivate() {
|
|||
_openVrDisplayActive = false;
|
||||
_container->setIsOptionChecked(StandingHMDSensorMode, false);
|
||||
if (_system) {
|
||||
// Invalidate poses. It's fine if someone else sets these shared values, but we're about to stop updating them, and
|
||||
// TODO: Invalidate poses. It's fine if someone else sets these shared values, but we're about to stop updating them, and
|
||||
// we don't want ViveControllerManager to consider old values to be valid.
|
||||
_container->makeRenderingContextCurrent();
|
||||
releaseOpenVrSystem();
|
||||
_system = nullptr;
|
||||
}
|
||||
|
|
|
@ -51,6 +51,13 @@ public:
|
|||
return _inputPlugins;
|
||||
}
|
||||
|
||||
virtual void destroyInputPlugins() override {
|
||||
_inputPlugins.clear();
|
||||
}
|
||||
|
||||
virtual void destroyDisplayPlugins() override {
|
||||
_displayPlugins.clear();
|
||||
}
|
||||
|
||||
private:
|
||||
DisplayPluginList _displayPlugins;
|
||||
|
|
|
@ -132,6 +132,7 @@ void ViveControllerManager::deactivate() {
|
|||
_container->removeMenu(MENU_PATH);
|
||||
|
||||
if (_system) {
|
||||
_container->makeRenderingContextCurrent();
|
||||
releaseOpenVrSystem();
|
||||
_system = nullptr;
|
||||
}
|
||||
|
|
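Both this hunk and the OpenVrDisplayPlugin one above release the shared OpenVR system through the same releaseOpenVrSystem() helper, which implies reference counting around VR_Init/VR_Shutdown so that whichever plugin deactivates last actually shuts the runtime down. A hedged sketch of that idea; the names and counting scheme here are assumptions, not the actual helper:

    #include <cassert>

    struct VrSystem {};              // stand-in for vr::IVRSystem

    static VrSystem* s_system = nullptr;
    static int s_refCount = 0;

    VrSystem* acquireVrSystem() {
        if (s_refCount++ == 0) {
            s_system = new VrSystem();   // real code would call vr::VR_Init
        }
        return s_system;
    }

    void releaseVrSystem() {
        assert(s_refCount > 0);
        if (--s_refCount == 0) {
            delete s_system;             // real code would call vr::VR_Shutdown
            s_system = nullptr;
        }
    }
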
|
@ -22,6 +22,7 @@ print('crowd-agent version 2');
|
|||
- File urls for AC scripts silently fail. Use a local server (e.g., python SimpleHTTPServer) for development.
|
||||
- URLs are cached regardless of server headers. Must use cache-defeating query parameters.
|
||||
- JSON.stringify(Avatar) silently fails (even when Agent.isAvatar)
|
||||
- If you run from a dev build directory, you must link assignment-client/<target>/resources to ../../interface/<target>/resources
|
||||
*/
|
||||
|
||||
function messageSend(message) {
|
||||
|
|
|
@ -23,6 +23,14 @@ var DENSITY = 0.3; // square meters per person. Some say 10 sq ft is arm's lengt
|
|||
var SOUND_DATA = {url: "http://howard-stearns.github.io/models/sounds/piano1.wav"};
|
||||
var AVATARS_CHATTERING_AT_ONCE = 4; // How many of the agents should we request to play SOUND at once.
|
||||
var NEXT_SOUND_SPREAD = 500; // millisecond range of how long to wait after one sound finishes, before playing the next
|
||||
var ANIMATION_DATA = {
|
||||
"url": "http://howard-stearns.github.io/models/resources/avatar/animations/idle.fbx",
|
||||
// "url": "http://howard-stearns.github.io/models/resources/avatar/animations/walk_fwd.fbx", // alternative example
|
||||
"startFrame": 0.0,
|
||||
"endFrame": 300.0,
|
||||
"timeScale": 1.0,
|
||||
"loopFlag": true
|
||||
};
|
||||
|
||||
var spread = Math.sqrt(MINIMUM_AVATARS * DENSITY); // meters
|
||||
var turnSpread = 90; // How many degrees should turn from front range over.
|
||||
|
@ -71,18 +79,8 @@ function messageHandler(channel, messageString, senderID) {
            rcpt: senderID,
            position: Vec3.sum(MyAvatar.position, {x: coord(), y: 0, z: coord()}),
            orientation: Quat.fromPitchYawRollDegrees(0, Quat.safeEulerAngles(MyAvatar.orientation).y + (turnSpread * (Math.random() - 0.5)), 0),
            soundData: chatter && SOUND_DATA/*
            // No need to specify skeletonModelURL
            //skeletonModelURL: "file:///c:/Program Files/High Fidelity Release/resources/meshes/being_of_light/being_of_light.fbx",
            //skeletonModelURL: "file:///c:/Program Files/High Fidelity Release/resources/meshes/defaultAvatar_full.fst"/,
            animationData: { // T-pose until we get animations working again.
                "url": "file:///C:/Program Files/High Fidelity Release/resources/avatar/animations/idle.fbx",
                //"url": "file:///c:/Program Files/High Fidelity Release/resources/avatar/animations/walk_fwd.fbx",
                "startFrame": 0.0,
                "endFrame": 300.0,
                "timeScale": 1.0,
                "loopFlag": true
            }*/
            soundData: chatter && SOUND_DATA,
            animationData: ANIMATION_DATA
        });
    }
    break;

@ -471,7 +471,12 @@ function update() {
    if (!Menu.isOptionChecked("First Person")) {
        return off(); // What to do? menus can be behind hand!
    }
    if (!Window.hasFocus() || !Reticle.allowMouseCapture) {
    if ((!Window.hasFocus() && !HMD.active) || !Reticle.allowMouseCapture) {
        // In desktop it's pretty clear when another app is on top. In that case we bail, because
        // hand controllers might be sputtering "valid" data and that will keep someone from deliberately
        // using the mouse on another app. (Fogbugz case 546.)
        // However, in HMD, you might not realize you're not on top, and you wouldn't be able to operate
        // other apps anyway. So in that case, we DO keep going even though we're not on top. (Fogbugz 1831.)
        return off(); // Don't mess with other apps or paused mouse activity
    }
    leftTrigger.update();

@ -1155,7 +1155,7 @@ function getPositionToImportEntity() {
    return position;
}
function importSVO(importURL) {
    if (!Entities.canAdjustLocks()) {
    if (!Entities.canRez() && !Entities.canRezTmp()) {
        Window.notifyEditError(INSUFFICIENT_PERMISSIONS_IMPORT_ERROR_MSG);
        return;
    }

@ -6,7 +6,7 @@
    <link rel="stylesheet" type="text/css" href="content-update.css"></link>
</head>
<body onload="ready()">
    <div class="colmask">
    <div class="colmask" id="content">
        <h3>We backed up your old Sandbox content, just in case.</h3>
        <p><b>To restore it, follow these steps:</b>

@ -45,9 +45,9 @@
        </div>

        <div class="footer">

            <p>For more information about managing your high Fidelity Sandbox Server, check out our docs: <a href="https://wiki.highfidelity.com/wiki/Sandbox" target="_blank">wiki.highfidelity.com/wiki/Sandbox</a>
            <p>For more information about managing your high Fidelity Sandbox Server, check out our docs: <a href="https://wiki.highfidelity.com/wiki/Sandbox" target="_blank">wiki.highfidelity.com/wiki/Sandbox</a>
            </p>
        </div>
    </div>
</div>
</body>
</html>

@ -6,6 +6,11 @@ function ready() {
    electron.ipcRenderer.on('update', function(event, message) {
        $('#directory').html(message);

        electron.ipcRenderer.send('setSize', {
            width: $(window).width(),
            height: $('#content').height() + 50
        });
    });

    electron.ipcRenderer.send('ready');

@ -530,6 +530,9 @@ function openBackupInstructions(folder) {
    }
    window.show();

    electron.ipcMain.on('setSize', function(event, obj) {
        window.setSize(obj.width, obj.height);
    });
    electron.ipcMain.on('ready', function() {
        console.log("got ready");
        window.webContents.send('update', folder);