Mirror of https://github.com/overte-org/overte.git (synced 2025-08-04 01:23:38 +02:00)
move agent specific code out of ScriptEngine
parent 8395fb6eff
commit 54c56a92f1

4 changed files with 185 additions and 192 deletions
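Note: the sketch below uses stand-in types and names, not the project's real Qt classes, to show the shape of the split this commit makes. ScriptEngine keeps emitting a generic per-frame update signal, while Agent now owns the agent-specific avatar and audio state and does that work in a slot connected to that signal (the diff below wires this with QObject::connect(&_scriptEngine, &ScriptEngine::update, this, &Agent::processAgentAvatarAndAudio)).

// Minimal, self-contained sketch of the ownership split (illustrative names only):
// the engine exposes a per-frame update hook; the agent owns the agent-only state
// and subscribes to it, so the engine no longer needs to know about avatars or audio.
#include <cstdio>
#include <functional>
#include <vector>

struct ScriptEngineSketch {
    // stands in for the ScriptEngine::update(float deltaTime) Qt signal
    std::vector<std::function<void(float)>> updateSubscribers;

    void tick(float deltaTime) {                  // one iteration of the run loop
        for (auto& subscriber : updateSubscribers) {
            subscriber(deltaTime);                // "emit update(deltaTime)"
        }
    }
};

struct AgentSketch {
    ScriptEngineSketch scriptEngine;
    bool isAvatar = false;                        // agent-only state now lives here

    AgentSketch() {
        // equivalent to QObject::connect(&_scriptEngine, &ScriptEngine::update,
        //                                this, &Agent::processAgentAvatarAndAudio);
        scriptEngine.updateSubscribers.push_back(
            [this](float deltaTime) { processAgentAvatarAndAudio(deltaTime); });
    }

    void processAgentAvatarAndAudio(float deltaTime) {
        // avatar packets and scripted audio are sent from here now,
        // instead of from inside ScriptEngine::run()
        std::printf("agent update, dt = %f\n", deltaTime);
    }
};

int main() {
    AgentSketch agent;
    agent.scriptEngine.tick(1.0f / 60.0f);        // one simulated frame
    return 0;
}

With this split, nothing avatar- or audio-specific remains in the engine's run loop; the engine only has to keep ticking its update signal.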
Agent.cpp

@@ -43,7 +43,6 @@ Agent::Agent(NLPacket& packet) :
{
    // be the parent of the script engine so it gets moved when we do
    _scriptEngine.setParent(this);
    _scriptEngine.setIsAgent(true);

    DependencyManager::get<EntityScriptingInterface>()->setPacketSender(&_entityEditSender);

@@ -166,7 +165,7 @@ void Agent::run() {
    scriptedAvatar.setSkeletonModelURL(QUrl());

    // give this AvatarData object to the script engine
    _scriptEngine.setAvatarData(&scriptedAvatar, "Avatar");
    setAvatarData(&scriptedAvatar, "Avatar");

    auto avatarHashMap = DependencyManager::set<AvatarHashMap>();
    _scriptEngine.registerGlobalObject("AvatarList", avatarHashMap.data());

@@ -200,9 +199,165 @@ void Agent::run() {
    _entityViewer.init();
    entityScriptingInterface->setEntityTree(_entityViewer.getTree());

    // wire up our additional agent related processing to the update signal
    QObject::connect(&_scriptEngine, &ScriptEngine::update, this, &Agent::processAgentAvatarAndAudio);

    _scriptEngine.setScriptContents(scriptContents);
    _scriptEngine.run();
    setFinished(true);

    // kill the avatar identity timer
    delete _avatarIdentityTimer;

}

void Agent::setIsAvatar(bool isAvatar) {
    _isAvatar = isAvatar;

    if (_isAvatar && !_avatarIdentityTimer) {
        // set up the avatar timers
        _avatarIdentityTimer = new QTimer(this);
        _avatarBillboardTimer = new QTimer(this);

        // connect our slot
        connect(_avatarIdentityTimer, &QTimer::timeout, this, &Agent::sendAvatarIdentityPacket);
        connect(_avatarBillboardTimer, &QTimer::timeout, this, &Agent::sendAvatarBillboardPacket);

        // start the timers
        _avatarIdentityTimer->start(AVATAR_IDENTITY_PACKET_SEND_INTERVAL_MSECS);
        _avatarBillboardTimer->start(AVATAR_BILLBOARD_PACKET_SEND_INTERVAL_MSECS);
    }

    if (!_isAvatar) {
        delete _avatarIdentityTimer;
        _avatarIdentityTimer = NULL;
        delete _avatarBillboardTimer;
        _avatarBillboardTimer = NULL;
    }
}

void Agent::setAvatarData(AvatarData* avatarData, const QString& objectName) {
    _avatarData = avatarData;
    _scriptEngine.registerGlobalObject(objectName, avatarData);
}

void Agent::sendAvatarIdentityPacket() {
    if (_isAvatar && _avatarData) {
        _avatarData->sendIdentityPacket();
    }
}

void Agent::sendAvatarBillboardPacket() {
    if (_isAvatar && _avatarData) {
        _avatarData->sendBillboardPacket();
    }
}


void Agent::processAgentAvatarAndAudio(float deltaTime) {
    qDebug() << "processAgentAvatarAndAudio()";
    if (!_scriptEngine.isFinished() && _isAvatar && _avatarData) {

        const int SCRIPT_AUDIO_BUFFER_SAMPLES = floor(((SCRIPT_DATA_CALLBACK_USECS * AudioConstants::SAMPLE_RATE)
            / (1000 * 1000)) + 0.5);
        const int SCRIPT_AUDIO_BUFFER_BYTES = SCRIPT_AUDIO_BUFFER_SAMPLES * sizeof(int16_t);

        QByteArray avatarByteArray = _avatarData->toByteArray(true, randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO);
        _avatarData->doneEncoding(true);
        auto avatarPacket = NLPacket::create(PacketType::AvatarData, avatarByteArray.size());

        avatarPacket->write(avatarByteArray);

        auto nodeList = DependencyManager::get<NodeList>();

        nodeList->broadcastToNodes(std::move(avatarPacket), NodeSet() << NodeType::AvatarMixer);

        if (_isListeningToAudioStream || _avatarSound) {
            // if we have an avatar audio stream then send it out to our audio-mixer
            bool silentFrame = true;

            int16_t numAvailableSamples = SCRIPT_AUDIO_BUFFER_SAMPLES;
            const int16_t* nextSoundOutput = NULL;

            if (_avatarSound) {

                const QByteArray& soundByteArray = _avatarSound->getByteArray();
                nextSoundOutput = reinterpret_cast<const int16_t*>(soundByteArray.data()
                    + _numAvatarSoundSentBytes);

                int numAvailableBytes = (soundByteArray.size() - _numAvatarSoundSentBytes) > SCRIPT_AUDIO_BUFFER_BYTES
                    ? SCRIPT_AUDIO_BUFFER_BYTES
                    : soundByteArray.size() - _numAvatarSoundSentBytes;
                numAvailableSamples = numAvailableBytes / sizeof(int16_t);


                // check if all of the _numAvatarAudioBufferSamples to be sent are silence
                for (int i = 0; i < numAvailableSamples; ++i) {
                    if (nextSoundOutput[i] != 0) {
                        silentFrame = false;
                        break;
                    }
                }

                _numAvatarSoundSentBytes += numAvailableBytes;
                if (_numAvatarSoundSentBytes == soundByteArray.size()) {
                    // we're done with this sound object - so set our pointer back to NULL
                    // and our sent bytes back to zero
                    _avatarSound = NULL;
                    _numAvatarSoundSentBytes = 0;
                }
            }

            auto audioPacket = NLPacket::create(silentFrame
                ? PacketType::SilentAudioFrame
                : PacketType::MicrophoneAudioNoEcho);

            // seek past the sequence number, will be packed when destination node is known
            audioPacket->seek(sizeof(quint16));

            if (silentFrame) {
                if (!_isListeningToAudioStream) {
                    // if we have a silent frame and we're not listening then just send nothing and break out of here
                    return;
                }

                // write the number of silent samples so the audio-mixer can uphold timing
                audioPacket->writePrimitive(SCRIPT_AUDIO_BUFFER_SAMPLES);

                // use the orientation and position of this avatar for the source of this audio
                audioPacket->writePrimitive(_avatarData->getPosition());
                glm::quat headOrientation = _avatarData->getHeadOrientation();
                audioPacket->writePrimitive(headOrientation);

            } else if (nextSoundOutput) {
                // assume scripted avatar audio is mono and set channel flag to zero
                audioPacket->writePrimitive((quint8)0);

                // use the orientation and position of this avatar for the source of this audio
                audioPacket->writePrimitive(_avatarData->getPosition());
                glm::quat headOrientation = _avatarData->getHeadOrientation();
                audioPacket->writePrimitive(headOrientation);

                // write the raw audio data
                audioPacket->write(reinterpret_cast<const char*>(nextSoundOutput), numAvailableSamples * sizeof(int16_t));
            }

            // write audio packet to AudioMixer nodes
            auto nodeList = DependencyManager::get<NodeList>();
            nodeList->eachNode([this, &nodeList, &audioPacket](const SharedNodePointer& node){
                // only send to nodes of type AudioMixer
                if (node->getType() == NodeType::AudioMixer) {
                    // pack sequence number
                    quint16 sequence = _outgoingScriptAudioSequenceNumbers[node->getUUID()]++;
                    audioPacket->seek(0);
                    audioPacket->writePrimitive(sequence);

                    // send audio packet
                    nodeList->sendUnreliablePacket(*audioPacket, *node);
                }
            });
        }
    }
}

void Agent::aboutToFinish() {
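Note on the audio path moved into Agent::processAgentAvatarAndAudio() above: each tick it takes at most SCRIPT_AUDIO_BUFFER_BYTES of the loaded Sound, scans that chunk for any non-zero 16-bit sample, and then either sends the raw samples as a MicrophoneAudioNoEcho packet or, when the chunk is all zeros, sends a SilentAudioFrame carrying only the sample count (or nothing at all if the agent is not listening to a stream). A self-contained sketch of that chunking and silence check follows; the helper names are illustrative, not from the codebase.

// Standalone sketch of the per-frame chunking and silence check (illustrative names).
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct AudioFrame {
    const int16_t* samples = nullptr;
    int numSamples = 0;
    bool silent = true;
};

// Take the next chunk of at most maxSamplesPerFrame samples starting at the byte
// cursor, and report whether every sample in the chunk is zero.
AudioFrame nextFrame(const std::vector<int16_t>& sound, int& sentBytes, int maxSamplesPerFrame) {
    const int bytesPerSample = (int)sizeof(int16_t);
    const int maxBytesPerFrame = maxSamplesPerFrame * bytesPerSample;
    const int totalBytes = (int)sound.size() * bytesPerSample;
    const int availableBytes = std::min(maxBytesPerFrame, totalBytes - sentBytes);

    AudioFrame frame;
    frame.samples = sound.data() + sentBytes / bytesPerSample;
    frame.numSamples = availableBytes / bytesPerSample;
    for (int i = 0; i < frame.numSamples; ++i) {
        if (frame.samples[i] != 0) {
            frame.silent = false;    // one audible sample is enough
            break;
        }
    }
    sentBytes += availableBytes;     // advance the cursor, like _numAvatarSoundSentBytes
    return frame;
}

int main() {
    std::vector<int16_t> sound(480, 0);  // 480 samples of silence...
    sound[300] = 1200;                   // ...except one audible sample in the second chunk

    int sentBytes = 0;
    const int totalBytes = (int)(sound.size() * sizeof(int16_t));
    while (sentBytes < totalBytes) {
        AudioFrame frame = nextFrame(sound, sentBytes, 240);
        std::printf("frame: %d samples, silent=%s\n", frame.numSamples, frame.silent ? "yes" : "no");
    }
    return 0;
}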
Agent.h

@@ -37,28 +37,28 @@ class Agent : public ThreadedAssignment {
public:
    Agent(NLPacket& packet);

    void setIsAvatar(bool isAvatar) { _scriptEngine.setIsAvatar(isAvatar); }
    bool isAvatar() const { return _scriptEngine.isAvatar(); }

    bool isPlayingAvatarSound() const { return _scriptEngine.isPlayingAvatarSound(); }

    bool isListeningToAudioStream() const { return _scriptEngine.isListeningToAudioStream(); }
    void setIsListeningToAudioStream(bool isListeningToAudioStream)
        { _scriptEngine.setIsListeningToAudioStream(isListeningToAudioStream); }

    void setIsAvatar(bool isAvatar);
    bool isAvatar() const { return _isAvatar; }

    bool isPlayingAvatarSound() const { return _avatarSound != NULL; }

    bool isListeningToAudioStream() const { return _isListeningToAudioStream; }
    void setIsListeningToAudioStream(bool isListeningToAudioStream) { _isListeningToAudioStream = isListeningToAudioStream; }

    float getLastReceivedAudioLoudness() const { return _lastReceivedAudioLoudness; }

    virtual void aboutToFinish();

public slots:
    void run();
    void playAvatarSound(Sound* avatarSound) { _scriptEngine.setAvatarSound(avatarSound); }
    void playAvatarSound(Sound* avatarSound) { setAvatarSound(avatarSound); }

private slots:
    void handleAudioPacket(QSharedPointer<NLPacket> packet);
    void handleOctreePacket(QSharedPointer<NLPacket> packet, SharedNodePointer senderNode);
    void handleJurisdictionPacket(QSharedPointer<NLPacket> packet, SharedNodePointer senderNode);
    void sendPingRequests();
    void processAgentAvatarAndAudio(float deltaTime);

private:
    ScriptEngine _scriptEngine;

@@ -68,6 +68,23 @@ private:

    MixedAudioStream _receivedAudioStream;
    float _lastReceivedAudioLoudness;

    void setAvatarData(AvatarData* avatarData, const QString& objectName);
    void setAvatarSound(Sound* avatarSound) { _avatarSound = avatarSound; }

    void sendAvatarIdentityPacket();
    void sendAvatarBillboardPacket();

    // FIXME - Agent only data
    AvatarData* _avatarData = nullptr;
    bool _isListeningToAudioStream = false;
    Sound* _avatarSound = nullptr;
    int _numAvatarSoundSentBytes = 0;
    bool _isAvatar = false;
    QTimer* _avatarIdentityTimer = nullptr;
    QTimer* _avatarBillboardTimer = nullptr;
    QHash<QUuid, quint16> _outgoingScriptAudioSequenceNumbers;

};

#endif // hifi_Agent_h
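Note on the new Agent.h members above: _outgoingScriptAudioSequenceNumbers keeps one quint16 counter per audio-mixer UUID. The audio packet is built once with the first sizeof(quint16) bytes left empty (audioPacket->seek(sizeof(quint16))), and just before each send the counter for that destination is packed at offset 0. A rough stand-alone illustration of that reserve-then-backfill pattern follows, with std:: containers standing in for NLPacket, QHash and QUuid.

// Sketch of "reserve the sequence-number slot, backfill per destination" (illustrative types).
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <map>
#include <string>
#include <vector>

struct PacketSketch {
    std::vector<uint8_t> bytes;

    // start with an empty 2-byte slot, like audioPacket->seek(sizeof(quint16))
    PacketSketch() : bytes(sizeof(uint16_t), 0) {}

    void writePayload(const void* data, std::size_t size) {
        const uint8_t* p = static_cast<const uint8_t*>(data);
        bytes.insert(bytes.end(), p, p + size);
    }

    // like audioPacket->seek(0); audioPacket->writePrimitive(sequence);
    void packSequence(uint16_t sequence) {
        std::memcpy(bytes.data(), &sequence, sizeof(sequence));
    }
};

int main() {
    // one counter per destination; operator[] default-constructs missing entries to 0,
    // matching the behavior relied on with QHash<QUuid, quint16> in the diff
    std::map<std::string, uint16_t> outgoingSequenceNumbers;

    PacketSketch packet;
    const char payload[] = "audio-bytes";
    packet.writePayload(payload, sizeof(payload));

    const std::vector<std::string> audioMixers = {"mixer-a", "mixer-b"};
    for (const std::string& node : audioMixers) {
        uint16_t sequence = outgoingSequenceNumbers[node]++;  // backfill just before sending
        packet.packSequence(sequence);
        std::printf("send %zu bytes to %s with sequence %u\n",
                    packet.bytes.size(), node.c_str(), (unsigned)sequence);
    }
    return 0;
}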
ScriptEngine.cpp

@@ -89,16 +89,8 @@ ScriptEngine::ScriptEngine(const QString& scriptContents, const QString& fileNam
    _isFinished(false),
    _isRunning(false),
    _isInitialized(false),
    _isAvatar(false),
    _avatarIdentityTimer(NULL),
    _avatarBillboardTimer(NULL),
    _timerFunctionMap(),
    _isListeningToAudioStream(false),
    _avatarSound(NULL),
    _numAvatarSoundSentBytes(0),
    _controllerScriptingInterface(controllerScriptingInterface),
    _wantSignals(wantSignals),
    _avatarData(NULL),
    _fileNameString(fileNameString),
    _quatLibrary(),
    _vec3Library(),

@@ -229,40 +221,6 @@ QString ScriptEngine::getFilename() const {
}


void ScriptEngine::setIsAvatar(bool isAvatar) {
    _isAvatar = isAvatar;

    if (_isAvatar && !_avatarIdentityTimer) {
        // set up the avatar timers
        _avatarIdentityTimer = new QTimer(this);
        _avatarBillboardTimer = new QTimer(this);

        // connect our slot
        connect(_avatarIdentityTimer, &QTimer::timeout, this, &ScriptEngine::sendAvatarIdentityPacket);
        connect(_avatarBillboardTimer, &QTimer::timeout, this, &ScriptEngine::sendAvatarBillboardPacket);

        // start the timers
        _avatarIdentityTimer->start(AVATAR_IDENTITY_PACKET_SEND_INTERVAL_MSECS);
        _avatarBillboardTimer->start(AVATAR_BILLBOARD_PACKET_SEND_INTERVAL_MSECS);
    }

    if (!_isAvatar) {
        delete _avatarIdentityTimer;
        _avatarIdentityTimer = NULL;
        delete _avatarBillboardTimer;
        _avatarBillboardTimer = NULL;
    }
}

void ScriptEngine::setAvatarData(AvatarData* avatarData, const QString& objectName) {
    _avatarData = avatarData;

    // remove the old Avatar property, if it exists
    globalObject().setProperty(objectName, QScriptValue());

    // give the script engine the new Avatar script property
    registerGlobalObject(objectName, _avatarData);
}

bool ScriptEngine::setScriptContents(const QString& scriptContents, const QString& fileNameString) {
    if (_isRunning) {

@@ -571,18 +529,6 @@ QScriptValue ScriptEngine::evaluate(const QString& program, const QString& fileN
    return result;
}

void ScriptEngine::sendAvatarIdentityPacket() {
    if (_isAvatar && _avatarData) {
        _avatarData->sendIdentityPacket();
    }
}

void ScriptEngine::sendAvatarBillboardPacket() {
    if (_isAvatar && _avatarData) {
        _avatarData->sendBillboardPacket();
    }
}

void ScriptEngine::run() {
    // TODO: can we add a short circuit for _stoppingAllScripts here? What does it mean to not start running if
    // we're in the process of stopping?

@@ -634,107 +580,6 @@ void ScriptEngine::run() {
            }
        }

        if (!_isFinished && _isAvatar && _avatarData) {

            const int SCRIPT_AUDIO_BUFFER_SAMPLES = floor(((SCRIPT_DATA_CALLBACK_USECS * AudioConstants::SAMPLE_RATE)
                / (1000 * 1000)) + 0.5);
            const int SCRIPT_AUDIO_BUFFER_BYTES = SCRIPT_AUDIO_BUFFER_SAMPLES * sizeof(int16_t);

            QByteArray avatarByteArray = _avatarData->toByteArray(true, randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO);
            _avatarData->doneEncoding(true);
            auto avatarPacket = NLPacket::create(PacketType::AvatarData, avatarByteArray.size());

            avatarPacket->write(avatarByteArray);

            nodeList->broadcastToNodes(std::move(avatarPacket), NodeSet() << NodeType::AvatarMixer);

            if (_isListeningToAudioStream || _avatarSound) {
                // if we have an avatar audio stream then send it out to our audio-mixer
                bool silentFrame = true;

                int16_t numAvailableSamples = SCRIPT_AUDIO_BUFFER_SAMPLES;
                const int16_t* nextSoundOutput = NULL;

                if (_avatarSound) {

                    const QByteArray& soundByteArray = _avatarSound->getByteArray();
                    nextSoundOutput = reinterpret_cast<const int16_t*>(soundByteArray.data()
                        + _numAvatarSoundSentBytes);

                    int numAvailableBytes = (soundByteArray.size() - _numAvatarSoundSentBytes) > SCRIPT_AUDIO_BUFFER_BYTES
                        ? SCRIPT_AUDIO_BUFFER_BYTES
                        : soundByteArray.size() - _numAvatarSoundSentBytes;
                    numAvailableSamples = numAvailableBytes / sizeof(int16_t);


                    // check if all of the _numAvatarAudioBufferSamples to be sent are silence
                    for (int i = 0; i < numAvailableSamples; ++i) {
                        if (nextSoundOutput[i] != 0) {
                            silentFrame = false;
                            break;
                        }
                    }

                    _numAvatarSoundSentBytes += numAvailableBytes;
                    if (_numAvatarSoundSentBytes == soundByteArray.size()) {
                        // we're done with this sound object - so set our pointer back to NULL
                        // and our sent bytes back to zero
                        _avatarSound = NULL;
                        _numAvatarSoundSentBytes = 0;
                    }
                }

                auto audioPacket = NLPacket::create(silentFrame
                    ? PacketType::SilentAudioFrame
                    : PacketType::MicrophoneAudioNoEcho);

                // seek past the sequence number, will be packed when destination node is known
                audioPacket->seek(sizeof(quint16));

                if (silentFrame) {
                    if (!_isListeningToAudioStream) {
                        // if we have a silent frame and we're not listening then just send nothing and break out of here
                        break;
                    }

                    // write the number of silent samples so the audio-mixer can uphold timing
                    audioPacket->writePrimitive(SCRIPT_AUDIO_BUFFER_SAMPLES);

                    // use the orientation and position of this avatar for the source of this audio
                    audioPacket->writePrimitive(_avatarData->getPosition());
                    glm::quat headOrientation = _avatarData->getHeadOrientation();
                    audioPacket->writePrimitive(headOrientation);

                } else if (nextSoundOutput) {
                    // assume scripted avatar audio is mono and set channel flag to zero
                    audioPacket->writePrimitive((quint8) 0);

                    // use the orientation and position of this avatar for the source of this audio
                    audioPacket->writePrimitive(_avatarData->getPosition());
                    glm::quat headOrientation = _avatarData->getHeadOrientation();
                    audioPacket->writePrimitive(headOrientation);

                    // write the raw audio data
                    audioPacket->write(reinterpret_cast<const char*>(nextSoundOutput), numAvailableSamples * sizeof(int16_t));
                }

                // write audio packet to AudioMixer nodes
                auto nodeList = DependencyManager::get<NodeList>();
                nodeList->eachNode([this, &nodeList, &audioPacket](const SharedNodePointer& node){
                    // only send to nodes of type AudioMixer
                    if (node->getType() == NodeType::AudioMixer) {
                        // pack sequence number
                        quint16 sequence = _outgoingScriptAudioSequenceNumbers[node->getUUID()]++;
                        audioPacket->seek(0);
                        audioPacket->writePrimitive(sequence);

                        // send audio packet
                        nodeList->sendUnreliablePacket(*audioPacket, *node);
                    }
                });
            }
        }

        qint64 now = usecTimestampNow();
        float deltaTime = (float) (now - lastUpdate) / (float) USECS_PER_SECOND;

@@ -761,9 +606,6 @@ void ScriptEngine::run() {
        emit scriptEnding();
    }

    // kill the avatar identity timer
    delete _avatarIdentityTimer;

    if (entityScriptingInterface->getEntityPacketSender()->serversExist()) {
        // release the queue of edit entity messages.
        entityScriptingInterface->getEntityPacketSender()->releaseQueuedMessages();
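Note on the buffer-size arithmetic that appears both in the removed ScriptEngine::run() block above and in the new Agent code: SCRIPT_AUDIO_BUFFER_SAMPLES rounds (callback interval in microseconds x sample rate / 10^6) to the nearest integer via floor(x + 0.5). The worked example below assumes a 60 Hz callback and a 24 kHz sample rate purely for illustration; the real values come from SCRIPT_DATA_CALLBACK_USECS and AudioConstants::SAMPLE_RATE, which are not part of this commit.

// Worked example of the rounding; the interval and rate below are assumptions.
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    const double callbackUsecs = 1000.0 * 1000.0 / 60.0;  // ~16666.7 microseconds per tick (assumed)
    const double sampleRate = 24000.0;                    // samples per second (assumed)

    // floor(x + 0.5) rounds to the nearest whole sample count
    const int bufferSamples = (int)std::floor(callbackUsecs * sampleRate / (1000.0 * 1000.0) + 0.5);
    const int bufferBytes = bufferSamples * (int)sizeof(int16_t);

    std::printf("%d samples per frame -> %d bytes\n", bufferSamples, bufferBytes);  // 400 -> 800 here
    return 0;
}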
ScriptEngine.h

@@ -142,17 +142,10 @@ protected:
    bool _isRunning;
    int _evaluatesPending = 0;
    bool _isInitialized;
    bool _isAvatar;
    QTimer* _avatarIdentityTimer;
    QTimer* _avatarBillboardTimer;
    QHash<QTimer*, QScriptValue> _timerFunctionMap;
    bool _isListeningToAudioStream;
    Sound* _avatarSound;
    int _numAvatarSoundSentBytes;
    bool _isAgent = false;
    QSet<QUrl> _includedURLs;
    bool _wantSignals = true;


private:
    QString getFilename() const;
    void waitTillDoneRunning();

@@ -164,7 +157,6 @@ private:
    void stopTimer(QTimer* timer);

    AbstractControllerScriptingInterface* _controllerScriptingInterface;
    AvatarData* _avatarData;
    QString _fileNameString;
    Quat _quatLibrary;
    Vec3 _vec3Library;

@@ -174,7 +166,6 @@ private:

    ArrayBufferClass* _arrayBufferClass;

    QHash<QUuid, quint16> _outgoingScriptAudioSequenceNumbers;
    QHash<EntityItemID, RegisteredEventHandlers> _registeredHandlers;
    void generalHandler(const EntityItemID& entityID, const QString& eventName, std::function<QScriptValueList()> argGenerator);

@@ -204,18 +195,6 @@ private:
    void init();
    void run();

    // FIXME - all of these need to be removed and the code in Agent.cpp that depends on them should be moved into Agent.cpp
    void setIsAgent(bool isAgent) { _isAgent = isAgent; }
    void setIsAvatar(bool isAvatar);
    bool isAvatar() const { return _isAvatar; }
    void setAvatarData(AvatarData* avatarData, const QString& objectName);
    bool isListeningToAudioStream() const { return _isListeningToAudioStream; }
    void setIsListeningToAudioStream(bool isListeningToAudioStream) { _isListeningToAudioStream = isListeningToAudioStream; }
    void setAvatarSound(Sound* avatarSound) { _avatarSound = avatarSound; }
    bool isPlayingAvatarSound() const { return _avatarSound != NULL; }

    void sendAvatarIdentityPacket();
    void sendAvatarBillboardPacket();
};

#endif // hifi_ScriptEngine_h