Mirror of https://github.com/lubosz/overte.git
Cleanup artifacts
It seems playAvatarSound can be called from a thread other than the Agent's thread, so audio artifacts occur when that sound pointer changes while a 'tick' is in progress. Also cleaned up the code a bit and got rid of some hard-coded stuff I had in just for dev purposes.
parent 0794d95bdc
commit ef844cbd00
2 changed files with 22 additions and 15 deletions
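The core of the fix is the standard Qt pattern of re-posting a call onto the QObject's own thread (see the second hunk below). A minimal, self-contained sketch of that pattern follows; the Worker class, setSound slot, and soundId parameter are illustrative stand-ins, not names from this codebase.

    // Minimal sketch (not project code): bounce a call onto the QObject's own thread.
    // Build note: with CMake AUTOMOC or qmake this would live in its own .cpp, since the
    // string-based invokeMethod needs a moc-processed slot.
    #include <QCoreApplication>
    #include <QDebug>
    #include <QMetaObject>
    #include <QThread>
    #include <QTimer>

    class Worker : public QObject {
        Q_OBJECT
    public slots:
        void setSound(int soundId) {
            // Called from the wrong thread? Queue the same call onto the thread this
            // object lives on and bail out; the member is then only ever touched on
            // that one thread, between ticks.
            if (QThread::currentThread() != thread()) {
                QMetaObject::invokeMethod(this, "setSound", Q_ARG(int, soundId));
                return;
            }
            _soundId = soundId;
            qDebug() << "setSound ran on" << QThread::currentThread();
        }
    private:
        int _soundId { 0 };
    };

    int main(int argc, char** argv) {
        QCoreApplication app(argc, argv);

        QThread workerThread;
        Worker worker;
        worker.moveToThread(&workerThread);  // worker now lives on workerThread
        workerThread.start();

        worker.setSound(42);  // called from the main thread: the slot re-posts itself

        // Give the queued call a moment to run, then shut down cleanly.
        QTimer::singleShot(100, &app, [&] { workerThread.quit(); workerThread.wait(); app.quit(); });
        return app.exec();
    }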
@@ -52,10 +52,15 @@ static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 10;
 // this should send a signal every 10ms, with pretty good precision
 void AvatarAudioTimer::start() {
     qDebug() << "AvatarAudioTimer::start called";
+    auto startTime = usecTimestampNow();
+    quint64 frameCounter = 0;
     const int TARGET_INTERVAL_USEC = 10000; // 10ms
     while (!_quit) {
+        frameCounter++;
         // simplest possible timer
-        usleep(TARGET_INTERVAL_USEC);
+        quint64 targetTime = startTime + frameCounter * TARGET_INTERVAL_USEC;
+        quint64 interval = std::max((quint64)0, targetTime - usecTimestampNow());
+        usleep(interval);
         emit avatarTick();
     }
     qDebug() << "AvatarAudioTimer is finished";
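The hunk above replaces a fixed 10 ms sleep with a sleep toward an absolute target time, so scheduling error does not accumulate across frames. A rough portable restatement of that idea, with illustrative names and std::chrono in place of the project's timing helpers:

    #include <atomic>
    #include <chrono>
    #include <cstdint>
    #include <thread>

    // Tick at a fixed cadence by sleeping until startTime + N * interval rather than
    // sleeping a fixed interval each pass; a late tick shortens the next sleep instead
    // of pushing every later tick back.
    void runTicker(std::atomic<bool>& quit, void (*tick)()) {
        using namespace std::chrono;
        constexpr auto TARGET_INTERVAL = milliseconds(10);  // matches TARGET_INTERVAL_USEC above
        const auto startTime = steady_clock::now();
        int64_t frameCounter = 0;

        while (!quit.load()) {
            ++frameCounter;
            const auto targetTime = startTime + frameCounter * TARGET_INTERVAL;
            std::this_thread::sleep_until(targetTime);  // returns immediately if already late
            tick();  // stands in for emit avatarTick()
        }
    }

A caller would run this on its own thread and flip quit to stop it, much as the Agent starts the timer worker on _avatarAudioTimerThread below.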
@@ -91,6 +96,16 @@ Agent::Agent(ReceivedMessage& message) :
     packetReceiver.registerListener(PacketType::SelectedAudioFormat, this, "handleSelectedAudioFormat");
 }
 
+void Agent::playAvatarSound(SharedSoundPointer sound) {
+    // this must happen on Agent's main thread
+    if (QThread::currentThread() != thread()) {
+        QMetaObject::invokeMethod(this, "playAvatarSound", Q_ARG(SharedSoundPointer, sound));
+        return;
+    } else {
+        setAvatarSound(sound);
+    }
+}
+
 void Agent::handleOctreePacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode) {
     auto packetType = message->getType();
 
@@ -233,7 +248,6 @@ void Agent::nodeActivated(SharedNodePointer activatedNode) {
     if (activatedNode->getType() == NodeType::AudioMixer) {
         negotiateAudioFormat();
     }
-
 }
 
 void Agent::negotiateAudioFormat() {
@@ -254,7 +268,6 @@ void Agent::negotiateAudioFormat() {
         // send off this mute packet
         nodeList->sendPacket(std::move(negotiateFormatPacket), *audioMixer);
     }
-    qInfo() << "negotiateAudioFormat called";
 }
 
 void Agent::handleSelectedAudioFormat(QSharedPointer<ReceivedMessage> message) {
@@ -386,7 +399,6 @@ void Agent::executeScript() {
 
     DependencyManager::set<AssignmentParentFinder>(_entityViewer.getTree());
 
-    qDebug() << "Connecting avatarAudioTimer and starting...";
     AvatarAudioTimer* audioTimerWorker = new AvatarAudioTimer();
     audioTimerWorker->moveToThread(&_avatarAudioTimerThread);
     connect(audioTimerWorker, &AvatarAudioTimer::avatarTick, this, &Agent::processAgentAvatarAndAudio);
@@ -395,9 +407,6 @@ void Agent::executeScript() {
     connect(&_avatarAudioTimerThread, &QThread::finished, audioTimerWorker, &QObject::deleteLater);
     _avatarAudioTimerThread.start();
 
-    // wire up our additional agent related processing to the update signal
-    //QObject::connect(_scriptEngine.get(), &ScriptEngine::update, this, &Agent::processAgentAvatarAndAudio);
-
     _scriptEngine->run();
 
     Frame::clearFrameHandler(AUDIO_FRAME_TYPE);
@@ -424,7 +433,7 @@ void Agent::setIsAvatar(bool isAvatar) {
         // start the timers
        _avatarIdentityTimer->start(AVATAR_IDENTITY_PACKET_SEND_INTERVAL_MSECS);
 
-        // tell the audiotimer worker to start working
+        // tell the avatarAudioTimer to start ticking
        emit startAvatarAudioTimer();
 
    }
@@ -464,8 +473,6 @@ void Agent::sendAvatarIdentityPacket() {
 void Agent::processAgentAvatarAndAudio() {
     if (!_scriptEngine->isFinished() && _isAvatar) {
         auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
-        const int SCRIPT_AUDIO_BUFFER_SAMPLES = AudioConstants::SAMPLE_RATE / 100 + 0.5;
-        const int SCRIPT_AUDIO_BUFFER_BYTES = SCRIPT_AUDIO_BUFFER_SAMPLES * sizeof(int16_t);
 
         QByteArray avatarByteArray = scriptedAvatar->toByteArray(true, randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO);
         scriptedAvatar->doneEncoding(true);
@@ -484,7 +491,7 @@ void Agent::processAgentAvatarAndAudio() {
         // if we have an avatar audio stream then send it out to our audio-mixer
         bool silentFrame = true;
 
-        int16_t numAvailableSamples = SCRIPT_AUDIO_BUFFER_SAMPLES;
+        int16_t numAvailableSamples = AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
         const int16_t* nextSoundOutput = NULL;
 
         if (_avatarSound) {
@@ -492,8 +499,8 @@ void Agent::processAgentAvatarAndAudio() {
             nextSoundOutput = reinterpret_cast<const int16_t*>(soundByteArray.data()
                                                                + _numAvatarSoundSentBytes);
 
-            int numAvailableBytes = (soundByteArray.size() - _numAvatarSoundSentBytes) > SCRIPT_AUDIO_BUFFER_BYTES
-                ? SCRIPT_AUDIO_BUFFER_BYTES
+            int numAvailableBytes = (soundByteArray.size() - _numAvatarSoundSentBytes) > AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL
+                ? AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL
                 : soundByteArray.size() - _numAvatarSoundSentBytes;
             numAvailableSamples = (int16_t)numAvailableBytes / sizeof(int16_t);
 
@@ -529,7 +536,7 @@ void Agent::processAgentAvatarAndAudio() {
             }
 
             // write the number of silent samples so the audio-mixer can uphold timing
-            audioPacket->writePrimitive(SCRIPT_AUDIO_BUFFER_SAMPLES);
+            audioPacket->writePrimitive(AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
 
             // use the orientation and position of this avatar for the source of this audio
             audioPacket->writePrimitive(scriptedAvatar->getPosition());
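The last four hunks above swap the hand-rolled SCRIPT_AUDIO_BUFFER_* constants for the shared AudioConstants values describing one network audio frame. A back-of-envelope check of what the deleted lines computed, assuming a 24 kHz network sample rate purely for illustration (the real values live in AudioConstants.h):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int SAMPLE_RATE = 24000;                                    // assumed value, for illustration
        const int SCRIPT_AUDIO_BUFFER_SAMPLES = SAMPLE_RATE / 100 + 0.5;  // samples in one 10 ms frame
        const int SCRIPT_AUDIO_BUFFER_BYTES = SCRIPT_AUDIO_BUFFER_SAMPLES * sizeof(int16_t);

        // If AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL also describes one 10 ms
        // mono frame at the same rate, it equals SCRIPT_AUDIO_BUFFER_SAMPLES, which is why
        // the hand-rolled constants can be dropped in favor of the shared ones.
        std::printf("%d samples, %d bytes per 10 ms frame\n",
                    SCRIPT_AUDIO_BUFFER_SAMPLES, SCRIPT_AUDIO_BUFFER_BYTES);
        return 0;
    }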
@@ -73,7 +73,7 @@ public:
 
 public slots:
     void run() override;
-    void playAvatarSound(SharedSoundPointer avatarSound) { setAvatarSound(avatarSound); }
+    void playAvatarSound(SharedSoundPointer avatarSound);// { setAvatarSound(avatarSound); }
     void processAgentAvatarAndAudio();
 
 private slots: