Mirror of https://github.com/lubosz/overte.git
Revert "Test if this fixes the choppy audio"
This reverts commit 2e323efe1b
.
This commit is contained in:
parent
2e323efe1b
commit
4b808fe38e
2 changed files with 3 additions and 100 deletions
libraries/audio-client/src
@@ -57,11 +57,6 @@
 #include <QtAndroidExtras/QAndroidJniObject>
 #endif
 
-Q_DECLARE_METATYPE(const void*);
-Q_DECLARE_METATYPE(quint16);
-Q_DECLARE_METATYPE(Transform);
-
-
 const int AudioClient::MIN_BUFFER_FRAMES = 1;
 
 const int AudioClient::MAX_BUFFER_FRAMES = 20;
@@ -213,18 +208,10 @@ AudioClient::AudioClient() :
     _audioOutputIODevice(_localInjectorsStream, _receivedAudioStream, this),
     _stats(&_receivedAudioStream),
     _positionGetter(DEFAULT_POSITION_GETTER),
-    _audioSender(new AudioSender()),
 #if defined(Q_OS_ANDROID)
     _checkInputTimer(this),
 #endif
     _orientationGetter(DEFAULT_ORIENTATION_GETTER) {
-
-    qRegisterMetaType<const void*>("const void*");
-    qRegisterMetaType<quint16>("quint16");
-    qRegisterMetaType<Transform>("Transform");
-
-    _audioSender->start();
-
     // avoid putting a lock in the device callback
     assert(_localSamplesAvailable.is_lock_free());
 
@@ -1125,21 +1112,11 @@ void AudioClient::handleAudioInput(QByteArray& audioBuffer) {
     } else {
         encodedBuffer = audioBuffer;
     }
-    QMetaObject::invokeMethod(_audioSender, "emitAudioPacket", Qt::QueuedConnection,
-                              Q_ARG(const void*, encodedBuffer.data()),
-                              Q_ARG(size_t, encodedBuffer.size()),
-                              Q_ARG(quint16, _outgoingAvatarAudioSequenceNumber++ ),
-                              Q_ARG(bool,_isStereoInput),
-                              Q_ARG(Transform, audioTransform),
-                              Q_ARG(glm::vec3, avatarBoundingBoxCorner),
-                              Q_ARG(glm::vec3, avatarBoundingBoxScale),
-                              Q_ARG(PacketType, packetType),
-                              Q_ARG(QString, _selectedCodecName));
 
-    /* emitAudioPacket(encodedBuffer.data(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, _isStereoInput,
+    emitAudioPacket(encodedBuffer.data(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, _isStereoInput,
         audioTransform, avatarBoundingBoxCorner, avatarBoundingBoxScale,
-        packetType, _selectedCodecName); */
-    //_stats.sentPacket();
+        packetType, _selectedCodecName);
+    _stats.sentPacket();
 }
 
 void AudioClient::handleMicAudioInput() {
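Note on the pattern being reverted: the constructor hunk above registered the argument types with Qt's metatype system, and the hunk here handed each captured frame to the AudioSender worker through QMetaObject::invokeMethod with Qt::QueuedConnection, so the packet was assembled on a separate thread; the revert restores the direct, same-thread emitAudioPacket call. The sketch below is a minimal, generic illustration of that Qt mechanism, not Overte code; it assumes Qt 5.10 or newer, and the AudioChunk and Worker names are made up. Queued invocations copy their arguments into an event, which is why every custom argument type has to be registered first.

// Minimal, generic sketch (assumes Qt 5.10+; AudioChunk and Worker are
// illustrative names, not Overte types). It mirrors the reverted pattern:
// register the argument type, move a QObject to its own thread, and invoke
// its slot with Qt::QueuedConnection so the work runs on that thread.
#include <QCoreApplication>
#include <QDebug>
#include <QMetaType>
#include <QThread>

struct AudioChunk {
    int frames = 0;
};
Q_DECLARE_METATYPE(AudioChunk)

class Worker : public QObject {
    Q_OBJECT
public slots:
    void process(AudioChunk chunk) {
        // Runs on the worker's thread; the argument was copied into the event.
        qDebug() << "processing" << chunk.frames << "frames on" << QThread::currentThread();
    }
};

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);

    // Queued connections copy arguments, so custom types must be registered.
    qRegisterMetaType<AudioChunk>("AudioChunk");

    QThread senderThread;
    Worker worker;
    worker.moveToThread(&senderThread);   // slot invocations now run on senderThread
    senderThread.start();

    // Equivalent in spirit to the removed invokeMethod call in handleAudioInput:
    // the call is posted to the worker's event loop instead of running here.
    QMetaObject::invokeMethod(&worker, "process", Qt::QueuedConnection,
                              Q_ARG(AudioChunk, AudioChunk{48000}));

    // Queue a second call behind it that asks the main loop to exit, so we
    // know process() has already run when exec() returns.
    QMetaObject::invokeMethod(&worker, [&app] {
        QMetaObject::invokeMethod(&app, &QCoreApplication::quit, Qt::QueuedConnection);
    }, Qt::QueuedConnection);

    int rc = app.exec();
    senderThread.quit();
    senderThread.wait();
    return rc;
}

#include "main.moc"   // required because a Q_OBJECT class is defined in this .cpp

If the qRegisterMetaType call is left out, Qt reports the unregistered argument type at runtime and the queued call is not delivered.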
@@ -2074,65 +2051,3 @@ void AudioClient::setInputVolume(float volume, bool emitSignal) {
         }
     }
 }
-
-AudioSender::AudioSender() {
-
-}
-
-void AudioSender::start() {
-    moveToNewNamedThread(this, "Audio Sender Thread", [this] { }, QThread::HighestPriority);
-}
-
-
-void AudioSender::emitAudioPacket(const void* audioData, size_t bytes, quint16 sequenceNumber, bool isStereo,
-                                  const Transform& transform, glm::vec3 avatarBoundingBoxCorner, glm::vec3 avatarBoundingBoxScale,
-                                  PacketType packetType, QString codecName) {
-    static std::mutex _mutex;
-    using Locker = std::unique_lock<std::mutex>;
-    auto nodeList = DependencyManager::get<NodeList>();
-    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
-    if (audioMixer && audioMixer->getActiveSocket()) {
-        Locker lock(_mutex);
-        auto audioPacket = NLPacket::create(packetType);
-
-        // write sequence number
-        auto sequence = sequenceNumber;
-        audioPacket->writePrimitive(sequence);
-
-        // write the codec
-        audioPacket->writeString(codecName);
-
-        if (packetType == PacketType::SilentAudioFrame) {
-            // pack num silent samples
-            quint16 numSilentSamples = isStereo ?
-                AudioConstants::NETWORK_FRAME_SAMPLES_STEREO :
-                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
-            audioPacket->writePrimitive(numSilentSamples);
-        } else {
-            // set the mono/stereo byte
-            quint8 channelFlag = isStereo ? 1 : 0;
-            audioPacket->writePrimitive(channelFlag);
-        }
-
-        // at this point we'd better be sending the mixer a valid position, or it won't consider us for mixing
-        assert(!isNaN(transform.getTranslation()));
-
-        // pack the three float positions
-        audioPacket->writePrimitive(transform.getTranslation());
-        // pack the orientation
-        audioPacket->writePrimitive(transform.getRotation());
-
-        audioPacket->writePrimitive(avatarBoundingBoxCorner);
-        audioPacket->writePrimitive(avatarBoundingBoxScale);
-
-
-        if (audioPacket->getType() != PacketType::SilentAudioFrame) {
-            // audio samples have already been packed (written to networkAudioSamples)
-            int leadingBytes = audioPacket->getPayloadSize();
-            audioPacket->setPayloadSize(leadingBytes + bytes);
-            memcpy(audioPacket->getPayload() + leadingBytes, audioData, bytes);
-        }
-        nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
-        nodeList->sendUnreliablePacket(*audioPacket, *audioMixer);
-    }
-}
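For reference, the removed AudioSender::emitAudioPacket above writes its fields in a fixed order before appending the encoded audio: sequence number, codec name, either a silent-sample count or a mono/stereo flag, then position, orientation and the avatar bounding box. The sketch below restates that layout with plain standard-library types; it is not Overte code, NLPacket adds its own protocol header on top of this payload, and the length-prefixed string framing used for the codec name here is an assumption rather than NLPacket's documented format.

// Rough sketch of the payload field order written by the removed code.
// Assumption: writePrimitive appends the raw bytes of the value; the
// uint32 length prefix for the codec name is illustrative only.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

template <typename T>
void writePrimitive(std::vector<uint8_t>& out, const T& value) {
    const auto* bytes = reinterpret_cast<const uint8_t*>(&value);
    out.insert(out.end(), bytes, bytes + sizeof(T));
}

struct Vec3 { float x, y, z; };
struct Quat { float x, y, z, w; };

std::vector<uint8_t> packAudioFrame(uint16_t sequence, const std::string& codecName,
                                    bool isStereo, bool isSilent, uint16_t silentSamples,
                                    Vec3 position, Quat orientation,
                                    Vec3 bboxCorner, Vec3 bboxScale,
                                    const uint8_t* audioData, size_t bytes) {
    std::vector<uint8_t> payload;
    writePrimitive(payload, sequence);                            // sequence number

    uint32_t nameLength = static_cast<uint32_t>(codecName.size());
    writePrimitive(payload, nameLength);                          // assumed string framing
    payload.insert(payload.end(), codecName.begin(), codecName.end());

    if (isSilent) {
        writePrimitive(payload, silentSamples);                   // silent frame: sample count
    } else {
        writePrimitive(payload, static_cast<uint8_t>(isStereo ? 1 : 0)); // channel flag
    }

    writePrimitive(payload, position);                            // avatar position
    writePrimitive(payload, orientation);                         // avatar orientation
    writePrimitive(payload, bboxCorner);                          // bounding box corner
    writePrimitive(payload, bboxScale);                           // bounding box scale

    if (!isSilent && audioData && bytes > 0) {
        payload.insert(payload.end(), audioData, audioData + bytes); // encoded samples
    }
    return payload;
}

int main() {
    std::vector<uint8_t> samples(960, 0);  // pretend-encoded audio bytes
    auto payload = packAudioFrame(1, "opus", /*isStereo*/ true, /*isSilent*/ false, 0,
                                  {0.f, 1.f, 0.f}, {0.f, 0.f, 0.f, 1.f},
                                  {-0.5f, -0.5f, -0.5f}, {1.f, 1.f, 1.f},
                                  samples.data(), samples.size());
    std::printf("payload bytes: %zu\n", payload.size());
    return 0;
}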
@@ -71,17 +71,6 @@ class QIODevice;
 class Transform;
 class NLPacket;
 
-class AudioSender : public QObject {
-    Q_OBJECT
-public:
-    AudioSender();
-    void start();
-public Q_SLOTS:
-    void emitAudioPacket(const void* audioData, size_t bytes, quint16 sequenceNumber, bool isStereo,
-                         const Transform& transform, glm::vec3 avatarBoundingBoxCorner, glm::vec3 avatarBoundingBoxScale,
-                         PacketType packetType, QString codecName = QString(""));
-};
-
 class AudioClient : public AbstractAudioInterface, public Dependency {
     Q_OBJECT
     SINGLETON_DEPENDENCY
@@ -272,7 +261,6 @@ protected:
     virtual void customDeleter() override;
 
 private:
-    AudioSender *_audioSender;
     friend class CheckDevicesThread;
     friend class LocalInjectorsThread;
 