Mirror of https://github.com/overte-org/overte.git

Merge upstream/master into cleanup project
Commit de67a828cc

8 changed files with 54 additions and 25 deletions

AudioMixer.cpp

@@ -62,10 +62,9 @@ void attachNewBufferToNode(Node *newNode) {
 }
 
 AudioMixer::AudioMixer(const QByteArray& packet) :
-    ThreadedAssignment(packet),
-    _clientMixBuffer(NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio), 0)
+    ThreadedAssignment(packet)
 {
-    connect(NodeList::getInstance(), &NodeList::uuidChanged, this, &AudioMixer::receivedSessionUUID);
+
 }
 
 void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
@@ -301,7 +300,8 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
 
             if ((*otherNode != *node
                  || otherNodeBuffer->shouldLoopbackForNode())
-                && otherNodeBuffer->willBeAddedToMix()) {
+                && otherNodeBuffer->willBeAddedToMix()
+                && otherNodeClientData->getNextOutputLoudness() > 0) {
                 addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
             }
         }
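Aside (not part of the commit): a minimal, self-contained sketch of the gating idea behind the new condition. A source whose precomputed next-frame loudness is zero is skipped instead of being mixed in; in the real code that value is cached by AudioMixerClientData::checkBuffersBeforeFrameSend() and read back through getNextOutputLoudness(). The Source struct, sample values, and main() below are illustrative stand-ins only.

#include <cstdint>
#include <iostream>
#include <vector>

struct Source {
    float nextOutputLoudness;    // precomputed once per frame for each source
    std::vector<int16_t> frame;  // the samples that would be mixed
};

int main() {
    std::vector<Source> sources = {
        {0.0f, {0, 0, 0, 0}},            // silent source: the gate skips it
        {0.2f, {1000, -800, 500, -300}}  // audible source: it gets mixed
    };
    std::vector<int32_t> mix(4, 0);

    for (const Source& s : sources) {
        if (s.nextOutputLoudness > 0) {  // the condition added in prepareMixForListeningNode()
            for (size_t i = 0; i < mix.size(); ++i) {
                mix[i] += s.frame[i];
            }
        }
    }

    for (int32_t v : mix) {
        std::cout << v << ' ';           // prints: 1000 -800 500 -300
    }
    std::cout << '\n';
    return 0;
}

Skipping sources that are known to be silent saves the per-sample mixing work they would otherwise cost without changing the audible result.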
|
@@ -332,10 +332,6 @@ void AudioMixer::readPendingDatagrams() {
     }
 }
 
-void AudioMixer::receivedSessionUUID(const QUuid& sessionUUID) {
-    populatePacketHeader(_clientMixBuffer, PacketTypeMixedAudio);
-}
-
 void AudioMixer::run() {
 
     commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);
 
@@ -350,17 +346,12 @@ void AudioMixer::run() {
     timeval startTime;
 
     gettimeofday(&startTime, NULL);
 
-    int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio);
+    char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO
+                                     + numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio)];
 
     while (!_isFinished) {
 
-        QCoreApplication::processEvents();
-
-        if (_isFinished) {
-            break;
-        }
-
         foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
             if (node->getLinkedData()) {
                 ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(JITTER_BUFFER_SAMPLES);
 
@@ -371,9 +362,11 @@ void AudioMixer::run() {
             if (node->getType() == NodeType::Agent && node->getActiveSocket() && node->getLinkedData()
                 && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) {
                 prepareMixForListeningNode(node.data());
 
-                memcpy(_clientMixBuffer.data() + numBytesPacketHeader, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
-                nodeList->writeDatagram(_clientMixBuffer, node);
+                int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
+
+                memcpy(clientMixBuffer + numBytesPacketHeader, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
+                nodeList->writeDatagram(clientMixBuffer, NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader, node);
             }
         }
 
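Aside (not part of the commit): a toy, self-contained version of the send path above. It assumes only what the hunk shows: a header is written at the front of a heap-allocated buffer and the stereo samples are copied in right after it, with the datagram length passed explicitly. writeFakeHeader() and the byte sizes are made-up stand-ins for populatePacketHeader() and the real constants.

#include <cstdint>
#include <cstring>
#include <iostream>

const int STEREO_PAYLOAD_BYTES = 512 * sizeof(int16_t);  // stand-in for NETWORK_BUFFER_LENGTH_BYTES_STEREO
const int HEADER_BYTES = 24;                             // stand-in for the mixed-audio header size

// stand-in for populatePacketHeader(): writes a header and returns its length
int writeFakeHeader(char* packet) {
    std::memset(packet, 0xAB, HEADER_BYTES);
    return HEADER_BYTES;
}

int main() {
    int16_t clientSamples[512] = {0};  // the mixed frame, playing the role of _clientSamples
    char* clientMixBuffer = new char[STEREO_PAYLOAD_BYTES + HEADER_BYTES];

    int numBytesPacketHeader = writeFakeHeader(clientMixBuffer);
    std::memcpy(clientMixBuffer + numBytesPacketHeader, clientSamples, STEREO_PAYLOAD_BYTES);

    int datagramLength = STEREO_PAYLOAD_BYTES + numBytesPacketHeader;
    std::cout << "would send " << datagramLength << " bytes\n";

    delete[] clientMixBuffer;  // mirrors the delete[] added at the end of run()
    return 0;
}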
|
@@ -383,6 +376,12 @@ void AudioMixer::run() {
                 ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend();
             }
         }
 
+        QCoreApplication::processEvents();
+
+        if (_isFinished) {
+            break;
+        }
+
         int usecToSleep = usecTimestamp(&startTime) + (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - usecTimestampNow();
 
@@ -393,4 +392,6 @@ void AudioMixer::run() {
         }
     }
 
+    delete[] clientMixBuffer;
+
 }

AudioMixer.h

@@ -28,8 +28,6 @@ public slots:
     void run();
 
     void readPendingDatagrams();
-private slots:
-    void receivedSessionUUID(const QUuid& sessionUUID);
 private:
     /// adds one buffer to the mix for a listening node
     void addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,

@@ -38,8 +36,6 @@ private:
     /// prepares and sends a mix to one Node
     void prepareMixForListeningNode(Node* node);
 
-    QByteArray _clientMixBuffer;
-
     // client samples capacity is larger than what will be sent to optimize mixing
     int16_t _clientSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + SAMPLE_PHASE_DELAY_AT_90];
 };

AudioMixerClientData.cpp

@@ -13,6 +13,13 @@
 
 #include "AudioMixerClientData.h"
 
+AudioMixerClientData::AudioMixerClientData() :
+    _ringBuffers(),
+    _nextOutputLoudness(0)
+{
+
+}
+
 AudioMixerClientData::~AudioMixerClientData() {
     for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
         // delete this attached PositionalAudioRingBuffer

@@ -80,6 +87,10 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(int jitterBufferLengthSamples) {
             // this is a ring buffer that is ready to go
             // set its flag so we know to push its buffer when all is said and done
             _ringBuffers[i]->setWillBeAddedToMix(true);
+
+            // calculate the average loudness for the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
+            // that would be mixed in
+            _nextOutputLoudness = _ringBuffers[i]->averageLoudnessForBoundarySamples(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
         }
     }
 }

AudioMixerClientData.h

@@ -18,16 +18,20 @@
 
 class AudioMixerClientData : public NodeData {
 public:
+    AudioMixerClientData();
     ~AudioMixerClientData();
 
     const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
     AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
 
+    float getNextOutputLoudness() const { return _nextOutputLoudness; }
+
     int parseData(const QByteArray& packet);
     void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples);
     void pushBuffersAfterFrameSend();
 private:
     std::vector<PositionalAudioRingBuffer*> _ringBuffers;
+    float _nextOutputLoudness;
 };
 
 #endif /* defined(__hifi__AudioMixerClientData__) */

Head.cpp

@@ -90,8 +90,7 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
     _saccade += (_saccadeTarget - _saccade) * 0.50f;
 
     const float AUDIO_AVERAGING_SECS = 0.05f;
-    _averageLoudness = (1.f - deltaTime / AUDIO_AVERAGING_SECS) * _averageLoudness +
-        (deltaTime / AUDIO_AVERAGING_SECS) * _audioLoudness;
+    _averageLoudness = glm::mix(_averageLoudness, _audioLoudness, glm::min(deltaTime / AUDIO_AVERAGING_SECS, 1.0f));
 
     // Detect transition from talking to not; force blink after that and a delay
     bool forceBlink = false;

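Aside (not part of the commit): a standalone check that the glm::mix() form is the same blend as the removed two-term expression, with glm::min() additionally clamping the blend factor so an unusually long frame cannot overshoot the target loudness. GLM is assumed to be available, since the surrounding code already uses it.

#include <glm/glm.hpp>
#include <iostream>

int main() {
    const float AUDIO_AVERAGING_SECS = 0.05f;
    float averageLoudness = 0.10f;
    float audioLoudness = 0.50f;

    float deltaTime = 0.016f;  // a typical ~60 fps frame
    float t = deltaTime / AUDIO_AVERAGING_SECS;
    float oldForm = (1.f - t) * averageLoudness + t * audioLoudness;
    float newForm = glm::mix(averageLoudness, audioLoudness, glm::min(t, 1.0f));
    std::cout << oldForm << " == " << newForm << '\n';  // identical while deltaTime < AUDIO_AVERAGING_SECS

    deltaTime = 0.2f;  // a long frame: the unclamped form would shoot past audioLoudness
    t = deltaTime / AUDIO_AVERAGING_SECS;
    std::cout << glm::mix(averageLoudness, audioLoudness, glm::min(t, 1.0f)) << '\n';  // clamped to 0.5
    return 0;
}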
AudioRingBuffer.cpp

@@ -55,6 +55,20 @@ int AudioRingBuffer::parseData(const QByteArray& packet) {
     return writeData(packet.data() + numBytesPacketHeader, packet.size() - numBytesPacketHeader);
 }
 
+float AudioRingBuffer::averageLoudnessForBoundarySamples(int numSamples) {
+    // ForBoundarySamples means that we expect the number of samples not to roll off the end of the ring buffer
+    float averageLoudness = 0;
+
+    for (int i = 0; i < numSamples; ++i) {
+        averageLoudness += fabsf(_nextOutput[i]);
+    }
+
+    averageLoudness /= numSamples;
+    averageLoudness /= MAX_SAMPLE_VALUE;
+
+    return averageLoudness;
+}
+
 qint64 AudioRingBuffer::readSamples(int16_t* destination, qint64 maxSamples) {
     return readData((char*) destination, maxSamples * sizeof(int16_t));
 }

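Aside (not part of the commit): a worked, standalone version of the loudness formula added above, the mean absolute sample value normalized into [0, 1] by the largest 16-bit sample value. MAX_SAMPLE_VALUE is defined locally here for illustration; the real constant comes from the codebase.

#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
    const float MAX_SAMPLE_VALUE = 32767.0f;  // largest positive int16_t value
    int16_t samples[] = {16384, -16384, 0, 0};
    int numSamples = 4;

    float averageLoudness = 0;
    for (int i = 0; i < numSamples; ++i) {
        averageLoudness += fabsf(samples[i]);
    }

    averageLoudness /= numSamples;         // mean |sample| = 8192
    averageLoudness /= MAX_SAMPLE_VALUE;   // ~0.25

    std::cout << averageLoudness << '\n';  // a frame of all zeros would print 0, which is what the mixer's gate tests for
    return 0;
}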
AudioRingBuffer.h

@@ -49,6 +49,8 @@ public:
     // assume callers using this will never wrap around the end
     const int16_t* getNextOutput() { return _nextOutput; }
     const int16_t* getBuffer() { return _buffer; }
 
+    float averageLoudnessForBoundarySamples(int numSamples);
+
     qint64 readSamples(int16_t* destination, qint64 maxSamples);
     qint64 writeSamples(const int16_t* source, qint64 maxSamples);

NodeList.cpp

@@ -267,7 +267,9 @@ void NodeList::processNodeData(const HifiSockAddr& senderSockAddr, const QByteArray& packet) {
             // the node decided not to do anything with this packet
             // if it comes from a known source we should keep that node alive
             SharedNodePointer matchingNode = sendingNodeForPacket(packet);
-            matchingNode->setLastHeardMicrostamp(usecTimestampNow());
+            if (matchingNode) {
+                matchingNode->setLastHeardMicrostamp(usecTimestampNow());
+            }
 
             break;
         }
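Aside (not part of the commit): a minimal illustration of what the added null check guards against, assuming sendingNodeForPacket() can return an empty SharedNodePointer when a packet arrives from an unknown sender. The types below are stand-ins, not the real Node or NodeList API.

#include <iostream>
#include <memory>

struct Node {
    void setLastHeardMicrostamp(long long) {}
};
using SharedNodePointer = std::shared_ptr<Node>;

// stand-in: returns an empty pointer for unknown senders
SharedNodePointer sendingNodeForPacket(bool knownSender) {
    return knownSender ? std::make_shared<Node>() : SharedNodePointer();
}

int main() {
    SharedNodePointer matchingNode = sendingNodeForPacket(false);
    if (matchingNode) {  // the guard added in the diff
        matchingNode->setLastHeardMicrostamp(0);
    } else {
        std::cout << "unknown sender, nothing to keep alive\n";
    }
    return 0;
}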
|
|