Mirror of https://github.com/HifiExperiments/overte.git (synced 2025-08-04 03:04:40 +02:00)
fix windows warnings
parent 1358f68868
commit 825c1c8876
10 changed files with 459 additions and 459 deletions
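
Most of the changed lines below replace signed loop counters with unsigned ones where they are compared against unsigned container sizes, which is what triggers MSVC's C4018 "signed/unsigned mismatch" warning (GCC and Clang report the same under -Wsign-compare). A minimal standalone sketch of the warning and the fix pattern, not code from this repository:

    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> ringBuffers = {1, 2, 3};

        // Before: i is a signed int, but std::vector::size() returns an
        // unsigned type, so the comparison i < ringBuffers.size() warns.
        // for (int i = 0; i < ringBuffers.size(); i++) { ... }

        // After: an unsigned counter matches the type of size() and the
        // warning goes away without changing behavior for in-range sizes.
        for (unsigned int i = 0; i < ringBuffers.size(); i++) {
            printf("%d\n", ringBuffers[i]);
        }
        return 0;
    }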
@@ -64,7 +64,7 @@ void attachNewBufferToNode(Node *newNode) {
 AudioMixer::AudioMixer(const unsigned char* dataBuffer, int numBytes) :
     ThreadedAssignment(dataBuffer, numBytes)
 {
 
 }
 
 void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
@@ -73,79 +73,79 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
     float attenuationCoefficient = 1.0f;
     int numSamplesDelay = 0;
     float weakChannelAmplitudeRatio = 1.0f;
 
     const int PHASE_DELAY_AT_90 = 20;
 
     if (bufferToAdd != listeningNodeBuffer) {
         // if the two buffer pointers do not match then these are different buffers
 
         glm::vec3 listenerPosition = listeningNodeBuffer->getPosition();
         glm::vec3 relativePosition = bufferToAdd->getPosition() - listeningNodeBuffer->getPosition();
         glm::quat inverseOrientation = glm::inverse(listeningNodeBuffer->getOrientation());
 
         float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
         float radius = 0.0f;
 
         if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) {
             InjectedAudioRingBuffer* injectedBuffer = (InjectedAudioRingBuffer*) bufferToAdd;
             radius = injectedBuffer->getRadius();
             attenuationCoefficient *= injectedBuffer->getAttenuationRatio();
         }
 
         if (radius == 0 || (distanceSquareToSource > radius * radius)) {
             // this is either not a spherical source, or the listener is outside the sphere
 
             if (radius > 0) {
                 // this is a spherical source - the distance used for the coefficient
                 // needs to be the closest point on the boundary to the source
 
                 // override the distance to the node with the distance to the point on the
                 // boundary of the sphere
                 distanceSquareToSource -= (radius * radius);
 
             } else {
                 // calculate the angle delivery for off-axis attenuation
                 glm::vec3 rotatedListenerPosition = glm::inverse(bufferToAdd->getOrientation()) * relativePosition;
 
                 float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                                    glm::normalize(rotatedListenerPosition));
 
                 const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
                 const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
 
                 float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
                     (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / 90.0f));
 
                 // multiply the current attenuation coefficient by the calculated off axis coefficient
                 attenuationCoefficient *= offAxisCoefficient;
             }
 
             glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
 
             const float DISTANCE_SCALE = 2.5f;
             const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
             const float DISTANCE_LOG_BASE = 2.5f;
             const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);
 
             // calculate the distance coefficient using the distance to this node
             float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
                                              DISTANCE_SCALE_LOG +
                                              (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
             distanceCoefficient = std::min(1.0f, distanceCoefficient);
 
             // multiply the current attenuation coefficient by the distance coefficient
             attenuationCoefficient *= distanceCoefficient;
 
             // project the rotated source position vector onto the XZ plane
             rotatedSourcePosition.y = 0.0f;
 
             // produce an oriented angle about the y-axis
             bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
                                                               glm::normalize(rotatedSourcePosition),
                                                               glm::vec3(0.0f, 1.0f, 0.0f));
 
             const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;
 
             // figure out the number of samples of delay and the ratio of the amplitude
             // in the weak channel for audio spatialization
             float sinRatio = fabsf(sinf(glm::radians(bearingRelativeAngleToSource)));
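
A note on the distance attenuation in the hunk above: with DISTANCE_SCALE and DISTANCE_LOG_BASE both 2.5, DISTANCE_SCALE_LOG is exactly 1, so the exponent collapses to log base 2.5 of the distance and the coefficient reduces to 0.3^(log2.5(d)): amplitude drops by a factor of 0.3 every time the distance grows 2.5x. A standalone sketch (reusing the constants above) that prints the falloff:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main() {
        const float DISTANCE_SCALE = 2.5f;
        const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
        const float DISTANCE_LOG_BASE = 2.5f;
        const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE); // == 1

        // d = 1, 2.5, 6.25, ... -> coefficient = 1, 0.3, 0.09, ...
        for (float d = 1.0f; d < 40.0f; d *= DISTANCE_SCALE) {
            float distanceSquareToSource = d * d;
            float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
                                             DISTANCE_SCALE_LOG +
                                             (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
            distanceCoefficient = std::min(1.0f, distanceCoefficient);
            printf("d = %7.4f -> coefficient = %f\n", d, distanceCoefficient);
        }
        return 0;
    }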
@@ -153,11 +153,11 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
             weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
         }
     }
 
     // if the bearing relative angle to source is > 0 then the delayed channel is the right one
     int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
     int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
 
     for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 2) {
         if ((s / 2) < numSamplesDelay) {
             // pull the earlier sample for the delayed channel
@@ -165,12 +165,12 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
             _clientSamples[s + delayedChannelOffset] = glm::clamp(_clientSamples[s + delayedChannelOffset] + earlierSample,
                                                                   MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
         }
 
         // pull the current sample for the good channel
         int16_t currentSample = (*bufferToAdd)[s / 2] * attenuationCoefficient;
         _clientSamples[s + goodChannelOffset] = glm::clamp(_clientSamples[s + goodChannelOffset] + currentSample,
                                                            MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
 
         if ((s / 2) + numSamplesDelay < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
             // place the current sample at the right spot in the delayed channel
             int16_t clampedSample = glm::clamp((int) (_clientSamples[s + (numSamplesDelay * 2) + delayedChannelOffset]
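
The loop in the hunk above writes one attenuated mono source into an interleaved stereo mix, delaying the ear facing away from the source by numSamplesDelay frames. A simplified sketch of just the indexing, with hypothetical buffer sizes and the attenuation, clamping, and weak-channel amplitude ratio omitted:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        const int FRAMES = 8;            // hypothetical; far smaller than a network buffer
        int16_t mono[FRAMES] = {10, 20, 30, 40, 50, 60, 70, 80};
        int16_t stereo[FRAMES * 2];      // interleaved L/R
        memset(stereo, 0, sizeof(stereo));

        int numSamplesDelay = 2;         // frames of inter-aural delay
        int delayedChannelOffset = 1;    // source to the left, so the right ear is delayed
        int goodChannelOffset = 0;

        for (int s = 0; s < FRAMES * 2; s += 2) {
            // the near ear gets the sample immediately...
            stereo[s + goodChannelOffset] = mono[s / 2];

            // ...and the same sample lands numSamplesDelay frames later in the far ear
            if ((s / 2) + numSamplesDelay < FRAMES) {
                stereo[s + (numSamplesDelay * 2) + delayedChannelOffset] = mono[s / 2];
            }
        }

        for (int s = 0; s < FRAMES * 2; s += 2) {
            printf("frame %d: L=%d R=%d\n", s / 2, stereo[s], stereo[s + 1]);
        }
        return 0;
    }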
@@ -183,22 +183,22 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
 
 void AudioMixer::prepareMixForListeningNode(Node* node) {
     NodeList* nodeList = NodeList::getInstance();
 
     AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();
 
     // zero out the client mix for this node
     memset(_clientSamples, 0, sizeof(_clientSamples));
 
     // loop through all other nodes that have sufficient audio to mix
     for (NodeList::iterator otherNode = nodeList->begin(); otherNode != nodeList->end(); otherNode++) {
         if (otherNode->getLinkedData()) {
 
             AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
 
             // enumerate the ARBs attached to the otherNode and add all that should be added to mix
-            for (int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) {
+            for (unsigned int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) {
                 PositionalAudioRingBuffer* otherNodeBuffer = otherNodeClientData->getRingBuffers()[i];
 
                 if ((*otherNode != *node
                         || otherNodeBuffer->shouldLoopbackForNode())
                         && otherNodeBuffer->willBeAddedToMix()) {
@@ -217,14 +217,14 @@ void AudioMixer::processDatagram(const QByteArray& dataByteArray, const HifiSockAddr& senderSockAddr) {
         || dataByteArray[0] == PACKET_TYPE_INJECT_AUDIO) {
         QUuid nodeUUID = QUuid::fromRfc4122(dataByteArray.mid(numBytesForPacketHeader((unsigned char*) dataByteArray.data()),
                                                               NUM_BYTES_RFC4122_UUID));
 
         NodeList* nodeList = NodeList::getInstance();
 
         Node* matchingNode = nodeList->nodeWithUUID(nodeUUID);
 
         if (matchingNode) {
             nodeList->updateNodeWithData(matchingNode, senderSockAddr, (unsigned char*) dataByteArray.data(), dataByteArray.size());
 
             if (!matchingNode->getActiveSocket()) {
                 // we don't have an active socket for this node, but they're talking to us
                 // this means they've heard from us and can reply, let's assume public is active
@@ -238,29 +238,29 @@ void AudioMixer::processDatagram(const QByteArray& dataByteArray, const HifiSockAddr& senderSockAddr) {
 }
 
 void AudioMixer::run() {
 
     commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NODE_TYPE_AUDIO_MIXER);
 
     NodeList* nodeList = NodeList::getInstance();
 
     const char AUDIO_MIXER_NODE_TYPES_OF_INTEREST[2] = { NODE_TYPE_AGENT, NODE_TYPE_AUDIO_INJECTOR };
     nodeList->setNodeTypesOfInterest(AUDIO_MIXER_NODE_TYPES_OF_INTEREST, sizeof(AUDIO_MIXER_NODE_TYPES_OF_INTEREST));
 
     nodeList->linkedDataCreateCallback = attachNewBufferToNode;
 
     int nextFrame = 0;
     timeval startTime;
 
     gettimeofday(&startTime, NULL);
 
     int numBytesPacketHeader = numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_MIXED_AUDIO);
     unsigned char clientPacket[NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader];
     populateTypeAndVersion(clientPacket, PACKET_TYPE_MIXED_AUDIO);
 
     while (!_isFinished) {
 
         QCoreApplication::processEvents();
 
         if (_isFinished) {
             break;
         }
@@ -270,33 +270,33 @@ void AudioMixer::run() {
                 ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(JITTER_BUFFER_SAMPLES);
             }
         }
 
         for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
             if (node->getType() == NODE_TYPE_AGENT && node->getActiveSocket() && node->getLinkedData()
                 && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) {
                 prepareMixForListeningNode(&(*node));
 
                 memcpy(clientPacket + numBytesPacketHeader, _clientSamples, sizeof(_clientSamples));
                 nodeList->getNodeSocket().writeDatagram((char*) clientPacket, sizeof(clientPacket),
                                                         node->getActiveSocket()->getAddress(),
                                                         node->getActiveSocket()->getPort());
             }
         }
 
         // push forward the next output pointers for any audio buffers we used
         for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
             if (node->getLinkedData()) {
                 ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend();
             }
         }
 
         int usecToSleep = usecTimestamp(&startTime) + (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - usecTimestampNow();
 
         if (usecToSleep > 0) {
             usleep(usecToSleep);
         } else {
             qDebug("Took too much time, not sleeping!\n");
         }
 
     }
 }
@@ -14,19 +14,19 @@
 #include "AudioMixerClientData.h"
 
 AudioMixerClientData::~AudioMixerClientData() {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
+    for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
         // delete this attached PositionalAudioRingBuffer
         delete _ringBuffers[i];
     }
 }
 
 AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
+    for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
         if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Microphone) {
             return (AvatarAudioRingBuffer*) _ringBuffers[i];
         }
     }
 
     // no AvatarAudioRingBuffer found - return NULL
     return NULL;
 }
@@ -34,49 +34,49 @@ AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const {
 int AudioMixerClientData::parseData(unsigned char* packetData, int numBytes) {
     if (packetData[0] == PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO
         || packetData[0] == PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO) {
 
         // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
         AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
 
         if (!avatarRingBuffer) {
             // we don't have an AvatarAudioRingBuffer yet, so add it
             avatarRingBuffer = new AvatarAudioRingBuffer();
             _ringBuffers.push_back(avatarRingBuffer);
         }
 
         // ask the AvatarAudioRingBuffer instance to parse the data
         avatarRingBuffer->parseData(packetData, numBytes);
     } else {
         // this is injected audio
 
         // grab the stream identifier for this injected audio
         QByteArray rfcUUID = QByteArray((char*) packetData + numBytesForPacketHeader(packetData) + NUM_BYTES_RFC4122_UUID,
                                         NUM_BYTES_RFC4122_UUID);
         QUuid streamIdentifier = QUuid::fromRfc4122(rfcUUID);
 
         InjectedAudioRingBuffer* matchingInjectedRingBuffer = NULL;
 
-        for (int i = 0; i < _ringBuffers.size(); i++) {
+        for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
             if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector
                 && ((InjectedAudioRingBuffer*) _ringBuffers[i])->getStreamIdentifier() == streamIdentifier) {
                 matchingInjectedRingBuffer = (InjectedAudioRingBuffer*) _ringBuffers[i];
             }
         }
 
         if (!matchingInjectedRingBuffer) {
             // we don't have a matching injected audio ring buffer, so add it
             matchingInjectedRingBuffer = new InjectedAudioRingBuffer(streamIdentifier);
             _ringBuffers.push_back(matchingInjectedRingBuffer);
         }
 
         matchingInjectedRingBuffer->parseData(packetData, numBytes);
     }
 
     return 0;
 }
 
 void AudioMixerClientData::checkBuffersBeforeFrameSend(int jitterBufferLengthSamples) {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
+    for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
         if (_ringBuffers[i]->shouldBeAddedToMix(jitterBufferLengthSamples)) {
             // this is a ring buffer that is ready to go
             // set its flag so we know to push its buffer when all is said and done
@@ -86,13 +86,13 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(int jitterBufferLengthSamples) {
 }
 
 void AudioMixerClientData::pushBuffersAfterFrameSend() {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
+    for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
         // this was a used buffer, push the output pointer forwards
         PositionalAudioRingBuffer* audioBuffer = _ringBuffers[i];
 
         if (audioBuffer->willBeAddedToMix()) {
             audioBuffer->shiftReadPosition(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
 
             audioBuffer->setWillBeAddedToMix(false);
         } else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector
                    && audioBuffer->hasStarted() && audioBuffer->isStarved()) {
@@ -542,7 +542,7 @@ void DomainServer::prepopulateStaticAssignmentFile() {
         QStringList multiConfigList = multiConfig.split(";");
 
         // read each config to a payload for a VS assignment
-        for (int i = 0; i < multiConfigList.size(); i++) {
+        for (unsigned int i = 0; i < multiConfigList.size(); i++) {
             QString config = multiConfigList.at(i);
 
             qDebug("config[%d]=%s\n", i, config.toLocal8Bit().constData());
@@ -584,7 +584,7 @@ void DomainServer::prepopulateStaticAssignmentFile() {
         QStringList multiConfigList = multiConfig.split(";");
 
         // read each config to a payload for a VS assignment
-        for (int i = 0; i < multiConfigList.size(); i++) {
+        for (unsigned int i = 0; i < multiConfigList.size(); i++) {
             QString config = multiConfigList.at(i);
 
             qDebug("config[%d]=%s\n", i, config.toLocal8Bit().constData());
@@ -96,26 +96,26 @@ QAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode) {
         kAudioObjectPropertyScopeGlobal,
         kAudioObjectPropertyElementMaster
     };
 
     if (mode == QAudio::AudioOutput) {
         propertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
     }
 
 
     OSStatus getPropertyError = AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                                            &propertyAddress,
                                                            0,
                                                            NULL,
                                                            &propertySize,
                                                            &defaultDeviceID);
 
     if (!getPropertyError && propertySize) {
         CFStringRef deviceName = NULL;
         propertySize = sizeof(deviceName);
         propertyAddress.mSelector = kAudioDevicePropertyDeviceNameCFString;
         getPropertyError = AudioObjectGetPropertyData(defaultDeviceID, &propertyAddress, 0,
                                                       NULL, &propertySize, &deviceName);
 
         if (!getPropertyError && propertySize) {
             // find a device in the list that matches the name we have and return it
             foreach(QAudioDeviceInfo audioDevice, QAudioDeviceInfo::availableDevices(mode)) {
@@ -127,7 +127,7 @@ QAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode) {
         }
     }
 #endif
 
     // fallback for failed lookup is the default device
     return (mode == QAudio::AudioInput) ? QAudioDeviceInfo::defaultInputDevice() : QAudioDeviceInfo::defaultOutputDevice();
 }
@@ -138,28 +138,28 @@ bool adjustedFormatForAudioDevice(const QAudioDeviceInfo& audioDevice,
     if (!audioDevice.isFormatSupported(desiredAudioFormat)) {
         qDebug() << "The desired format for audio I/O is" << desiredAudioFormat << "\n";
         qDebug() << "The desired audio format is not supported by this device.\n";
 
         if (desiredAudioFormat.channelCount() == 1) {
             adjustedAudioFormat = desiredAudioFormat;
             adjustedAudioFormat.setChannelCount(2);
 
             if (audioDevice.isFormatSupported(adjustedAudioFormat)) {
                 return true;
             } else {
                 adjustedAudioFormat.setChannelCount(1);
             }
         }
 
         if (audioDevice.supportedSampleRates().contains(SAMPLE_RATE * 2)) {
             // use 48, which is a sample downsample, upsample
             adjustedAudioFormat = desiredAudioFormat;
             adjustedAudioFormat.setSampleRate(SAMPLE_RATE * 2);
 
             // return the nearest in case it needs 2 channels
             adjustedAudioFormat = audioDevice.nearestFormat(adjustedAudioFormat);
             return true;
         }
 
         return false;
     } else {
         // set the adjustedAudioFormat to the desiredAudioFormat, since it will work
@@ -176,15 +176,15 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
     } else {
         float sourceToDestinationFactor = (sourceAudioFormat.sampleRate() / (float) destinationAudioFormat.sampleRate())
             * (sourceAudioFormat.channelCount() / (float) destinationAudioFormat.channelCount());
 
         // take into account the number of channels in source and destination
        // accommodate the case where we have an output with > 2 channels
         // this is the case with our HDMI capture
 
         if (sourceToDestinationFactor >= 2) {
             // we need to downsample from 48 to 24
             // for now this only supports a mono output - this would be the case for audio input
 
             for (int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
                 if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
                     destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
@@ -197,7 +197,7 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
                         + (sourceSamples[i + sourceAudioFormat.channelCount()] / 4);
                 }
             }
 
         } else {
             // upsample from 24 to 48
             // for now this only supports a stereo to stereo conversion - this is our case for network audio to output
@@ -205,10 +205,10 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
         int dtsSampleRateFactor = (destinationAudioFormat.sampleRate() / sourceAudioFormat.sampleRate());
         int sampleShift = destinationAudioFormat.channelCount() * dtsSampleRateFactor;
         int destinationToSourceFactor = (1 / sourceToDestinationFactor);
 
         for (int i = 0; i < numDestinationSamples; i += sampleShift) {
             sourceIndex = (i / destinationToSourceFactor);
 
             // fill the L/R channels and make the rest silent
             for (int j = i; j < i + sampleShift; j++) {
                 if (j % destinationAudioFormat.channelCount() == 0) {
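
The upsampling branch above is cut off mid-hunk, but the indexing suggests a zero-order hold: each destination block of sampleShift samples repeats one source frame's L/R pair, and any channels beyond L/R are left silent (the HDMI case the comment mentions). A simplified stereo 24 kHz to 48 kHz sketch under that reading, with a hypothetical 4-frame buffer:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int SRC_FRAMES = 4;
        const int CHANNELS = 2;
        const int RATE_FACTOR = 2;                    // 24 kHz -> 48 kHz

        int16_t src[SRC_FRAMES * CHANNELS] = {1, -1, 2, -2, 3, -3, 4, -4};
        int16_t dst[SRC_FRAMES * CHANNELS * RATE_FACTOR];

        int sampleShift = CHANNELS * RATE_FACTOR;     // destination samples per source frame
        int destinationToSourceFactor = RATE_FACTOR;  // == 1 / sourceToDestinationFactor

        for (int i = 0; i < SRC_FRAMES * CHANNELS * RATE_FACTOR; i += sampleShift) {
            int sourceIndex = i / destinationToSourceFactor;

            // zero-order hold: every destination frame in this block repeats
            // the source frame's L/R pair
            for (int j = i; j < i + sampleShift; j++) {
                dst[j] = src[sourceIndex + (j % CHANNELS)];
            }
        }

        for (int i = 0; i < SRC_FRAMES * CHANNELS * RATE_FACTOR; i += CHANNELS) {
            printf("frame %d: L=%d R=%d\n", i / CHANNELS, dst[i], dst[i + 1]);
        }
        return 0;
    }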
@@ -230,7 +230,7 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
 const int CALLBACK_ACCELERATOR_RATIO = 2;
 
 void Audio::start() {
 
     // set up the desired audio format
     _desiredInputFormat.setSampleRate(SAMPLE_RATE);
     _desiredInputFormat.setSampleSize(16);
@@ -238,102 +238,102 @@ void Audio::start() {
     _desiredInputFormat.setSampleType(QAudioFormat::SignedInt);
     _desiredInputFormat.setByteOrder(QAudioFormat::LittleEndian);
     _desiredInputFormat.setChannelCount(1);
 
     _desiredOutputFormat = _desiredInputFormat;
     _desiredOutputFormat.setChannelCount(2);
 
     QAudioDeviceInfo inputDeviceInfo = defaultAudioDeviceForMode(QAudio::AudioInput);
 
     qDebug() << "The audio input device is" << inputDeviceInfo.deviceName() << "\n";
 
     if (adjustedFormatForAudioDevice(inputDeviceInfo, _desiredInputFormat, _inputFormat)) {
         qDebug() << "The format to be used for audio input is" << _inputFormat << "\n";
 
         _audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
         _numInputCallbackBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL * _inputFormat.channelCount()
             * (_inputFormat.sampleRate() / SAMPLE_RATE)
             / CALLBACK_ACCELERATOR_RATIO;
         _audioInput->setBufferSize(_numInputCallbackBytes);
 
         QAudioDeviceInfo outputDeviceInfo = defaultAudioDeviceForMode(QAudio::AudioOutput);
 
         qDebug() << "The audio output device is" << outputDeviceInfo.deviceName() << "\n";
 
         if (adjustedFormatForAudioDevice(outputDeviceInfo, _desiredOutputFormat, _outputFormat)) {
             qDebug() << "The format to be used for audio output is" << _outputFormat << "\n";
 
             _inputRingBuffer.resizeForFrameSize(_numInputCallbackBytes * CALLBACK_ACCELERATOR_RATIO / sizeof(int16_t));
             _inputDevice = _audioInput->start();
             connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));
 
             // setup our general output device for audio-mixer audio
             _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
             _outputDevice = _audioOutput->start();
 
             // setup a loopback audio output device
             _loopbackAudioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
 
             gettimeofday(&_lastReceiveTime, NULL);
         }
 
         return;
     }
 
     qDebug() << "Unable to set up audio I/O because of a problem with input or output formats.\n";
 }
 
 void Audio::handleAudioInput() {
     static char monoAudioDataPacket[MAX_PACKET_SIZE];
 
     static int numBytesPacketHeader = numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO);
     static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat) + NUM_BYTES_RFC4122_UUID;
 
     static int16_t* monoAudioSamples = (int16_t*) (monoAudioDataPacket + leadingBytes);
 
     static float inputToNetworkInputRatio = _numInputCallbackBytes * CALLBACK_ACCELERATOR_RATIO
         / NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
 
     static int inputSamplesRequired = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio;
 
     QByteArray inputByteArray = _inputDevice->readAll();
 
     if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio)) {
         // if this person wants local loopback add that to the locally injected audio
 
         if (!_loopbackOutputDevice) {
             // we didn't have the loopback output device going so set that up now
             _loopbackOutputDevice = _loopbackAudioOutput->start();
         }
 
         if (_inputFormat == _outputFormat) {
             _loopbackOutputDevice->write(inputByteArray);
         } else {
             static float loopbackOutputToInputRatio = (_outputFormat.sampleRate() / (float) _inputFormat.sampleRate())
                 * (_outputFormat.channelCount() / _inputFormat.channelCount());
 
             QByteArray loopBackByteArray(inputByteArray.size() * loopbackOutputToInputRatio, 0);
 
             linearResampling((int16_t*) inputByteArray.data(), (int16_t*) loopBackByteArray.data(),
                              inputByteArray.size() / sizeof(int16_t),
                              loopBackByteArray.size() / sizeof(int16_t), _inputFormat, _outputFormat);
 
             _loopbackOutputDevice->write(loopBackByteArray);
         }
     }
 
     _inputRingBuffer.writeData(inputByteArray.data(), inputByteArray.size());
 
     while (_inputRingBuffer.samplesAvailable() > inputSamplesRequired) {
 
         int16_t inputAudioSamples[inputSamplesRequired];
         _inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);
 
         // zero out the monoAudioSamples array and the locally injected audio
         memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
 
         // zero out the locally injected audio in preparation for audio procedural sounds
         memset(_localInjectedSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
 
         if (!_muted) {
             // we aren't muted, downsample the input audio
             linearResampling((int16_t*) inputAudioSamples,
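
For the static ratios at the top of handleAudioInput() above, a worked example with assumed values (24 kHz network rate, 48 kHz mono device input, a 960-byte network channel buffer; the real constants live elsewhere in the tree and may differ):

    #include <cstdio>

    int main() {
        const int SAMPLE_RATE = 24000;                           // assumed network rate
        const int NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL = 960; // assumed
        const int NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL =
            NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL / 2;         // int16_t samples
        const int CALLBACK_ACCELERATOR_RATIO = 2;

        int inputSampleRate = 48000;                             // assumed device rate
        int inputChannels = 1;

        // the device callback delivers half a network frame's worth of audio at the device rate
        int numInputCallbackBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL * inputChannels
            * (inputSampleRate / SAMPLE_RATE)
            / CALLBACK_ACCELERATOR_RATIO;

        float inputToNetworkInputRatio = numInputCallbackBytes * CALLBACK_ACCELERATOR_RATIO
            / (float) NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;   // 2.0: two device samples per network sample

        int inputSamplesRequired = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio;

        printf("callback bytes: %d, ratio: %.1f, device samples per network frame: %d\n",
               numInputCallbackBytes, inputToNetworkInputRatio, inputSamplesRequired);
        return 0;
    }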
@@ -341,15 +341,15 @@ void Audio::handleAudioInput() {
                              inputSamplesRequired,
                              NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
                              _inputFormat, _desiredInputFormat);
 
             float loudness = 0;
 
             for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
                 loudness += fabsf(monoAudioSamples[i]);
             }
 
             _lastInputLoudness = loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
 
             // add input data just written to the scope
             QMetaObject::invokeMethod(_scope, "addSamples", Qt::QueuedConnection,
                                       Q_ARG(QByteArray, QByteArray((char*) monoAudioSamples,
@@ -359,47 +359,47 @@ void Audio::handleAudioInput() {
             // our input loudness is 0, since we're muted
             _lastInputLoudness = 0;
         }
 
         // add procedural effects to the appropriate input samples
         addProceduralSounds(monoAudioSamples,
                             NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
 
         NodeList* nodeList = NodeList::getInstance();
         Node* audioMixer = nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER);
 
         if (audioMixer && nodeList->getNodeActiveSocketOrPing(audioMixer)) {
             MyAvatar* interfaceAvatar = Application::getInstance()->getAvatar();
 
             glm::vec3 headPosition = interfaceAvatar->getHeadJointPosition();
             glm::quat headOrientation = interfaceAvatar->getHead().getOrientation();
 
             // we need the amount of bytes in the buffer + 1 for type
             // + 12 for 3 floats for position + float for bearing + 1 attenuation byte
 
             PACKET_TYPE packetType = Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)
                 ? PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO : PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO;
 
             char* currentPacketPtr = monoAudioDataPacket + populateTypeAndVersion((unsigned char*) monoAudioDataPacket,
                                                                                   packetType);
 
             // pack Source Data
             QByteArray rfcUUID = NodeList::getInstance()->getOwnerUUID().toRfc4122();
             memcpy(currentPacketPtr, rfcUUID.constData(), rfcUUID.size());
             currentPacketPtr += rfcUUID.size();
 
             // memcpy the three float positions
             memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
             currentPacketPtr += (sizeof(headPosition));
 
             // memcpy our orientation
             memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
             currentPacketPtr += sizeof(headOrientation);
 
             nodeList->getNodeSocket().writeDatagram(monoAudioDataPacket,
                                                     NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes,
                                                     audioMixer->getActiveSocket()->getAddress(),
                                                     audioMixer->getActiveSocket()->getPort());
 
             Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
                 .updateValue(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
         }
@@ -409,18 +409,18 @@ void Audio::handleAudioInput() {
 void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
     const int NUM_INITIAL_PACKETS_DISCARD = 3;
     const int STANDARD_DEVIATION_SAMPLE_COUNT = 500;
 
     timeval currentReceiveTime;
     gettimeofday(&currentReceiveTime, NULL);
     _totalPacketsReceived++;
 
     double timeDiff = diffclock(&_lastReceiveTime, &currentReceiveTime);
 
     // Discard first few received packets for computing jitter (often they pile up on start)
     if (_totalPacketsReceived > NUM_INITIAL_PACKETS_DISCARD) {
         _stdev.addValue(timeDiff);
     }
 
     if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
         _measuredJitter = _stdev.getStDev();
         _stdev.reset();
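
The jitter estimate in the hunk above is just the standard deviation of packet inter-arrival times, with the first few packets discarded because they tend to pile up at startup. A standalone sketch with hypothetical arrival deltas, using plain loops where the code above uses its _stdev helper:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
        // hypothetical inter-arrival times in milliseconds
        std::vector<double> timeDiffs = {11.8, 12.1, 11.9, 12.4, 11.7, 12.2};

        double mean = 0;
        for (unsigned int i = 0; i < timeDiffs.size(); i++) {
            mean += timeDiffs[i];
        }
        mean /= timeDiffs.size();

        double variance = 0;
        for (unsigned int i = 0; i < timeDiffs.size(); i++) {
            variance += (timeDiffs[i] - mean) * (timeDiffs[i] - mean);
        }
        variance /= timeDiffs.size();

        printf("measured jitter (stdev of inter-arrival times): %f ms\n", sqrt(variance));
        return 0;
    }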
@@ -432,17 +432,17 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
             setJitterBufferSamples(glm::clamp((int)newJitterBufferSamples, 0, MAX_JITTER_BUFFER_SAMPLES));
         }
     }
 
     _ringBuffer.parseData((unsigned char*) audioByteArray.data(), audioByteArray.size());
 
     static float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
         * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
 
     static int numRequiredOutputSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO / networkOutputToOutputRatio;
 
     QByteArray outputBuffer;
     outputBuffer.resize(numRequiredOutputSamples * sizeof(int16_t));
 
     // if there is anything in the ring buffer, decide what to do
     if (_ringBuffer.samplesAvailable() > 0) {
         if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO
@@ -452,61 +452,61 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
         } else {
             // We are either already playing back, or we have enough audio to start playing back.
             _ringBuffer.setIsStarved(false);

             // copy the samples we'll resample from the ring buffer - this also
             // pushes the read pointer of the ring buffer forwards
             int16_t ringBufferSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO];
             _ringBuffer.readSamples(ringBufferSamples, NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);

             // add the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL from each QByteArray
             // in our _localInjectionByteArrays QVector to the _localInjectedSamples

             // add to the output samples whatever is in the _localAudioOutput byte array
             // that lets this user hear sound effects and loopback (if enabled)

-            for (int b = 0; b < _localInjectionByteArrays.size(); b++) {
+            for (unsigned int b = 0; b < _localInjectionByteArrays.size(); b++) {
                 QByteArray audioByteArray = _localInjectionByteArrays.at(b);

                 int16_t* byteArraySamples = (int16_t*) audioByteArray.data();

                 int samplesToRead = MIN(audioByteArray.size() / sizeof(int16_t),
                                         NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

                 for (int i = 0; i < samplesToRead; i++) {
                     _localInjectedSamples[i] = glm::clamp(_localInjectedSamples[i] + byteArraySamples[i],
                                                           MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
                 }

                 if (samplesToRead < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
                     // there isn't anything left to inject from this byte array, remove it from the vector
                     _localInjectionByteArrays.remove(b);
                 } else {
                     // pull out the bytes we just read for outputs
                     audioByteArray.remove(0, samplesToRead * sizeof(int16_t));

                     // still data left to read - replace the byte array in the QVector with the smaller one
                     _localInjectionByteArrays.replace(b, audioByteArray);
                 }
             }

             for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
                 ringBufferSamples[i * 2] = glm::clamp(ringBufferSamples[i * 2] + _localInjectedSamples[i],
                                                       MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
                 ringBufferSamples[(i * 2) + 1] = glm::clamp(ringBufferSamples[(i * 2) + 1] + _localInjectedSamples[i],
                                                             MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
             }

             // copy the packet from the RB to the output
             linearResampling(ringBufferSamples,
                              (int16_t*) outputBuffer.data(),
                              NETWORK_BUFFER_LENGTH_SAMPLES_STEREO,
                              numRequiredOutputSamples,
                              _desiredOutputFormat, _outputFormat);

             if (_outputDevice) {

                 _outputDevice->write(outputBuffer);

                 // add output (@speakers) data just written to the scope
                 QMetaObject::invokeMethod(_scope, "addSamples", Qt::QueuedConnection,
                                           Q_ARG(QByteArray, QByteArray((char*) ringBufferSamples,
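The only substantive change in the hunk above is the type of the loop counter: MSVC raises warning C4018 (signed/unsigned mismatch) when a signed int index is compared against a container's unsigned size, and widening the index to an unsigned type silences it. A minimal, self-contained sketch of the pattern, using std::vector rather than the Qt container actually involved here:

    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> values = {1, 2, 3};

        // "for (int i = 0; ...)" compared against size() draws C4018 on MSVC,
        // because the signed index is converted for an unsigned comparison.
        for (unsigned int i = 0; i < values.size(); i++) {
            printf("%d\n", values[i]);
        }
        return 0;
    }

The unsigned counter converts to the size type without a sign change, so the comparison compiles warning-free.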
@@ -514,7 +514,7 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
                                              Q_ARG(bool, true), Q_ARG(bool, false));
             }
         }

     } else if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
         // we don't have any audio data left in the output buffer, and the ring buffer from
         // the network has nothing in it either - we just starved
@@ -522,9 +522,9 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
         _ringBuffer.setIsStarved(true);
         _numFramesDisplayStarve = 10;
     }

     Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());

     _lastReceiveTime = currentReceiveTime;
 }

@@ -541,59 +541,59 @@ void Audio::render(int screenWidth, int screenHeight) {
         glLineWidth(2.0);
         glBegin(GL_LINES);
         glColor3f(1,1,1);

         int startX = 20.0;
         int currentX = startX;
         int topY = screenHeight - 40;
         int bottomY = screenHeight - 20;
         float frameWidth = 20.0;
         float halfY = topY + ((bottomY - topY) / 2.0);

         // draw the lines for the base of the ring buffer

         glVertex2f(currentX, topY);
         glVertex2f(currentX, bottomY);

         for (int i = 0; i < RING_BUFFER_LENGTH_FRAMES; i++) {
             glVertex2f(currentX, halfY);
             glVertex2f(currentX + frameWidth, halfY);
             currentX += frameWidth;

             glVertex2f(currentX, topY);
             glVertex2f(currentX, bottomY);
         }
         glEnd();

         // show a bar with the amount of audio remaining in ring buffer and output device
         // beyond the current playback

         int bytesLeftInAudioOutput = _audioOutput->bufferSize() - _audioOutput->bytesFree();
         float secondsLeftForAudioOutput = (bytesLeftInAudioOutput / sizeof(int16_t))
             / ((float) _outputFormat.sampleRate() * _outputFormat.channelCount());
         float secondsLeftForRingBuffer = _ringBuffer.samplesAvailable()
             / ((float) _desiredOutputFormat.sampleRate() * _desiredOutputFormat.channelCount());
         float msLeftForAudioOutput = (secondsLeftForAudioOutput + secondsLeftForRingBuffer) * 1000;

         if (_numFramesDisplayStarve == 0) {
             glColor3f(0, 1, 0);
         } else {
             glColor3f(0.5 + (_numFramesDisplayStarve / 20.0f), 0, 0);
             _numFramesDisplayStarve--;
         }

         if (_averagedLatency == 0.0) {
             _averagedLatency = msLeftForAudioOutput;
         } else {
             _averagedLatency = 0.99f * _averagedLatency + 0.01f * (msLeftForAudioOutput);
         }

         glBegin(GL_QUADS);
         glVertex2f(startX, topY + 2);
         glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth, topY + 2);
         glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth, bottomY - 2);
         glVertex2f(startX, bottomY - 2);
         glEnd();

         // Show a yellow bar with the averaged msecs latency you are hearing (from time of packet receipt)
         glColor3f(1,1,0);
         glBegin(GL_QUADS);
@@ -602,13 +602,13 @@ void Audio::render(int screenWidth, int screenHeight) {
         glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth + 2, bottomY + 2);
         glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 2, bottomY + 2);
         glEnd();

         char out[40];
         sprintf(out, "%3.0f\n", _averagedLatency);
         drawtext(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 10, topY - 9, 0.10, 0, 1, 0, out, 1,1,0);

         // Show a red bar with the 'start' point of one frame plus the jitter buffer

         glColor3f(1, 0, 0);
         int jitterBufferPels = (1.f + (float)getJitterBufferSamples()
             / (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL) * frameWidth;
@@ -620,14 +620,14 @@ void Audio::render(int screenWidth, int screenHeight) {
         } else {
             drawtext(startX, bottomY + 12, 0.10, 0, 1, 0, out, 1, 0, 0);
         }

         glBegin(GL_QUADS);
         glVertex2f(startX + jitterBufferPels - 2, topY - 2);
         glVertex2f(startX + jitterBufferPels + 2, topY - 2);
         glVertex2f(startX + jitterBufferPels + 2, bottomY + 2);
         glVertex2f(startX + jitterBufferPels - 2, bottomY + 2);
         glEnd();

     }
     renderToolIcon(screenHeight);
 }
@@ -638,12 +638,12 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
     const float MIN_AUDIBLE_VELOCITY = 0.1;
     const int VOLUME_BASELINE = 400;
     const float SOUND_PITCH = 8.f;

     float speed = glm::length(_lastVelocity);
     float volume = VOLUME_BASELINE * (1.f - speed / MAX_AUDIBLE_VELOCITY);

     float sample;

     // Travelling noise
     // Add a noise-modulated sinewave with volume that tapers off with speed increasing
     if ((speed > MIN_AUDIBLE_VELOCITY) && (speed < MAX_AUDIBLE_VELOCITY)) {
@@ -661,23 +661,23 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
     if (_collisionSoundMagnitude > COLLISION_SOUND_CUTOFF_LEVEL) {
         for (int i = 0; i < numSamples; i++) {
             t = (float) _proceduralEffectSample + (float) i;

             sample = sinf(t * _collisionSoundFrequency)
                 + sinf(t * _collisionSoundFrequency / DOWN_TWO_OCTAVES)
                 + sinf(t * _collisionSoundFrequency / DOWN_FOUR_OCTAVES * UP_MAJOR_FIFTH);
             sample *= _collisionSoundMagnitude * COLLISION_SOUND_MAX_VOLUME;

             int16_t collisionSample = (int16_t) sample;

             monoInput[i] = glm::clamp(monoInput[i] + collisionSample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
             _localInjectedSamples[i] = glm::clamp(_localInjectedSamples[i] + collisionSample,
                                                   MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);

             _collisionSoundMagnitude *= _collisionSoundDuration;
         }
     }
     _proceduralEffectSample += numSamples;

     // Add a drum sound
     const float MAX_VOLUME = 32000.f;
     const float MAX_DURATION = 2.f;
@@ -690,13 +690,13 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
         sample = sinf(t * frequency);
         sample += ((randFloat() - 0.5f) * NOISE_MAGNITUDE);
         sample *= _drumSoundVolume * MAX_VOLUME;

         int16_t collisionSample = (int16_t) sample;

         monoInput[i] = glm::clamp(monoInput[i] + collisionSample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
         _localInjectedSamples[i] = glm::clamp(_localInjectedSamples[i] + collisionSample,
                                               MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);

         _drumSoundVolume *= (1.f - _drumSoundDecay);
     }
     _drumSoundSample += numSamples;
@@ -730,46 +730,46 @@ void Audio::handleAudioByteArray(const QByteArray& audioByteArray) {
 }

 void Audio::renderToolIcon(int screenHeight) {

     _iconBounds = QRect(ICON_LEFT, screenHeight - BOTTOM_PADDING, ICON_SIZE, ICON_SIZE);
     glEnable(GL_TEXTURE_2D);

     glBindTexture(GL_TEXTURE_2D, _micTextureId);
     glColor3f(1, 1, 1);
     glBegin(GL_QUADS);

     glTexCoord2f(1, 1);
     glVertex2f(_iconBounds.left(), _iconBounds.top());

     glTexCoord2f(0, 1);
     glVertex2f(_iconBounds.right(), _iconBounds.top());

     glTexCoord2f(0, 0);
     glVertex2f(_iconBounds.right(), _iconBounds.bottom());

     glTexCoord2f(1, 0);
     glVertex2f(_iconBounds.left(), _iconBounds.bottom());

     glEnd();

     if (_muted) {
         glBindTexture(GL_TEXTURE_2D, _muteTextureId);
         glBegin(GL_QUADS);

         glTexCoord2f(1, 1);
         glVertex2f(_iconBounds.left(), _iconBounds.top());

         glTexCoord2f(0, 1);
         glVertex2f(_iconBounds.right(), _iconBounds.top());

         glTexCoord2f(0, 0);
         glVertex2f(_iconBounds.right(), _iconBounds.bottom());

         glTexCoord2f(1, 0);
         glVertex2f(_iconBounds.left(), _iconBounds.bottom());

         glEnd();
     }

     glDisable(GL_TEXTURE_2D);
 }
@@ -34,33 +34,33 @@ const HifiSockAddr& DataServerClient::dataServerSockAddr() {
 void DataServerClient::putValueForKey(const QString& key, const char* value) {
     QString clientString = Application::getInstance()->getProfile()->getUserString();
     if (!clientString.isEmpty()) {

         unsigned char* putPacket = new unsigned char[MAX_PACKET_SIZE];

         // setup the header for this packet
         int numPacketBytes = populateTypeAndVersion(putPacket, PACKET_TYPE_DATA_SERVER_PUT);

         // pack the client UUID, null terminated
         memcpy(putPacket + numPacketBytes, clientString.toLocal8Bit().constData(), clientString.toLocal8Bit().size());
         numPacketBytes += clientString.toLocal8Bit().size();
         putPacket[numPacketBytes++] = '\0';

         // pack a 1 to designate that we are putting a single value
         putPacket[numPacketBytes++] = 1;

         // pack the key, null terminated
         strcpy((char*) putPacket + numPacketBytes, key.toLocal8Bit().constData());
         numPacketBytes += key.size();
         putPacket[numPacketBytes++] = '\0';

         // pack the value, null terminated
         strcpy((char*) putPacket + numPacketBytes, value);
         numPacketBytes += strlen(value);
         putPacket[numPacketBytes++] = '\0';

         // add the putPacket to our vector of unconfirmed packets, will be deleted once put is confirmed
         // _unmatchedPackets.insert(std::pair<unsigned char*, int>(putPacket, numPacketBytes));

         // send this put request to the data server
         NodeList::getInstance()->getNodeSocket().writeDatagram((char*) putPacket, numPacketBytes,
                                                                dataServerSockAddr().getAddress(),
@@ -81,27 +81,27 @@ void DataServerClient::getValuesForKeysAndUUID(const QStringList& keys, const QU
 void DataServerClient::getValuesForKeysAndUserString(const QStringList& keys, const QString& userString) {
     if (!userString.isEmpty() && keys.size() <= UCHAR_MAX) {
         unsigned char* getPacket = new unsigned char[MAX_PACKET_SIZE];

         // setup the header for this packet
         int numPacketBytes = populateTypeAndVersion(getPacket, PACKET_TYPE_DATA_SERVER_GET);

         // pack the user string (could be username or UUID string), null-terminate
         memcpy(getPacket + numPacketBytes, userString.toLocal8Bit().constData(), userString.toLocal8Bit().size());
         numPacketBytes += userString.toLocal8Bit().size();
         getPacket[numPacketBytes++] = '\0';

         // pack one byte to designate the number of keys
         getPacket[numPacketBytes++] = keys.size();

         QString keyString = keys.join(MULTI_KEY_VALUE_SEPARATOR);

         // pack the key string, null terminated
         strcpy((char*) getPacket + numPacketBytes, keyString.toLocal8Bit().constData());
         numPacketBytes += keyString.size() + sizeof('\0');

         // add the getPacket to our vector of unconfirmed packets, will be deleted once we get a response from the nameserver
         // _unmatchedPackets.insert(std::pair<unsigned char*, int>(getPacket, numPacketBytes));

         // send the get to the data server
         NodeList::getInstance()->getNodeSocket().writeDatagram((char*) getPacket, numPacketBytes,
                                                                dataServerSockAddr().getAddress(),
@@ -120,25 +120,25 @@ void DataServerClient::processConfirmFromDataServer(unsigned char* packetData, i
 void DataServerClient::processSendFromDataServer(unsigned char* packetData, int numPacketBytes) {
     // pull the user string from the packet so we know who to associate this with
     int numHeaderBytes = numBytesForPacketHeader(packetData);

     char* userStringPosition = (char*) packetData + numHeaderBytes;

     QString userString(QByteArray(userStringPosition, strlen(userStringPosition)));

     QUuid userUUID(userString);

     char* keysPosition = (char*) packetData + numHeaderBytes + strlen(userStringPosition)
         + sizeof('\0') + sizeof(unsigned char);
     char* valuesPosition = keysPosition + strlen(keysPosition) + sizeof('\0');

     QStringList keyList = QString(keysPosition).split(MULTI_KEY_VALUE_SEPARATOR);
     QStringList valueList = QString(valuesPosition).split(MULTI_KEY_VALUE_SEPARATOR);

     // user string was UUID, find matching avatar and associate data
-    for (int i = 0; i < keyList.size(); i++) {
+    for (size_t i = 0; i < keyList.size(); i++) {
         if (valueList[i] != " ") {
             if (keyList[i] == DataServerKey::FaceMeshURL) {

                 if (userUUID.isNull() || userUUID == Application::getInstance()->getProfile()->getUUID()) {
                     qDebug("Changing user's face model URL to %s\n", valueList[i].toLocal8Bit().constData());
                     Application::getInstance()->getProfile()->setFaceModelURL(QUrl(valueList[i]));
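Same warning fix here: the index over the parallel key/value lists goes from int to size_t. A short sketch of the parallel-list walk with an unsigned index, using std::vector and hypothetical data standing in for the Qt types above:

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
        // Hypothetical stand-ins for the key/value lists unpacked from a packet.
        std::vector<std::string> keys   = {"FaceMeshURL", "SkeletonURL"};
        std::vector<std::string> values = {"mesh.fst", "skeleton.fst"};

        // size_t matches the unsigned type returned by size(), so the
        // loop comparison is between like types and draws no warning.
        for (size_t i = 0; i < keys.size(); i++) {
            printf("%s -> %s\n", keys[i].c_str(), values[i].c_str());
        }
        return 0;
    }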
@@ -148,7 +148,7 @@ void DataServerClient::processSendFromDataServer(unsigned char* packetData, int
                     for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
                         if (node->getLinkedData() != NULL && node->getType() == NODE_TYPE_AGENT) {
                             Avatar* avatar = (Avatar *) node->getLinkedData();

                             if (avatar->getUUID() == userUUID) {
                                 QMetaObject::invokeMethod(&avatar->getHead().getFaceModel(),
                                     "setURL", Q_ARG(QUrl, QUrl(valueList[i])));
@@ -157,7 +157,7 @@ void DataServerClient::processSendFromDataServer(unsigned char* packetData, int
                     }
                 }
             } else if (keyList[i] == DataServerKey::SkeletonURL) {

                 if (userUUID.isNull() || userUUID == Application::getInstance()->getProfile()->getUUID()) {
                     qDebug("Changing user's skeleton URL to %s\n", valueList[i].toLocal8Bit().constData());
                     Application::getInstance()->getProfile()->setSkeletonModelURL(QUrl(valueList[i]));
@@ -167,7 +167,7 @@ void DataServerClient::processSendFromDataServer(unsigned char* packetData, int
                     for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
                         if (node->getLinkedData() != NULL && node->getType() == NODE_TYPE_AGENT) {
                             Avatar* avatar = (Avatar *) node->getLinkedData();

                             if (avatar->getUUID() == userUUID) {
                                 QMetaObject::invokeMethod(&avatar->getSkeletonModel(), "setURL",
                                     Q_ARG(QUrl, QUrl(valueList[i])));
@@ -177,33 +177,33 @@ void DataServerClient::processSendFromDataServer(unsigned char* packetData, int
                 }
             } else if (keyList[i] == DataServerKey::Domain && keyList[i + 1] == DataServerKey::Position
                        && valueList[i] != " " && valueList[i + 1] != " ") {

                 QStringList coordinateItems = valueList[i + 1].split(',');

                 if (coordinateItems.size() == 3) {

                     // send a node kill request, indicating to other clients that they should play the "disappeared" effect
                     NodeList::getInstance()->sendKillNode(&NODE_TYPE_AVATAR_MIXER, 1);

                     qDebug() << "Changing domain to" << valueList[i].toLocal8Bit().constData() <<
                         "and position to" << valueList[i + 1].toLocal8Bit().constData() <<
                         "to go to" << userString << "\n";

                     NodeList::getInstance()->setDomainHostname(valueList[i]);

                     glm::vec3 newPosition(coordinateItems[0].toFloat(),
                                           coordinateItems[1].toFloat(),
                                           coordinateItems[2].toFloat());
                     Application::getInstance()->getAvatar()->setPosition(newPosition);
                 }

             } else if (keyList[i] == DataServerKey::UUID) {
                 // this is the user's UUID - set it on the profile
                 Application::getInstance()->getProfile()->setUUID(valueList[i]);
             }
         }
     }

     // remove the matched packet from our map so it isn't re-sent to the data-server
     // removeMatchedPacketFromMap(packetData, numPacketBytes);
 }
@@ -228,12 +228,12 @@ void DataServerClient::removeMatchedPacketFromMap(unsigned char* packetData, int
         if (memcmp(mapIterator->first + sizeof(PACKET_TYPE),
                    packetData + sizeof(PACKET_TYPE),
                    numPacketBytes - sizeof(PACKET_TYPE)) == 0) {

             // this is a match - remove the confirmed packet from the vector and delete associated member
             // so it isn't sent back out
             delete[] mapIterator->first;
             _unmatchedPackets.erase(mapIterator);

             // we've matched the packet - bail out
             break;
         }
@@ -919,7 +919,7 @@ void MyAvatar::updateChatCircle(float deltaTime) {

     // compute the accumulated centers
     glm::vec3 center = _position;
-    for (int i = 0; i < sortedAvatars.size(); i++) {
+    for (size_t i = 0; i < sortedAvatars.size(); i++) {
         SortedAvatar& sortedAvatar = sortedAvatars[i];
         sortedAvatar.accumulatedCenter = (center += sortedAvatar.avatar->getPosition()) / (i + 2.0f);
     }
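Worth noting in the hunk above: the new size_t index also feeds the float arithmetic in (i + 2.0f), where it is simply converted to float, so the running average is unaffected. A sketch of that accumulation with made-up one-dimensional positions in place of the avatar positions:

    #include <cstdio>
    #include <vector>

    int main() {
        // Hypothetical 1-D positions standing in for the other avatars.
        std::vector<float> positions = {1.0f, 3.0f, 5.0f};
        float center = 7.0f; // stand-in for _position

        for (size_t i = 0; i < positions.size(); i++) {
            center += positions[i];
            // The unsigned index converts cleanly to float here: the sum so
            // far is averaged over i + 2 entries (self plus i + 1 others).
            float accumulated = center / (i + 2.0f);
            printf("accumulated center %zu: %f\n", i, accumulated);
        }
        return 0;
    }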
@@ -21,21 +21,21 @@ void SkeletonModel::simulate(float deltaTime) {
     if (!isActive()) {
         return;
     }

     setTranslation(_owningAvatar->getPosition());
     setRotation(_owningAvatar->getOrientation() * glm::angleAxis(180.0f, 0.0f, 1.0f, 0.0f));
     const float MODEL_SCALE = 0.0006f;
     setScale(glm::vec3(1.0f, 1.0f, 1.0f) * _owningAvatar->getScale() * MODEL_SCALE);

     Model::simulate(deltaTime);

     // find the left and rightmost active Leap palms
     int leftPalmIndex, rightPalmIndex;
     HandData& hand = _owningAvatar->getHand();
     hand.getLeftRightPalmIndices(leftPalmIndex, rightPalmIndex);

     const float HAND_RESTORATION_RATE = 0.25f;

     const FBXGeometry& geometry = _geometry->getFBXGeometry();
     if (leftPalmIndex == -1) {
         // no Leap data; set hands from mouse
@@ -45,13 +45,13 @@ void SkeletonModel::simulate(float deltaTime) {
             applyHandPosition(geometry.rightHandJointIndex, _owningAvatar->getHandPosition());
         }
         restoreLeftHandPosition(HAND_RESTORATION_RATE);

     } else if (leftPalmIndex == rightPalmIndex) {
         // right hand only
         applyPalmData(geometry.rightHandJointIndex, geometry.rightFingerJointIndices, geometry.rightFingertipJointIndices,
             hand.getPalms()[leftPalmIndex]);
         restoreLeftHandPosition(HAND_RESTORATION_RATE);

     } else {
         applyPalmData(geometry.leftHandJointIndex, geometry.leftFingerJointIndices, geometry.leftFingertipJointIndices,
             hand.getPalms()[leftPalmIndex]);
@@ -65,39 +65,39 @@ bool SkeletonModel::render(float alpha) {
     if (_jointStates.isEmpty()) {
         return false;
     }

     // only render the balls and sticks if the skeleton has no meshes
     if (_meshStates.isEmpty()) {
         const FBXGeometry& geometry = _geometry->getFBXGeometry();

         glm::vec3 skinColor, darkSkinColor;
         _owningAvatar->getSkinColors(skinColor, darkSkinColor);

-        for (int i = 0; i < _jointStates.size(); i++) {
+        for (size_t i = 0; i < _jointStates.size(); i++) {
             glPushMatrix();

             glm::vec3 position;
             getJointPosition(i, position);
             Application::getInstance()->loadTranslatedViewMatrix(position);

             glm::quat rotation;
             getJointRotation(i, rotation);
             glm::vec3 axis = glm::axis(rotation);
             glRotatef(glm::angle(rotation), axis.x, axis.y, axis.z);

             glColor4f(skinColor.r, skinColor.g, skinColor.b, alpha);
             const float BALL_RADIUS = 0.005f;
             const int BALL_SUBDIVISIONS = 10;
             glutSolidSphere(BALL_RADIUS * _owningAvatar->getScale(), BALL_SUBDIVISIONS, BALL_SUBDIVISIONS);

             glPopMatrix();

             int parentIndex = geometry.joints[i].parentIndex;
             if (parentIndex == -1) {
                 continue;
             }
             glColor4f(darkSkinColor.r, darkSkinColor.g, darkSkinColor.b, alpha);

             glm::vec3 parentPosition;
             getJointPosition(parentIndex, parentPosition);
             const float STICK_RADIUS = BALL_RADIUS * 0.1f;
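One trade-off with the size_t index introduced above: where the index is later handed to an API that takes a signed int (as with the joint lookups here), the conversion back narrows on 64-bit builds and can surface as MSVC's C4267 instead. An explicit cast at the call site keeps that quiet; a sketch with a hypothetical signed-index API:

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for a joint query that takes a signed index.
    void printJoint(int index) {
        printf("joint %d\n", index);
    }

    int main() {
        std::vector<float> jointStates(3, 0.0f);

        for (size_t i = 0; i < jointStates.size(); i++) {
            // Cast back to int where a signed parameter is expected.
            printJoint(static_cast<int>(i));
        }
        return 0;
    }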
@@ -105,13 +105,13 @@ bool SkeletonModel::render(float alpha) {
                 STICK_RADIUS * _owningAvatar->getScale());
         }
     }

     Model::render(alpha);

     if (Menu::getInstance()->isOptionChecked(MenuOption::CollisionProxies)) {
         renderCollisionProxies(alpha);
     }

     return true;
 }

@@ -130,7 +130,7 @@ void SkeletonModel::applyHandPosition(int jointIndex, const glm::vec3& position)
         return;
     }
     setJointPosition(jointIndex, position);

     const FBXGeometry& geometry = _geometry->getFBXGeometry();
     glm::vec3 handPosition, elbowPosition;
     getJointPosition(jointIndex, handPosition);
@@ -142,7 +142,7 @@ void SkeletonModel::applyHandPosition(int jointIndex, const glm::vec3& position)
     }
     glm::quat handRotation;
     getJointRotation(jointIndex, handRotation, true);

     // align hand with forearm
     float sign = (jointIndex == geometry.rightHandJointIndex) ? 1.0f : -1.0f;
     applyRotationDelta(jointIndex, rotationBetween(handRotation * glm::vec3(-sign, 0.0f, 0.0f), forearmVector), false);
@@ -160,7 +160,7 @@ void SkeletonModel::applyPalmData(int jointIndex, const QVector<int>& fingerJoin
     getJointRotation(jointIndex, palmRotation, true);
     applyRotationDelta(jointIndex, rotationBetween(palmRotation * geometry.palmDirection, palm.getNormal()), false);
     getJointRotation(jointIndex, palmRotation, true);

     // sort the finger indices by raw x, get the average direction
     QVector<IndexValue> fingerIndices;
     glm::vec3 direction;
@@ -175,7 +175,7 @@ void SkeletonModel::applyPalmData(int jointIndex, const QVector<int>& fingerJoin
         fingerIndices.append(indexValue);
     }
     qSort(fingerIndices.begin(), fingerIndices.end());

     // rotate palm according to average finger direction
     float directionLength = glm::length(direction);
     const int MIN_ROTATION_FINGERS = 3;
@@ -183,31 +183,31 @@ void SkeletonModel::applyPalmData(int jointIndex, const QVector<int>& fingerJoin
         applyRotationDelta(jointIndex, rotationBetween(palmRotation * glm::vec3(-sign, 0.0f, 0.0f), direction), false);
         getJointRotation(jointIndex, palmRotation, true);
     }

     // no point in continuing if there are no fingers
     if (palm.getNumFingers() == 0 || fingerJointIndices.isEmpty()) {
         return;
     }

     // match them up as best we can
     float proportion = fingerIndices.size() / (float)fingerJointIndices.size();
-    for (int i = 0; i < fingerJointIndices.size(); i++) {
+    for (size_t i = 0; i < fingerJointIndices.size(); i++) {
         int fingerIndex = fingerIndices.at(roundf(i * proportion)).index;
         glm::vec3 fingerVector = palm.getFingers()[fingerIndex].getTipPosition() -
             palm.getFingers()[fingerIndex].getRootPosition();

         int fingerJointIndex = fingerJointIndices.at(i);
         int fingertipJointIndex = fingertipJointIndices.at(i);
         glm::vec3 jointVector = extractTranslation(geometry.joints.at(fingertipJointIndex).bindTransform) -
             extractTranslation(geometry.joints.at(fingerJointIndex).bindTransform);

         setJointRotation(fingerJointIndex, rotationBetween(palmRotation * jointVector, fingerVector) * palmRotation, true);
     }
 }

 void SkeletonModel::updateJointState(int index) {
     Model::updateJointState(index);

     if (index == _geometry->getFBXGeometry().rootJointIndex) {
         JointState& state = _jointStates[index];
         state.transform[3][0] = 0.0f;
@@ -32,7 +32,7 @@ Faceshift::Faceshift() :
     _rightBlinkIndex(1),
     _leftEyeOpenIndex(8),
     _rightEyeOpenIndex(9),
     _browDownLeftIndex(14),
     _browDownRightIndex(15),
     _browUpCenterIndex(16),
     _browUpLeftIndex(17),
@@ -49,9 +49,9 @@ Faceshift::Faceshift() :
     connect(&_tcpSocket, SIGNAL(connected()), SLOT(noteConnected()));
     connect(&_tcpSocket, SIGNAL(error(QAbstractSocket::SocketError)), SLOT(noteError(QAbstractSocket::SocketError)));
     connect(&_tcpSocket, SIGNAL(readyRead()), SLOT(readFromSocket()));

     connect(&_udpSocket, SIGNAL(readyRead()), SLOT(readPendingDatagrams()));

     _udpSocket.bind(FACESHIFT_PORT);
 }

@@ -67,14 +67,14 @@ void Faceshift::update() {
     // get the euler angles relative to the window
     glm::vec3 eulers = safeEulerAngles(_headRotation * glm::quat(glm::radians(glm::vec3(
         (_eyeGazeLeftPitch + _eyeGazeRightPitch) / 2.0f, (_eyeGazeLeftYaw + _eyeGazeRightYaw) / 2.0f, 0.0f))));

     // compute and subtract the long term average
     const float LONG_TERM_AVERAGE_SMOOTHING = 0.999f;
     if (!_longTermAverageInitialized) {
         _longTermAverageEyePitch = eulers.x;
         _longTermAverageEyeYaw = eulers.y;
         _longTermAverageInitialized = true;

     } else {
         _longTermAverageEyePitch = glm::mix(eulers.x, _longTermAverageEyePitch, LONG_TERM_AVERAGE_SMOOTHING);
         _longTermAverageEyeYaw = glm::mix(eulers.y, _longTermAverageEyeYaw, LONG_TERM_AVERAGE_SMOOTHING);
@@ -107,7 +107,7 @@ void Faceshift::updateFakeCoefficients(float leftBlink, float rightBlink, float
 void Faceshift::setTCPEnabled(bool enabled) {
     if ((_tcpEnabled = enabled)) {
         connectSocket();

     } else {
         _tcpSocket.disconnectFromHost();
     }
@@ -118,7 +118,7 @@ void Faceshift::connectSocket() {
     if (!_tcpRetryCount) {
         qDebug("Faceshift: Connecting...\n");
     }

     _tcpSocket.connectToHost("localhost", FACESHIFT_PORT);
     _tracking = false;
 }
@@ -126,7 +126,7 @@ void Faceshift::connectSocket() {

 void Faceshift::noteConnected() {
     qDebug("Faceshift: Connected.\n");

     // request the list of blendshape names
     string message;
     fsBinaryStream::encode_message(message, fsMsgSendBlendshapeNames());
@@ -176,7 +176,7 @@ void Faceshift::receive(const QByteArray& buffer) {
     if ((_tracking = data.m_trackingSuccessful)) {
         glm::quat newRotation = glm::quat(data.m_headRotation.w, -data.m_headRotation.x,
             data.m_headRotation.y, -data.m_headRotation.z);
         // Compute angular velocity of the head
         glm::quat r = newRotation * glm::inverse(_headRotation);
         float theta = 2 * acos(r.w);
         if (theta > EPSILON) {
@@ -187,7 +187,7 @@ void Faceshift::receive(const QByteArray& buffer) {
             _headAngularVelocity = glm::vec3(0,0,0);
         }
         _headRotation = newRotation;

         const float TRANSLATION_SCALE = 0.02f;
         _headTranslation = glm::vec3(data.m_headTranslation.x, data.m_headTranslation.y,
             -data.m_headTranslation.z) * TRANSLATION_SCALE;
@@ -196,17 +196,17 @@ void Faceshift::receive(const QByteArray& buffer) {
         _eyeGazeRightPitch = -data.m_eyeGazeRightPitch;
         _eyeGazeRightYaw = data.m_eyeGazeRightYaw;
         _blendshapeCoefficients = data.m_coeffs;

         _lastTrackingStateReceived = usecTimestampNow();
     }
     break;
 }
 case fsMsg::MSG_OUT_BLENDSHAPE_NAMES: {
     const vector<string>& names = static_cast<fsMsgBlendshapeNames*>(msg.get())->blendshape_names();
-    for (int i = 0; i < names.size(); i++) {
+    for (size_t i = 0; i < names.size(); i++) {
         if (names[i] == "EyeBlink_L") {
             _leftBlinkIndex = i;

         } else if (names[i] == "EyeBlink_R") {
             _rightBlinkIndex = i;
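Here the container is a std::vector<string>, whose size() returns size_t, so size_t is the exact index type; the matched indices then narrow back into the int members they are stored in. A compressed sketch of the name-matching loop with a trimmed, hypothetical name list:

    #include <string>
    #include <vector>

    int main() {
        // Hypothetical two-entry list in the style of the blendshape names.
        std::vector<std::string> names = {"EyeBlink_L", "EyeBlink_R"};

        int leftBlinkIndex = -1; // mirrors the int member being assigned
        for (size_t i = 0; i < names.size(); i++) {
            if (names[i] == "EyeBlink_L") {
                // Narrowing back to int is deliberate; the list stays small.
                leftBlinkIndex = static_cast<int>(i);
            }
        }
        return leftBlinkIndex == 0 ? 0 : 1;
    }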
@@ -233,10 +233,10 @@ void Faceshift::receive(const QByteArray& buffer) {

         } else if (names[i] == "JawOpen") {
             _jawOpenIndex = i;

         } else if (names[i] == "MouthSmile_L") {
             _mouthSmileLeftIndex = i;

         } else if (names[i] == "MouthSmile_R") {
             _mouthSmileRightIndex = i;
         }
[File diff suppressed because it is too large.]
@@ -1251,7 +1251,7 @@ void OctreeElement::addDeleteHook(OctreeElementDeleteHook* hook) {

 void OctreeElement::removeDeleteHook(OctreeElementDeleteHook* hook) {
     _deleteHooksLock.lockForWrite();
-    for (int i = 0; i < _deleteHooks.size(); i++) {
+    for (unsigned int i = 0; i < _deleteHooks.size(); i++) {
         if (_deleteHooks[i] == hook) {
             _deleteHooks.erase(_deleteHooks.begin() + i);
             break;
@@ -1262,7 +1262,7 @@ void OctreeElement::removeDeleteHook(OctreeElementDeleteHook* hook) {

 void OctreeElement::notifyDeleteHooks() {
     _deleteHooksLock.lockForRead();
-    for (int i = 0; i < _deleteHooks.size(); i++) {
+    for (unsigned int i = 0; i < _deleteHooks.size(); i++) {
         _deleteHooks[i]->elementDeleted(this);
     }
     _deleteHooksLock.unlock();
@@ -1275,7 +1275,7 @@ void OctreeElement::addUpdateHook(OctreeElementUpdateHook* hook) {
 }

 void OctreeElement::removeUpdateHook(OctreeElementUpdateHook* hook) {
-    for (int i = 0; i < _updateHooks.size(); i++) {
+    for (unsigned int i = 0; i < _updateHooks.size(); i++) {
         if (_updateHooks[i] == hook) {
             _updateHooks.erase(_updateHooks.begin() + i);
             return;
@@ -1284,7 +1284,7 @@ void OctreeElement::removeUpdateHook(OctreeElementUpdateHook* hook) {
 }

 void OctreeElement::notifyUpdateHooks() {
-    for (int i = 0; i < _updateHooks.size(); i++) {
+    for (unsigned int i = 0; i < _updateHooks.size(); i++) {
         _updateHooks[i]->elementUpdated(this);
     }
 }
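The four hook loops above switch to unsigned int rather than size_t; an unsigned int index still converts to the vector's size type without a sign change, so the comparison stays warning-free, and the index continues to support erase() through iterator arithmetic. A self-contained sketch of that erase-by-index pattern:

    #include <vector>

    struct Hook {
        virtual void fire() = 0;
        virtual ~Hook() {}
    };

    // Erase-by-index in the style of removeDeleteHook()/removeUpdateHook().
    void removeHook(std::vector<Hook*>& hooks, Hook* hook) {
        for (unsigned int i = 0; i < hooks.size(); i++) {
            if (hooks[i] == hook) {
                hooks.erase(hooks.begin() + i);
                break;
            }
        }
    }

    int main() {
        std::vector<Hook*> hooks;
        removeHook(hooks, nullptr); // safe on an empty vector: the loop never runs
        return 0;
    }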