Mirror of https://github.com/overte-org/overte.git (synced 2025-04-07 17:52:26 +02:00)
fix windows warnings
commit 825c1c8876 (parent 1358f68868)
10 changed files with 459 additions and 459 deletions
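Nearly every hunk below is the same mechanical fix: a loop counter declared as int was being compared against a container size() that returns an unsigned type, which MSVC reports as warning C4018 (signed/unsigned mismatch). A minimal sketch of the pattern, not taken from this commit (the function and vector names are illustrative):

    #include <vector>

    void process(const std::vector<int>& ringBuffers) {
        // warning C4018: '<': signed/unsigned mismatch under MSVC /W3:
        // for (int i = 0; i < ringBuffers.size(); i++) { ... }

        // fixed: give the counter the same signedness as size()
        for (unsigned int i = 0; i < ringBuffers.size(); i++) {
            // ... use ringBuffers[i] ...
        }
    }

The diff applies the same idea file by file, switching counters to unsigned int or size_t depending on the container being traversed.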
@@ -64,7 +64,7 @@ void attachNewBufferToNode(Node *newNode) {

AudioMixer::AudioMixer(const unsigned char* dataBuffer, int numBytes) :
    ThreadedAssignment(dataBuffer, numBytes)
{

}

void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,

@@ -73,79 +73,79 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
    float attenuationCoefficient = 1.0f;
    int numSamplesDelay = 0;
    float weakChannelAmplitudeRatio = 1.0f;

    const int PHASE_DELAY_AT_90 = 20;

    if (bufferToAdd != listeningNodeBuffer) {
        // if the two buffer pointers do not match then these are different buffers

        glm::vec3 listenerPosition = listeningNodeBuffer->getPosition();
        glm::vec3 relativePosition = bufferToAdd->getPosition() - listeningNodeBuffer->getPosition();
        glm::quat inverseOrientation = glm::inverse(listeningNodeBuffer->getOrientation());

        float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
        float radius = 0.0f;

        if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) {
            InjectedAudioRingBuffer* injectedBuffer = (InjectedAudioRingBuffer*) bufferToAdd;
            radius = injectedBuffer->getRadius();
            attenuationCoefficient *= injectedBuffer->getAttenuationRatio();
        }

        if (radius == 0 || (distanceSquareToSource > radius * radius)) {
            // this is either not a spherical source, or the listener is outside the sphere

            if (radius > 0) {
                // this is a spherical source - the distance used for the coefficient
                // needs to be the closest point on the boundary to the source

                // override the distance to the node with the distance to the point on the
                // boundary of the sphere
                distanceSquareToSource -= (radius * radius);

            } else {
                // calculate the angle delivery for off-axis attenuation
                glm::vec3 rotatedListenerPosition = glm::inverse(bufferToAdd->getOrientation()) * relativePosition;

                float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                                   glm::normalize(rotatedListenerPosition));

                const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
                const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;

                float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
                    (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / 90.0f));

                // multiply the current attenuation coefficient by the calculated off axis coefficient
                attenuationCoefficient *= offAxisCoefficient;
            }

            glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;

            const float DISTANCE_SCALE = 2.5f;
            const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
            const float DISTANCE_LOG_BASE = 2.5f;
            const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);

            // calculate the distance coefficient using the distance to this node
            float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
                                             DISTANCE_SCALE_LOG +
                                             (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
            distanceCoefficient = std::min(1.0f, distanceCoefficient);

            // multiply the current attenuation coefficient by the distance coefficient
            attenuationCoefficient *= distanceCoefficient;

            // project the rotated source position vector onto the XZ plane
            rotatedSourcePosition.y = 0.0f;

            // produce an oriented angle about the y-axis
            bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
                                                              glm::normalize(rotatedSourcePosition),
                                                              glm::vec3(0.0f, 1.0f, 0.0f));

            const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;

            // figure out the number of samples of delay and the ratio of the amplitude
            // in the weak channel for audio spatialization
            float sinRatio = fabsf(sinf(glm::radians(bearingRelativeAngleToSource)));

@@ -153,11 +153,11 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
            weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
        }
    }

    // if the bearing relative angle to source is > 0 then the delayed channel is the right one
    int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
    int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;

    for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 2) {
        if ((s / 2) < numSamplesDelay) {
            // pull the earlier sample for the delayed channel

@@ -165,12 +165,12 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
            _clientSamples[s + delayedChannelOffset] = glm::clamp(_clientSamples[s + delayedChannelOffset] + earlierSample,
                                                                  MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
        }

        // pull the current sample for the good channel
        int16_t currentSample = (*bufferToAdd)[s / 2] * attenuationCoefficient;
        _clientSamples[s + goodChannelOffset] = glm::clamp(_clientSamples[s + goodChannelOffset] + currentSample,
                                                           MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);

        if ((s / 2) + numSamplesDelay < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
            // place the current sample at the right spot in the delayed channel
            int16_t clampedSample = glm::clamp((int) (_clientSamples[s + (numSamplesDelay * 2) + delayedChannelOffset]

@@ -183,22 +183,22 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf

void AudioMixer::prepareMixForListeningNode(Node* node) {
    NodeList* nodeList = NodeList::getInstance();

    AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();

    // zero out the client mix for this node
    memset(_clientSamples, 0, sizeof(_clientSamples));

    // loop through all other nodes that have sufficient audio to mix
    for (NodeList::iterator otherNode = nodeList->begin(); otherNode != nodeList->end(); otherNode++) {
        if (otherNode->getLinkedData()) {

            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // enumerate the ARBs attached to the otherNode and add all that should be added to mix
-            for (int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) {
+            for (unsigned int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) {
                PositionalAudioRingBuffer* otherNodeBuffer = otherNodeClientData->getRingBuffers()[i];

                if ((*otherNode != *node
                     || otherNodeBuffer->shouldLoopbackForNode())
                    && otherNodeBuffer->willBeAddedToMix()) {

@@ -217,14 +217,14 @@ void AudioMixer::processDatagram(const QByteArray& dataByteArray, const HifiSock
        || dataByteArray[0] == PACKET_TYPE_INJECT_AUDIO) {
        QUuid nodeUUID = QUuid::fromRfc4122(dataByteArray.mid(numBytesForPacketHeader((unsigned char*) dataByteArray.data()),
                                                              NUM_BYTES_RFC4122_UUID));

        NodeList* nodeList = NodeList::getInstance();

        Node* matchingNode = nodeList->nodeWithUUID(nodeUUID);

        if (matchingNode) {
            nodeList->updateNodeWithData(matchingNode, senderSockAddr, (unsigned char*) dataByteArray.data(), dataByteArray.size());

            if (!matchingNode->getActiveSocket()) {
                // we don't have an active socket for this node, but they're talking to us
                // this means they've heard from us and can reply, let's assume public is active

@@ -238,29 +238,29 @@ void AudioMixer::processDatagram(const QByteArray& dataByteArray, const HifiSock
}

void AudioMixer::run() {

    commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NODE_TYPE_AUDIO_MIXER);

    NodeList* nodeList = NodeList::getInstance();

    const char AUDIO_MIXER_NODE_TYPES_OF_INTEREST[2] = { NODE_TYPE_AGENT, NODE_TYPE_AUDIO_INJECTOR };
    nodeList->setNodeTypesOfInterest(AUDIO_MIXER_NODE_TYPES_OF_INTEREST, sizeof(AUDIO_MIXER_NODE_TYPES_OF_INTEREST));

    nodeList->linkedDataCreateCallback = attachNewBufferToNode;

    int nextFrame = 0;
    timeval startTime;

    gettimeofday(&startTime, NULL);

    int numBytesPacketHeader = numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_MIXED_AUDIO);
    unsigned char clientPacket[NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader];
    populateTypeAndVersion(clientPacket, PACKET_TYPE_MIXED_AUDIO);

    while (!_isFinished) {

        QCoreApplication::processEvents();

        if (_isFinished) {
            break;
        }

@@ -270,33 +270,33 @@ void AudioMixer::run() {
                ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(JITTER_BUFFER_SAMPLES);
            }
        }

        for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
            if (node->getType() == NODE_TYPE_AGENT && node->getActiveSocket() && node->getLinkedData()
                && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) {
                prepareMixForListeningNode(&(*node));

                memcpy(clientPacket + numBytesPacketHeader, _clientSamples, sizeof(_clientSamples));
                nodeList->getNodeSocket().writeDatagram((char*) clientPacket, sizeof(clientPacket),
                                                        node->getActiveSocket()->getAddress(),
                                                        node->getActiveSocket()->getPort());
            }
        }

        // push forward the next output pointers for any audio buffers we used
        for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
            if (node->getLinkedData()) {
                ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend();
            }
        }

        int usecToSleep = usecTimestamp(&startTime) + (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - usecTimestampNow();

        if (usecToSleep > 0) {
            usleep(usecToSleep);
        } else {
            qDebug("Took too much time, not sleeping!\n");
        }

    }
}

@@ -14,19 +14,19 @@
#include "AudioMixerClientData.h"

AudioMixerClientData::~AudioMixerClientData() {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
+    for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
        // delete this attached PositionalAudioRingBuffer
        delete _ringBuffers[i];
    }
}

AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
+    for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
        if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Microphone) {
            return (AvatarAudioRingBuffer*) _ringBuffers[i];
        }
    }

    // no AvatarAudioRingBuffer found - return NULL
    return NULL;
}

@@ -34,49 +34,49 @@ AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const {
int AudioMixerClientData::parseData(unsigned char* packetData, int numBytes) {
    if (packetData[0] == PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO
        || packetData[0] == PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO) {

        // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
        AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();

        if (!avatarRingBuffer) {
            // we don't have an AvatarAudioRingBuffer yet, so add it
            avatarRingBuffer = new AvatarAudioRingBuffer();
            _ringBuffers.push_back(avatarRingBuffer);
        }

        // ask the AvatarAudioRingBuffer instance to parse the data
        avatarRingBuffer->parseData(packetData, numBytes);
    } else {
        // this is injected audio

        // grab the stream identifier for this injected audio
        QByteArray rfcUUID = QByteArray((char*) packetData + numBytesForPacketHeader(packetData) + NUM_BYTES_RFC4122_UUID,
                                        NUM_BYTES_RFC4122_UUID);
        QUuid streamIdentifier = QUuid::fromRfc4122(rfcUUID);

        InjectedAudioRingBuffer* matchingInjectedRingBuffer = NULL;

-        for (int i = 0; i < _ringBuffers.size(); i++) {
+        for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
            if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector
                && ((InjectedAudioRingBuffer*) _ringBuffers[i])->getStreamIdentifier() == streamIdentifier) {
                matchingInjectedRingBuffer = (InjectedAudioRingBuffer*) _ringBuffers[i];
            }
        }

        if (!matchingInjectedRingBuffer) {
            // we don't have a matching injected audio ring buffer, so add it
            matchingInjectedRingBuffer = new InjectedAudioRingBuffer(streamIdentifier);
            _ringBuffers.push_back(matchingInjectedRingBuffer);
        }

        matchingInjectedRingBuffer->parseData(packetData, numBytes);
    }

    return 0;
}

void AudioMixerClientData::checkBuffersBeforeFrameSend(int jitterBufferLengthSamples) {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
+    for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
        if (_ringBuffers[i]->shouldBeAddedToMix(jitterBufferLengthSamples)) {
            // this is a ring buffer that is ready to go
            // set its flag so we know to push its buffer when all is said and done

@@ -86,13 +86,13 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(int jitterBufferLengthSam
}

void AudioMixerClientData::pushBuffersAfterFrameSend() {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
+    for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
        // this was a used buffer, push the output pointer forwards
        PositionalAudioRingBuffer* audioBuffer = _ringBuffers[i];

        if (audioBuffer->willBeAddedToMix()) {
            audioBuffer->shiftReadPosition(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

            audioBuffer->setWillBeAddedToMix(false);
        } else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector
                   && audioBuffer->hasStarted() && audioBuffer->isStarved()) {

@@ -542,7 +542,7 @@ void DomainServer::prepopulateStaticAssignmentFile() {
        QStringList multiConfigList = multiConfig.split(";");

        // read each config to a payload for a VS assignment
-        for (int i = 0; i < multiConfigList.size(); i++) {
+        for (unsigned int i = 0; i < multiConfigList.size(); i++) {
            QString config = multiConfigList.at(i);

            qDebug("config[%d]=%s\n", i, config.toLocal8Bit().constData());

@@ -584,7 +584,7 @@ void DomainServer::prepopulateStaticAssignmentFile() {
        QStringList multiConfigList = multiConfig.split(";");

        // read each config to a payload for a VS assignment
-        for (int i = 0; i < multiConfigList.size(); i++) {
+        for (unsigned int i = 0; i < multiConfigList.size(); i++) {
            QString config = multiConfigList.at(i);

            qDebug("config[%d]=%s\n", i, config.toLocal8Bit().constData());

@@ -96,26 +96,26 @@ QAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode) {
        kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster
    };

    if (mode == QAudio::AudioOutput) {
        propertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
    }

    OSStatus getPropertyError = AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                                           &propertyAddress,
                                                           0,
                                                           NULL,
                                                           &propertySize,
                                                           &defaultDeviceID);

    if (!getPropertyError && propertySize) {
        CFStringRef deviceName = NULL;
        propertySize = sizeof(deviceName);
        propertyAddress.mSelector = kAudioDevicePropertyDeviceNameCFString;
        getPropertyError = AudioObjectGetPropertyData(defaultDeviceID, &propertyAddress, 0,
                                                      NULL, &propertySize, &deviceName);

        if (!getPropertyError && propertySize) {
            // find a device in the list that matches the name we have and return it
            foreach(QAudioDeviceInfo audioDevice, QAudioDeviceInfo::availableDevices(mode)) {

@@ -127,7 +127,7 @@ QAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode) {
        }
    }
#endif

    // fallback for failed lookup is the default device
    return (mode == QAudio::AudioInput) ? QAudioDeviceInfo::defaultInputDevice() : QAudioDeviceInfo::defaultOutputDevice();
}

@@ -138,28 +138,28 @@ bool adjustedFormatForAudioDevice(const QAudioDeviceInfo& audioDevice,
    if (!audioDevice.isFormatSupported(desiredAudioFormat)) {
        qDebug() << "The desired format for audio I/O is" << desiredAudioFormat << "\n";
        qDebug() << "The desired audio format is not supported by this device.\n";

        if (desiredAudioFormat.channelCount() == 1) {
            adjustedAudioFormat = desiredAudioFormat;
            adjustedAudioFormat.setChannelCount(2);

            if (audioDevice.isFormatSupported(adjustedAudioFormat)) {
                return true;
            } else {
                adjustedAudioFormat.setChannelCount(1);
            }
        }

        if (audioDevice.supportedSampleRates().contains(SAMPLE_RATE * 2)) {
            // use 48, which is a simple downsample, upsample
            adjustedAudioFormat = desiredAudioFormat;
            adjustedAudioFormat.setSampleRate(SAMPLE_RATE * 2);

            // return the nearest in case it needs 2 channels
            adjustedAudioFormat = audioDevice.nearestFormat(adjustedAudioFormat);
            return true;
        }

        return false;
    } else {
        // set the adjustedAudioFormat to the desiredAudioFormat, since it will work

@@ -176,15 +176,15 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
    } else {
        float sourceToDestinationFactor = (sourceAudioFormat.sampleRate() / (float) destinationAudioFormat.sampleRate())
            * (sourceAudioFormat.channelCount() / (float) destinationAudioFormat.channelCount());

        // take into account the number of channels in source and destination
        // accommodate the case where we have an output with > 2 channels
        // this is the case with our HDMI capture

        if (sourceToDestinationFactor >= 2) {
            // we need to downsample from 48 to 24
            // for now this only supports a mono output - this would be the case for audio input

            for (int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
                if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
                    destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =

@@ -197,7 +197,7 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
                        + (sourceSamples[i + sourceAudioFormat.channelCount()] / 4);
                }
            }

        } else {
            // upsample from 24 to 48
            // for now this only supports a stereo to stereo conversion - this is our case for network audio to output

@@ -205,10 +205,10 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
            int dtsSampleRateFactor = (destinationAudioFormat.sampleRate() / sourceAudioFormat.sampleRate());
            int sampleShift = destinationAudioFormat.channelCount() * dtsSampleRateFactor;
            int destinationToSourceFactor = (1 / sourceToDestinationFactor);

            for (int i = 0; i < numDestinationSamples; i += sampleShift) {
                sourceIndex = (i / destinationToSourceFactor);

                // fill the L/R channels and make the rest silent
                for (int j = i; j < i + sampleShift; j++) {
                    if (j % destinationAudioFormat.channelCount() == 0) {
@@ -230,7 +230,7 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
const int CALLBACK_ACCELERATOR_RATIO = 2;

void Audio::start() {

    // set up the desired audio format
    _desiredInputFormat.setSampleRate(SAMPLE_RATE);
    _desiredInputFormat.setSampleSize(16);

@@ -238,102 +238,102 @@ void Audio::start() {
    _desiredInputFormat.setSampleType(QAudioFormat::SignedInt);
    _desiredInputFormat.setByteOrder(QAudioFormat::LittleEndian);
    _desiredInputFormat.setChannelCount(1);

    _desiredOutputFormat = _desiredInputFormat;
    _desiredOutputFormat.setChannelCount(2);

    QAudioDeviceInfo inputDeviceInfo = defaultAudioDeviceForMode(QAudio::AudioInput);

    qDebug() << "The audio input device is" << inputDeviceInfo.deviceName() << "\n";

    if (adjustedFormatForAudioDevice(inputDeviceInfo, _desiredInputFormat, _inputFormat)) {
        qDebug() << "The format to be used for audio input is" << _inputFormat << "\n";

        _audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
        _numInputCallbackBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL * _inputFormat.channelCount()
            * (_inputFormat.sampleRate() / SAMPLE_RATE)
            / CALLBACK_ACCELERATOR_RATIO;
        _audioInput->setBufferSize(_numInputCallbackBytes);

        QAudioDeviceInfo outputDeviceInfo = defaultAudioDeviceForMode(QAudio::AudioOutput);

        qDebug() << "The audio output device is" << outputDeviceInfo.deviceName() << "\n";

        if (adjustedFormatForAudioDevice(outputDeviceInfo, _desiredOutputFormat, _outputFormat)) {
            qDebug() << "The format to be used for audio output is" << _outputFormat << "\n";

            _inputRingBuffer.resizeForFrameSize(_numInputCallbackBytes * CALLBACK_ACCELERATOR_RATIO / sizeof(int16_t));
            _inputDevice = _audioInput->start();
            connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));

            // setup our general output device for audio-mixer audio
            _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
            _outputDevice = _audioOutput->start();

            // setup a loopback audio output device
            _loopbackAudioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);

            gettimeofday(&_lastReceiveTime, NULL);
        }

        return;
    }

    qDebug() << "Unable to set up audio I/O because of a problem with input or output formats.\n";
}

void Audio::handleAudioInput() {
    static char monoAudioDataPacket[MAX_PACKET_SIZE];

    static int numBytesPacketHeader = numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO);
    static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat) + NUM_BYTES_RFC4122_UUID;

    static int16_t* monoAudioSamples = (int16_t*) (monoAudioDataPacket + leadingBytes);

    static float inputToNetworkInputRatio = _numInputCallbackBytes * CALLBACK_ACCELERATOR_RATIO
        / NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;

    static int inputSamplesRequired = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio;

    QByteArray inputByteArray = _inputDevice->readAll();

    if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio)) {
        // if this person wants local loopback add that to the locally injected audio

        if (!_loopbackOutputDevice) {
            // we didn't have the loopback output device going so set that up now
            _loopbackOutputDevice = _loopbackAudioOutput->start();
        }

        if (_inputFormat == _outputFormat) {
            _loopbackOutputDevice->write(inputByteArray);
        } else {
            static float loopbackOutputToInputRatio = (_outputFormat.sampleRate() / (float) _inputFormat.sampleRate())
                * (_outputFormat.channelCount() / _inputFormat.channelCount());

            QByteArray loopBackByteArray(inputByteArray.size() * loopbackOutputToInputRatio, 0);

            linearResampling((int16_t*) inputByteArray.data(), (int16_t*) loopBackByteArray.data(),
                             inputByteArray.size() / sizeof(int16_t),
                             loopBackByteArray.size() / sizeof(int16_t), _inputFormat, _outputFormat);

            _loopbackOutputDevice->write(loopBackByteArray);
        }
    }

    _inputRingBuffer.writeData(inputByteArray.data(), inputByteArray.size());

    while (_inputRingBuffer.samplesAvailable() > inputSamplesRequired) {

        int16_t inputAudioSamples[inputSamplesRequired];
        _inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);

        // zero out the monoAudioSamples array and the locally injected audio
        memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);

        // zero out the locally injected audio in preparation for audio procedural sounds
        memset(_localInjectedSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);

        if (!_muted) {
            // we aren't muted, downsample the input audio
            linearResampling((int16_t*) inputAudioSamples,

@@ -341,15 +341,15 @@ void Audio::handleAudioInput() {
                             inputSamplesRequired,
                             NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
                             _inputFormat, _desiredInputFormat);

            float loudness = 0;

            for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
                loudness += fabsf(monoAudioSamples[i]);
            }

            _lastInputLoudness = loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;

            // add input data just written to the scope
            QMetaObject::invokeMethod(_scope, "addSamples", Qt::QueuedConnection,
                                      Q_ARG(QByteArray, QByteArray((char*) monoAudioSamples,

@@ -359,47 +359,47 @@ void Audio::handleAudioInput() {
            // our input loudness is 0, since we're muted
            _lastInputLoudness = 0;
        }

        // add procedural effects to the appropriate input samples
        addProceduralSounds(monoAudioSamples,
                            NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

        NodeList* nodeList = NodeList::getInstance();
        Node* audioMixer = nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER);

        if (audioMixer && nodeList->getNodeActiveSocketOrPing(audioMixer)) {
            MyAvatar* interfaceAvatar = Application::getInstance()->getAvatar();

            glm::vec3 headPosition = interfaceAvatar->getHeadJointPosition();
            glm::quat headOrientation = interfaceAvatar->getHead().getOrientation();

            // we need the amount of bytes in the buffer + 1 for type
            // + 12 for 3 floats for position + float for bearing + 1 attenuation byte

            PACKET_TYPE packetType = Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)
                ? PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO : PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO;

            char* currentPacketPtr = monoAudioDataPacket + populateTypeAndVersion((unsigned char*) monoAudioDataPacket,
                                                                                  packetType);

            // pack Source Data
            QByteArray rfcUUID = NodeList::getInstance()->getOwnerUUID().toRfc4122();
            memcpy(currentPacketPtr, rfcUUID.constData(), rfcUUID.size());
            currentPacketPtr += rfcUUID.size();

            // memcpy the three float positions
            memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
            currentPacketPtr += (sizeof(headPosition));

            // memcpy our orientation
            memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
            currentPacketPtr += sizeof(headOrientation);

            nodeList->getNodeSocket().writeDatagram(monoAudioDataPacket,
                                                    NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes,
                                                    audioMixer->getActiveSocket()->getAddress(),
                                                    audioMixer->getActiveSocket()->getPort());

            Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
                .updateValue(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
        }
@@ -409,18 +409,18 @@ void Audio::handleAudioInput() {
void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
    const int NUM_INITIAL_PACKETS_DISCARD = 3;
    const int STANDARD_DEVIATION_SAMPLE_COUNT = 500;

    timeval currentReceiveTime;
    gettimeofday(&currentReceiveTime, NULL);
    _totalPacketsReceived++;

    double timeDiff = diffclock(&_lastReceiveTime, &currentReceiveTime);

    // Discard first few received packets for computing jitter (often they pile up on start)
    if (_totalPacketsReceived > NUM_INITIAL_PACKETS_DISCARD) {
        _stdev.addValue(timeDiff);
    }

    if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
        _measuredJitter = _stdev.getStDev();
        _stdev.reset();

@@ -432,17 +432,17 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
            setJitterBufferSamples(glm::clamp((int)newJitterBufferSamples, 0, MAX_JITTER_BUFFER_SAMPLES));
        }
    }

    _ringBuffer.parseData((unsigned char*) audioByteArray.data(), audioByteArray.size());

    static float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
        * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());

    static int numRequiredOutputSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO / networkOutputToOutputRatio;

    QByteArray outputBuffer;
    outputBuffer.resize(numRequiredOutputSamples * sizeof(int16_t));

    // if there is anything in the ring buffer, decide what to do
    if (_ringBuffer.samplesAvailable() > 0) {
        if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO

@@ -452,61 +452,61 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
        } else {
            // We are either already playing back, or we have enough audio to start playing back.
            _ringBuffer.setIsStarved(false);

            // copy the samples we'll resample from the ring buffer - this also
            // pushes the read pointer of the ring buffer forwards
            int16_t ringBufferSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO];
            _ringBuffer.readSamples(ringBufferSamples, NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);

            // add the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL from each QByteArray
            // in our _localInjectionByteArrays QVector to the _localInjectedSamples

            // add to the output samples whatever is in the _localAudioOutput byte array
            // that lets this user hear sound effects and loopback (if enabled)

-            for (int b = 0; b < _localInjectionByteArrays.size(); b++) {
+            for (unsigned int b = 0; b < _localInjectionByteArrays.size(); b++) {
                QByteArray audioByteArray = _localInjectionByteArrays.at(b);

                int16_t* byteArraySamples = (int16_t*) audioByteArray.data();

                int samplesToRead = MIN(audioByteArray.size() / sizeof(int16_t),
                                        NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

                for (int i = 0; i < samplesToRead; i++) {
                    _localInjectedSamples[i] = glm::clamp(_localInjectedSamples[i] + byteArraySamples[i],
                                                          MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
                }

                if (samplesToRead < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
                    // there isn't anything left to inject from this byte array, remove it from the vector
                    _localInjectionByteArrays.remove(b);
                } else {
                    // pull out the bytes we just read for outputs
                    audioByteArray.remove(0, samplesToRead * sizeof(int16_t));

                    // still data left to read - replace the byte array in the QVector with the smaller one
                    _localInjectionByteArrays.replace(b, audioByteArray);
                }
            }

            for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
                ringBufferSamples[i * 2] = glm::clamp(ringBufferSamples[i * 2] + _localInjectedSamples[i],
                                                      MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
                ringBufferSamples[(i * 2) + 1] = glm::clamp(ringBufferSamples[(i * 2) + 1] + _localInjectedSamples[i],
                                                            MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
            }

            // copy the packet from the RB to the output
            linearResampling(ringBufferSamples,
                             (int16_t*) outputBuffer.data(),
                             NETWORK_BUFFER_LENGTH_SAMPLES_STEREO,
                             numRequiredOutputSamples,
                             _desiredOutputFormat, _outputFormat);

            if (_outputDevice) {

                _outputDevice->write(outputBuffer);

                // add output (@speakers) data just written to the scope
                QMetaObject::invokeMethod(_scope, "addSamples", Qt::QueuedConnection,
                                          Q_ARG(QByteArray, QByteArray((char*) ringBufferSamples,

@@ -514,7 +514,7 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
                                          Q_ARG(bool, true), Q_ARG(bool, false));
            }
        }

    } else if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
        // we don't have any audio data left in the output buffer, and the ring buffer from
        // the network has nothing in it either - we just starved

@@ -522,9 +522,9 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
        _ringBuffer.setIsStarved(true);
        _numFramesDisplayStarve = 10;
    }

    Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());

    _lastReceiveTime = currentReceiveTime;
}

@@ -541,59 +541,59 @@ void Audio::render(int screenWidth, int screenHeight) {
        glLineWidth(2.0);
        glBegin(GL_LINES);
        glColor3f(1,1,1);

        int startX = 20.0;
        int currentX = startX;
        int topY = screenHeight - 40;
        int bottomY = screenHeight - 20;
        float frameWidth = 20.0;
        float halfY = topY + ((bottomY - topY) / 2.0);

        // draw the lines for the base of the ring buffer

        glVertex2f(currentX, topY);
        glVertex2f(currentX, bottomY);

        for (int i = 0; i < RING_BUFFER_LENGTH_FRAMES; i++) {
            glVertex2f(currentX, halfY);
            glVertex2f(currentX + frameWidth, halfY);
            currentX += frameWidth;

            glVertex2f(currentX, topY);
            glVertex2f(currentX, bottomY);
        }
        glEnd();

        // show a bar with the amount of audio remaining in ring buffer and output device
        // beyond the current playback

        int bytesLeftInAudioOutput = _audioOutput->bufferSize() - _audioOutput->bytesFree();
        float secondsLeftForAudioOutput = (bytesLeftInAudioOutput / sizeof(int16_t))
            / ((float) _outputFormat.sampleRate() * _outputFormat.channelCount());
        float secondsLeftForRingBuffer = _ringBuffer.samplesAvailable()
            / ((float) _desiredOutputFormat.sampleRate() * _desiredOutputFormat.channelCount());
        float msLeftForAudioOutput = (secondsLeftForAudioOutput + secondsLeftForRingBuffer) * 1000;

        if (_numFramesDisplayStarve == 0) {
            glColor3f(0, 1, 0);
        } else {
            glColor3f(0.5 + (_numFramesDisplayStarve / 20.0f), 0, 0);
            _numFramesDisplayStarve--;
        }

        if (_averagedLatency == 0.0) {
            _averagedLatency = msLeftForAudioOutput;
        } else {
            _averagedLatency = 0.99f * _averagedLatency + 0.01f * (msLeftForAudioOutput);
        }

        glBegin(GL_QUADS);
        glVertex2f(startX, topY + 2);
        glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth, topY + 2);
        glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth, bottomY - 2);
        glVertex2f(startX, bottomY - 2);
        glEnd();

        // Show a yellow bar with the averaged msecs latency you are hearing (from time of packet receipt)
        glColor3f(1,1,0);
        glBegin(GL_QUADS);

@@ -602,13 +602,13 @@ void Audio::render(int screenWidth, int screenHeight) {
        glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth + 2, bottomY + 2);
        glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 2, bottomY + 2);
        glEnd();

        char out[40];
        sprintf(out, "%3.0f\n", _averagedLatency);
        drawtext(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 10, topY - 9, 0.10, 0, 1, 0, out, 1,1,0);

        // Show a red bar with the 'start' point of one frame plus the jitter buffer

        glColor3f(1, 0, 0);
        int jitterBufferPels = (1.f + (float)getJitterBufferSamples()
            / (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL) * frameWidth;

@@ -620,14 +620,14 @@ void Audio::render(int screenWidth, int screenHeight) {
        } else {
            drawtext(startX, bottomY + 12, 0.10, 0, 1, 0, out, 1, 0, 0);
        }

        glBegin(GL_QUADS);
        glVertex2f(startX + jitterBufferPels - 2, topY - 2);
        glVertex2f(startX + jitterBufferPels + 2, topY - 2);
        glVertex2f(startX + jitterBufferPels + 2, bottomY + 2);
        glVertex2f(startX + jitterBufferPels - 2, bottomY + 2);
        glEnd();

    }
    renderToolIcon(screenHeight);
}

@@ -638,12 +638,12 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
    const float MIN_AUDIBLE_VELOCITY = 0.1;
    const int VOLUME_BASELINE = 400;
    const float SOUND_PITCH = 8.f;

    float speed = glm::length(_lastVelocity);
    float volume = VOLUME_BASELINE * (1.f - speed / MAX_AUDIBLE_VELOCITY);

    float sample;

    // Travelling noise
    // Add a noise-modulated sinewave with volume that tapers off with speed increasing
    if ((speed > MIN_AUDIBLE_VELOCITY) && (speed < MAX_AUDIBLE_VELOCITY)) {

@@ -661,23 +661,23 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
    if (_collisionSoundMagnitude > COLLISION_SOUND_CUTOFF_LEVEL) {
        for (int i = 0; i < numSamples; i++) {
            t = (float) _proceduralEffectSample + (float) i;

            sample = sinf(t * _collisionSoundFrequency)
                + sinf(t * _collisionSoundFrequency / DOWN_TWO_OCTAVES)
                + sinf(t * _collisionSoundFrequency / DOWN_FOUR_OCTAVES * UP_MAJOR_FIFTH);
            sample *= _collisionSoundMagnitude * COLLISION_SOUND_MAX_VOLUME;

            int16_t collisionSample = (int16_t) sample;

            monoInput[i] = glm::clamp(monoInput[i] + collisionSample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
            _localInjectedSamples[i] = glm::clamp(_localInjectedSamples[i] + collisionSample,
                                                  MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);

            _collisionSoundMagnitude *= _collisionSoundDuration;
        }
    }
    _proceduralEffectSample += numSamples;

    // Add a drum sound
    const float MAX_VOLUME = 32000.f;
    const float MAX_DURATION = 2.f;

@@ -690,13 +690,13 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
        sample = sinf(t * frequency);
        sample += ((randFloat() - 0.5f) * NOISE_MAGNITUDE);
        sample *= _drumSoundVolume * MAX_VOLUME;

        int16_t collisionSample = (int16_t) sample;

        monoInput[i] = glm::clamp(monoInput[i] + collisionSample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
        _localInjectedSamples[i] = glm::clamp(_localInjectedSamples[i] + collisionSample,
                                              MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);

        _drumSoundVolume *= (1.f - _drumSoundDecay);
    }
    _drumSoundSample += numSamples;

@@ -730,46 +730,46 @@ void Audio::handleAudioByteArray(const QByteArray& audioByteArray) {
}

void Audio::renderToolIcon(int screenHeight) {

    _iconBounds = QRect(ICON_LEFT, screenHeight - BOTTOM_PADDING, ICON_SIZE, ICON_SIZE);
    glEnable(GL_TEXTURE_2D);

    glBindTexture(GL_TEXTURE_2D, _micTextureId);
    glColor3f(1, 1, 1);
    glBegin(GL_QUADS);

    glTexCoord2f(1, 1);
    glVertex2f(_iconBounds.left(), _iconBounds.top());

    glTexCoord2f(0, 1);
    glVertex2f(_iconBounds.right(), _iconBounds.top());

    glTexCoord2f(0, 0);
    glVertex2f(_iconBounds.right(), _iconBounds.bottom());

    glTexCoord2f(1, 0);
    glVertex2f(_iconBounds.left(), _iconBounds.bottom());

    glEnd();

    if (_muted) {
        glBindTexture(GL_TEXTURE_2D, _muteTextureId);
        glBegin(GL_QUADS);

        glTexCoord2f(1, 1);
        glVertex2f(_iconBounds.left(), _iconBounds.top());

        glTexCoord2f(0, 1);
        glVertex2f(_iconBounds.right(), _iconBounds.top());

        glTexCoord2f(0, 0);
        glVertex2f(_iconBounds.right(), _iconBounds.bottom());

        glTexCoord2f(1, 0);
        glVertex2f(_iconBounds.left(), _iconBounds.bottom());

        glEnd();
    }

    glDisable(GL_TEXTURE_2D);
}

@@ -34,33 +34,33 @@ const HifiSockAddr& DataServerClient::dataServerSockAddr() {
void DataServerClient::putValueForKey(const QString& key, const char* value) {
    QString clientString = Application::getInstance()->getProfile()->getUserString();
    if (!clientString.isEmpty()) {

        unsigned char* putPacket = new unsigned char[MAX_PACKET_SIZE];

        // setup the header for this packet
        int numPacketBytes = populateTypeAndVersion(putPacket, PACKET_TYPE_DATA_SERVER_PUT);

        // pack the client UUID, null terminated
        memcpy(putPacket + numPacketBytes, clientString.toLocal8Bit().constData(), clientString.toLocal8Bit().size());
        numPacketBytes += clientString.toLocal8Bit().size();
        putPacket[numPacketBytes++] = '\0';

        // pack a 1 to designate that we are putting a single value
        putPacket[numPacketBytes++] = 1;

        // pack the key, null terminated
        strcpy((char*) putPacket + numPacketBytes, key.toLocal8Bit().constData());
        numPacketBytes += key.size();
        putPacket[numPacketBytes++] = '\0';

        // pack the value, null terminated
        strcpy((char*) putPacket + numPacketBytes, value);
        numPacketBytes += strlen(value);
        putPacket[numPacketBytes++] = '\0';

        // add the putPacket to our vector of unconfirmed packets, will be deleted once put is confirmed
        // _unmatchedPackets.insert(std::pair<unsigned char*, int>(putPacket, numPacketBytes));

        // send this put request to the data server
        NodeList::getInstance()->getNodeSocket().writeDatagram((char*) putPacket, numPacketBytes,
                                                               dataServerSockAddr().getAddress(),

@@ -81,27 +81,27 @@ void DataServerClient::getValuesForKeysAndUUID(const QStringList& keys, const QU
void DataServerClient::getValuesForKeysAndUserString(const QStringList& keys, const QString& userString) {
    if (!userString.isEmpty() && keys.size() <= UCHAR_MAX) {
        unsigned char* getPacket = new unsigned char[MAX_PACKET_SIZE];

        // setup the header for this packet
        int numPacketBytes = populateTypeAndVersion(getPacket, PACKET_TYPE_DATA_SERVER_GET);

        // pack the user string (could be username or UUID string), null-terminate
        memcpy(getPacket + numPacketBytes, userString.toLocal8Bit().constData(), userString.toLocal8Bit().size());
        numPacketBytes += userString.toLocal8Bit().size();
        getPacket[numPacketBytes++] = '\0';

        // pack one byte to designate the number of keys
        getPacket[numPacketBytes++] = keys.size();

        QString keyString = keys.join(MULTI_KEY_VALUE_SEPARATOR);

        // pack the key string, null terminated
        strcpy((char*) getPacket + numPacketBytes, keyString.toLocal8Bit().constData());
        numPacketBytes += keyString.size() + sizeof('\0');

        // add the getPacket to our vector of unconfirmed packets, will be deleted once we get a response from the nameserver
        // _unmatchedPackets.insert(std::pair<unsigned char*, int>(getPacket, numPacketBytes));

        // send the get to the data server
        NodeList::getInstance()->getNodeSocket().writeDatagram((char*) getPacket, numPacketBytes,
                                                               dataServerSockAddr().getAddress(),

@@ -120,25 +120,25 @@ void DataServerClient::processConfirmFromDataServer(unsigned char* packetData, i
void DataServerClient::processSendFromDataServer(unsigned char* packetData, int numPacketBytes) {
    // pull the user string from the packet so we know who to associate this with
    int numHeaderBytes = numBytesForPacketHeader(packetData);

    char* userStringPosition = (char*) packetData + numHeaderBytes;

    QString userString(QByteArray(userStringPosition, strlen(userStringPosition)));

    QUuid userUUID(userString);

    char* keysPosition = (char*) packetData + numHeaderBytes + strlen(userStringPosition)
        + sizeof('\0') + sizeof(unsigned char);
    char* valuesPosition = keysPosition + strlen(keysPosition) + sizeof('\0');

    QStringList keyList = QString(keysPosition).split(MULTI_KEY_VALUE_SEPARATOR);
    QStringList valueList = QString(valuesPosition).split(MULTI_KEY_VALUE_SEPARATOR);

    // user string was UUID, find matching avatar and associate data
-    for (int i = 0; i < keyList.size(); i++) {
+    for (size_t i = 0; i < keyList.size(); i++) {
        if (valueList[i] != " ") {
            if (keyList[i] == DataServerKey::FaceMeshURL) {

                if (userUUID.isNull() || userUUID == Application::getInstance()->getProfile()->getUUID()) {
                    qDebug("Changing user's face model URL to %s\n", valueList[i].toLocal8Bit().constData());
                    Application::getInstance()->getProfile()->setFaceModelURL(QUrl(valueList[i]));

@@ -148,7 +148,7 @@ void DataServerClient::processSendFromDataServer(unsigned char* packetData, int
                    for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
                        if (node->getLinkedData() != NULL && node->getType() == NODE_TYPE_AGENT) {
                            Avatar* avatar = (Avatar *) node->getLinkedData();

                            if (avatar->getUUID() == userUUID) {
                                QMetaObject::invokeMethod(&avatar->getHead().getFaceModel(),
                                                          "setURL", Q_ARG(QUrl, QUrl(valueList[i])));

@@ -157,7 +157,7 @@ void DataServerClient::processSendFromDataServer(unsigned char* packetData, int
                    }
                }
            } else if (keyList[i] == DataServerKey::SkeletonURL) {

                if (userUUID.isNull() || userUUID == Application::getInstance()->getProfile()->getUUID()) {
                    qDebug("Changing user's skeleton URL to %s\n", valueList[i].toLocal8Bit().constData());
                    Application::getInstance()->getProfile()->setSkeletonModelURL(QUrl(valueList[i]));

@@ -167,7 +167,7 @@ void DataServerClient::processSendFromDataServer(unsigned char* packetData, int
                    for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
                        if (node->getLinkedData() != NULL && node->getType() == NODE_TYPE_AGENT) {
                            Avatar* avatar = (Avatar *) node->getLinkedData();

                            if (avatar->getUUID() == userUUID) {
                                QMetaObject::invokeMethod(&avatar->getSkeletonModel(), "setURL",
                                                          Q_ARG(QUrl, QUrl(valueList[i])));

@@ -177,33 +177,33 @@ void DataServerClient::processSendFromDataServer(unsigned char* packetData, int
                }
            } else if (keyList[i] == DataServerKey::Domain && keyList[i + 1] == DataServerKey::Position
                       && valueList[i] != " " && valueList[i + 1] != " ") {

                QStringList coordinateItems = valueList[i + 1].split(',');

                if (coordinateItems.size() == 3) {

                    // send a node kill request, indicating to other clients that they should play the "disappeared" effect
                    NodeList::getInstance()->sendKillNode(&NODE_TYPE_AVATAR_MIXER, 1);

                    qDebug() << "Changing domain to" << valueList[i].toLocal8Bit().constData() <<
                        "and position to" << valueList[i + 1].toLocal8Bit().constData() <<
                        "to go to" << userString << "\n";

                    NodeList::getInstance()->setDomainHostname(valueList[i]);

                    glm::vec3 newPosition(coordinateItems[0].toFloat(),
                                          coordinateItems[1].toFloat(),
                                          coordinateItems[2].toFloat());
                    Application::getInstance()->getAvatar()->setPosition(newPosition);
                }

            } else if (keyList[i] == DataServerKey::UUID) {
                // this is the user's UUID - set it on the profile
                Application::getInstance()->getProfile()->setUUID(valueList[i]);
            }
        }
    }

    // remove the matched packet from our map so it isn't re-sent to the data-server
    // removeMatchedPacketFromMap(packetData, numPacketBytes);
}

@@ -228,12 +228,12 @@ void DataServerClient::removeMatchedPacketFromMap(unsigned char* packetData, int
        if (memcmp(mapIterator->first + sizeof(PACKET_TYPE),
                   packetData + sizeof(PACKET_TYPE),
                   numPacketBytes - sizeof(PACKET_TYPE)) == 0) {

            // this is a match - remove the confirmed packet from the vector and delete associated member
            // so it isn't sent back out
            delete[] mapIterator->first;
            _unmatchedPackets.erase(mapIterator);

            // we've matched the packet - bail out
            break;
        }

@@ -919,7 +919,7 @@ void MyAvatar::updateChatCircle(float deltaTime) {

    // compute the accumulated centers
    glm::vec3 center = _position;
-    for (int i = 0; i < sortedAvatars.size(); i++) {
+    for (size_t i = 0; i < sortedAvatars.size(); i++) {
        SortedAvatar& sortedAvatar = sortedAvatars[i];
        sortedAvatar.accumulatedCenter = (center += sortedAvatar.avatar->getPosition()) / (i + 2.0f);
    }

@ -21,21 +21,21 @@ void SkeletonModel::simulate(float deltaTime) {
|
|||
if (!isActive()) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
setTranslation(_owningAvatar->getPosition());
|
||||
setRotation(_owningAvatar->getOrientation() * glm::angleAxis(180.0f, 0.0f, 1.0f, 0.0f));
|
||||
const float MODEL_SCALE = 0.0006f;
|
||||
setScale(glm::vec3(1.0f, 1.0f, 1.0f) * _owningAvatar->getScale() * MODEL_SCALE);
|
||||
|
||||
|
||||
Model::simulate(deltaTime);
|
||||
|
||||
// find the left and rightmost active Leap palms
|
||||
int leftPalmIndex, rightPalmIndex;
|
||||
HandData& hand = _owningAvatar->getHand();
|
||||
int leftPalmIndex, rightPalmIndex;
|
||||
HandData& hand = _owningAvatar->getHand();
|
||||
hand.getLeftRightPalmIndices(leftPalmIndex, rightPalmIndex);
|
||||
|
||||
|
||||
const float HAND_RESTORATION_RATE = 0.25f;
|
||||
|
||||
|
||||
const FBXGeometry& geometry = _geometry->getFBXGeometry();
|
||||
if (leftPalmIndex == -1) {
|
||||
// no Leap data; set hands from mouse
|
||||
|
@ -45,13 +45,13 @@ void SkeletonModel::simulate(float deltaTime) {
|
|||
applyHandPosition(geometry.rightHandJointIndex, _owningAvatar->getHandPosition());
|
||||
}
|
||||
restoreLeftHandPosition(HAND_RESTORATION_RATE);
|
||||
|
||||
|
||||
} else if (leftPalmIndex == rightPalmIndex) {
|
||||
// right hand only
|
||||
applyPalmData(geometry.rightHandJointIndex, geometry.rightFingerJointIndices, geometry.rightFingertipJointIndices,
|
||||
applyPalmData(geometry.rightHandJointIndex, geometry.rightFingerJointIndices, geometry.rightFingertipJointIndices,
|
||||
hand.getPalms()[leftPalmIndex]);
|
||||
restoreLeftHandPosition(HAND_RESTORATION_RATE);
|
||||
|
||||
|
||||
} else {
|
||||
applyPalmData(geometry.leftHandJointIndex, geometry.leftFingerJointIndices, geometry.leftFingertipJointIndices,
|
||||
hand.getPalms()[leftPalmIndex]);
|
||||
|
@ -65,39 +65,39 @@ bool SkeletonModel::render(float alpha) {
|
|||
if (_jointStates.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
// only render the balls and sticks if the skeleton has no meshes
|
||||
if (_meshStates.isEmpty()) {
|
||||
const FBXGeometry& geometry = _geometry->getFBXGeometry();
|
||||
|
||||
|
||||
glm::vec3 skinColor, darkSkinColor;
|
||||
_owningAvatar->getSkinColors(skinColor, darkSkinColor);
|
||||
|
||||
for (int i = 0; i < _jointStates.size(); i++) {
|
||||
|
||||
for (size_t i = 0; i < _jointStates.size(); i++) {
|
||||
glPushMatrix();
|
||||
|
||||
|
||||
glm::vec3 position;
|
||||
getJointPosition(i, position);
|
||||
Application::getInstance()->loadTranslatedViewMatrix(position);
|
||||
|
||||
|
||||
glm::quat rotation;
|
||||
getJointRotation(i, rotation);
|
||||
glm::vec3 axis = glm::axis(rotation);
|
||||
glRotatef(glm::angle(rotation), axis.x, axis.y, axis.z);
|
||||
|
||||
|
||||
glColor4f(skinColor.r, skinColor.g, skinColor.b, alpha);
|
||||
const float BALL_RADIUS = 0.005f;
|
||||
const int BALL_SUBDIVISIONS = 10;
|
||||
glutSolidSphere(BALL_RADIUS * _owningAvatar->getScale(), BALL_SUBDIVISIONS, BALL_SUBDIVISIONS);
|
||||
|
||||
|
||||
glPopMatrix();
|
||||
|
||||
|
||||
int parentIndex = geometry.joints[i].parentIndex;
|
||||
if (parentIndex == -1) {
|
||||
continue;
|
||||
}
|
||||
glColor4f(darkSkinColor.r, darkSkinColor.g, darkSkinColor.b, alpha);
|
||||
|
||||
|
||||
glm::vec3 parentPosition;
|
||||
getJointPosition(parentIndex, parentPosition);
|
||||
const float STICK_RADIUS = BALL_RADIUS * 0.1f;
|
||||
|
@ -105,13 +105,13 @@ bool SkeletonModel::render(float alpha) {
|
|||
STICK_RADIUS * _owningAvatar->getScale());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Model::render(alpha);
|
||||
|
||||
|
||||
if (Menu::getInstance()->isOptionChecked(MenuOption::CollisionProxies)) {
|
||||
renderCollisionProxies(alpha);
|
||||
}
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -130,7 +130,7 @@ void SkeletonModel::applyHandPosition(int jointIndex, const glm::vec3& position)
|
|||
return;
|
||||
}
|
||||
setJointPosition(jointIndex, position);
|
||||
|
||||
|
||||
const FBXGeometry& geometry = _geometry->getFBXGeometry();
|
||||
glm::vec3 handPosition, elbowPosition;
|
||||
getJointPosition(jointIndex, handPosition);
|
||||
|
@ -142,7 +142,7 @@ void SkeletonModel::applyHandPosition(int jointIndex, const glm::vec3& position)
|
|||
}
|
||||
glm::quat handRotation;
|
||||
getJointRotation(jointIndex, handRotation, true);
|
||||
|
||||
|
||||
// align hand with forearm
|
||||
float sign = (jointIndex == geometry.rightHandJointIndex) ? 1.0f : -1.0f;
|
||||
applyRotationDelta(jointIndex, rotationBetween(handRotation * glm::vec3(-sign, 0.0f, 0.0f), forearmVector), false);
|
||||
|
@@ -160,7 +160,7 @@ void SkeletonModel::applyPalmData(int jointIndex, const QVector<int>& fingerJoin
     getJointRotation(jointIndex, palmRotation, true);
     applyRotationDelta(jointIndex, rotationBetween(palmRotation * geometry.palmDirection, palm.getNormal()), false);
     getJointRotation(jointIndex, palmRotation, true);

     // sort the finger indices by raw x, get the average direction
     QVector<IndexValue> fingerIndices;
     glm::vec3 direction;

@@ -175,7 +175,7 @@ void SkeletonModel::applyPalmData(int jointIndex, const QVector<int>& fingerJoin
         fingerIndices.append(indexValue);
     }
     qSort(fingerIndices.begin(), fingerIndices.end());

     // rotate palm according to average finger direction
     float directionLength = glm::length(direction);
     const int MIN_ROTATION_FINGERS = 3;
@@ -183,31 +183,31 @@ void SkeletonModel::applyPalmData(int jointIndex, const QVector<int>& fingerJoin
         applyRotationDelta(jointIndex, rotationBetween(palmRotation * glm::vec3(-sign, 0.0f, 0.0f), direction), false);
         getJointRotation(jointIndex, palmRotation, true);
     }

     // no point in continuing if there are no fingers
     if (palm.getNumFingers() == 0 || fingerJointIndices.isEmpty()) {
         return;
     }

     // match them up as best we can
     float proportion = fingerIndices.size() / (float)fingerJointIndices.size();
-    for (int i = 0; i < fingerJointIndices.size(); i++) {
+    for (size_t i = 0; i < fingerJointIndices.size(); i++) {
         int fingerIndex = fingerIndices.at(roundf(i * proportion)).index;
         glm::vec3 fingerVector = palm.getFingers()[fingerIndex].getTipPosition() -
             palm.getFingers()[fingerIndex].getRootPosition();

         int fingerJointIndex = fingerJointIndices.at(i);
         int fingertipJointIndex = fingertipJointIndices.at(i);
         glm::vec3 jointVector = extractTranslation(geometry.joints.at(fingertipJointIndex).bindTransform) -
             extractTranslation(geometry.joints.at(fingerJointIndex).bindTransform);

         setJointRotation(fingerJointIndex, rotationBetween(palmRotation * jointVector, fingerVector) * palmRotation, true);
     }
 }
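The proportion logic in this hunk spreads however many tracked fingers the palm reports across the model's finger joints: joint i samples tracked finger round(i * proportion). A tiny standalone illustration with hypothetical counts (three tracked fingers, five model joints):

    #include <cmath>
    #include <cstdio>

    int main() {
        const int numTrackedFingers = 3;
        const int numFingerJoints = 5;
        float proportion = numTrackedFingers / (float)numFingerJoints; // 0.6

        for (int i = 0; i < numFingerJoints; i++) {
            int fingerIndex = (int)roundf(i * proportion);
            printf("joint %d <- tracked finger %d\n", i, fingerIndex);
        }
        // joints 0..4 map to tracked fingers 0, 1, 1, 2, 2
        return 0;
    }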
 void SkeletonModel::updateJointState(int index) {
     Model::updateJointState(index);

     if (index == _geometry->getFBXGeometry().rootJointIndex) {
         JointState& state = _jointStates[index];
         state.transform[3][0] = 0.0f;
@@ -32,7 +32,7 @@ Faceshift::Faceshift() :
     _rightBlinkIndex(1),
     _leftEyeOpenIndex(8),
     _rightEyeOpenIndex(9),
-    _browDownLeftIndex(14),
+    _browDownLeftIndex(14),
     _browDownRightIndex(15),
     _browUpCenterIndex(16),
     _browUpLeftIndex(17),
@@ -49,9 +49,9 @@ Faceshift::Faceshift() :
     connect(&_tcpSocket, SIGNAL(connected()), SLOT(noteConnected()));
     connect(&_tcpSocket, SIGNAL(error(QAbstractSocket::SocketError)), SLOT(noteError(QAbstractSocket::SocketError)));
     connect(&_tcpSocket, SIGNAL(readyRead()), SLOT(readFromSocket()));

     connect(&_udpSocket, SIGNAL(readyRead()), SLOT(readPendingDatagrams()));

     _udpSocket.bind(FACESHIFT_PORT);
 }
@@ -67,14 +67,14 @@ void Faceshift::update() {
     // get the euler angles relative to the window
     glm::vec3 eulers = safeEulerAngles(_headRotation * glm::quat(glm::radians(glm::vec3(
         (_eyeGazeLeftPitch + _eyeGazeRightPitch) / 2.0f, (_eyeGazeLeftYaw + _eyeGazeRightYaw) / 2.0f, 0.0f))));

     // compute and subtract the long term average
     const float LONG_TERM_AVERAGE_SMOOTHING = 0.999f;
     if (!_longTermAverageInitialized) {
         _longTermAverageEyePitch = eulers.x;
         _longTermAverageEyeYaw = eulers.y;
         _longTermAverageInitialized = true;

     } else {
         _longTermAverageEyePitch = glm::mix(eulers.x, _longTermAverageEyePitch, LONG_TERM_AVERAGE_SMOOTHING);
         _longTermAverageEyeYaw = glm::mix(eulers.y, _longTermAverageEyeYaw, LONG_TERM_AVERAGE_SMOOTHING);
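A note on the smoothing above: glm::mix(x, y, a) returns x·(1 − a) + y·a, so with a = 0.999 the long-term average keeps 99.9% of its previous value each frame and absorbs 0.1% of the new sample. That tracks slow head drift while letting per-frame gaze motion pass through. A minimal sketch of the same filter (standalone; only the smoothing constant is taken from the diff, everything else is illustrative):

    #include <cstdio>

    // mix(x, y, a) = x * (1 - a) + y * a, matching glm::mix for floats
    static float mix(float x, float y, float a) {
        return x * (1.0f - a) + y * a;
    }

    int main() {
        const float LONG_TERM_AVERAGE_SMOOTHING = 0.999f;
        float longTermAverage = 0.0f; // assume seeded from the first sample

        // feed a constant 10-degree pitch; the average creeps toward it slowly
        for (int frame = 0; frame < 5000; frame++) {
            float sample = 10.0f;
            longTermAverage = mix(sample, longTermAverage, LONG_TERM_AVERAGE_SMOOTHING);
        }
        printf("average after 5000 frames: %f\n", longTermAverage); // ~9.93
        return 0;
    }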
@@ -107,7 +107,7 @@ void Faceshift::updateFakeCoefficients(float leftBlink, float rightBlink, float
 void Faceshift::setTCPEnabled(bool enabled) {
     if ((_tcpEnabled = enabled)) {
         connectSocket();

     } else {
         _tcpSocket.disconnectFromHost();
     }
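The doubled parentheses in `if ((_tcpEnabled = enabled))` are the conventional way to mark an assignment inside a condition as intentional; without them, GCC's -Wparentheses and similar checks in other compilers flag the pattern as a probable `==` typo. A minimal illustration with hypothetical variables:

    #include <cstdio>

    int main() {
        bool enabled = true;
        bool tcpEnabled = false;

        // if (tcpEnabled = enabled) { ... }   // warns: assignment in condition
        if ((tcpEnabled = enabled)) {          // extra parens: "yes, I meant ="
            printf("connecting\n");
        } else {
            printf("disconnecting\n");
        }
        return 0;
    }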
@@ -118,7 +118,7 @@ void Faceshift::connectSocket() {
     if (!_tcpRetryCount) {
         qDebug("Faceshift: Connecting...\n");
     }

     _tcpSocket.connectToHost("localhost", FACESHIFT_PORT);
     _tracking = false;
 }
@@ -126,7 +126,7 @@ void Faceshift::connectSocket() {

 void Faceshift::noteConnected() {
     qDebug("Faceshift: Connected.\n");

     // request the list of blendshape names
     string message;
     fsBinaryStream::encode_message(message, fsMsgSendBlendshapeNames());
@@ -176,7 +176,7 @@ void Faceshift::receive(const QByteArray& buffer) {
     if ((_tracking = data.m_trackingSuccessful)) {
         glm::quat newRotation = glm::quat(data.m_headRotation.w, -data.m_headRotation.x,
             data.m_headRotation.y, -data.m_headRotation.z);
-        // Compute angular velocity of the head
+        // Compute angular velocity of the head
         glm::quat r = newRotation * glm::inverse(_headRotation);
         float theta = 2 * acos(r.w);
         if (theta > EPSILON) {
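The delta quaternion r = new · inverse(old) encodes the rotation between two consecutive head poses; for a unit quaternion its rotation angle is θ = 2·acos(w), and dividing the axis-angle form by the frame interval yields an angular velocity. A standalone sketch of that step, assuming glm and an externally supplied deltaTime:

    #include <cmath>
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Angular velocity (radians/second) taking oldRotation to newRotation
    // over deltaTime seconds. EPSILON guards the normalize() below, since
    // the axis is undefined when the two rotations coincide.
    glm::vec3 angularVelocity(const glm::quat& oldRotation, const glm::quat& newRotation,
                              float deltaTime) {
        const float EPSILON = 1.0e-4f;
        glm::quat r = newRotation * glm::inverse(oldRotation);
        float theta = 2.0f * acosf(glm::clamp(r.w, -1.0f, 1.0f));
        if (theta > EPSILON && deltaTime > 0.0f) {
            glm::vec3 axis = glm::normalize(glm::vec3(r.x, r.y, r.z));
            return axis * (theta / deltaTime);
        }
        return glm::vec3(0.0f);
    }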
@@ -187,7 +187,7 @@ void Faceshift::receive(const QByteArray& buffer) {
         _headAngularVelocity = glm::vec3(0,0,0);
     }
     _headRotation = newRotation;

     const float TRANSLATION_SCALE = 0.02f;
     _headTranslation = glm::vec3(data.m_headTranslation.x, data.m_headTranslation.y,
         -data.m_headTranslation.z) * TRANSLATION_SCALE;
@@ -196,17 +196,17 @@ void Faceshift::receive(const QByteArray& buffer) {
             _eyeGazeRightPitch = -data.m_eyeGazeRightPitch;
             _eyeGazeRightYaw = data.m_eyeGazeRightYaw;
             _blendshapeCoefficients = data.m_coeffs;

             _lastTrackingStateReceived = usecTimestampNow();
         }
         break;
     }
     case fsMsg::MSG_OUT_BLENDSHAPE_NAMES: {
         const vector<string>& names = static_cast<fsMsgBlendshapeNames*>(msg.get())->blendshape_names();
-        for (int i = 0; i < names.size(); i++) {
+        for (size_t i = 0; i < names.size(); i++) {
             if (names[i] == "EyeBlink_L") {
                 _leftBlinkIndex = i;

             } else if (names[i] == "EyeBlink_R") {
                 _rightBlinkIndex = i;
@@ -233,10 +233,10 @@ void Faceshift::receive(const QByteArray& buffer) {

             } else if (names[i] == "JawOpen") {
                 _jawOpenIndex = i;

             } else if (names[i] == "MouthSmile_L") {
                 _mouthSmileLeftIndex = i;

             } else if (names[i] == "MouthSmile_R") {
                 _mouthSmileRightIndex = i;
             }
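This else-if chain linearly matches each blendshape name the tracker reports against hard-coded Faceshift names. A table-driven alternative is sketched below; the struct, its placeholder defaults, and the function name are illustrative only, while the name strings are the ones in the diff:

    #include <string>
    #include <unordered_map>
    #include <vector>

    // Hypothetical stand-in for Faceshift's index members.
    struct BlendshapeIndices {
        int leftBlink = -1, rightBlink = -1, jawOpen = -1,
            mouthSmileLeft = -1, mouthSmileRight = -1;
    };

    // Map each reported name to the member it should set, in one pass.
    void mapBlendshapeNames(const std::vector<std::string>& names, BlendshapeIndices& out) {
        std::unordered_map<std::string, int*> slots = {
            { "EyeBlink_L", &out.leftBlink },
            { "EyeBlink_R", &out.rightBlink },
            { "JawOpen", &out.jawOpen },
            { "MouthSmile_L", &out.mouthSmileLeft },
            { "MouthSmile_R", &out.mouthSmileRight },
        };
        for (size_t i = 0; i < names.size(); i++) {
            auto it = slots.find(names[i]);
            if (it != slots.end()) {
                *it->second = (int)i;
            }
        }
    }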
File diff suppressed because it is too large
@@ -1251,7 +1251,7 @@ void OctreeElement::addDeleteHook(OctreeElementDeleteHook* hook) {

 void OctreeElement::removeDeleteHook(OctreeElementDeleteHook* hook) {
     _deleteHooksLock.lockForWrite();
-    for (int i = 0; i < _deleteHooks.size(); i++) {
+    for (unsigned int i = 0; i < _deleteHooks.size(); i++) {
         if (_deleteHooks[i] == hook) {
             _deleteHooks.erase(_deleteHooks.begin() + i);
             break;
@@ -1262,7 +1262,7 @@ void OctreeElement::removeDeleteHook(OctreeElementDeleteHook* hook) {

 void OctreeElement::notifyDeleteHooks() {
     _deleteHooksLock.lockForRead();
-    for (int i = 0; i < _deleteHooks.size(); i++) {
+    for (unsigned int i = 0; i < _deleteHooks.size(); i++) {
         _deleteHooks[i]->elementDeleted(this);
     }
     _deleteHooksLock.unlock();
@@ -1275,7 +1275,7 @@ void OctreeElement::addUpdateHook(OctreeElementUpdateHook* hook) {
 }

 void OctreeElement::removeUpdateHook(OctreeElementUpdateHook* hook) {
-    for (int i = 0; i < _updateHooks.size(); i++) {
+    for (unsigned int i = 0; i < _updateHooks.size(); i++) {
         if (_updateHooks[i] == hook) {
             _updateHooks.erase(_updateHooks.begin() + i);
             return;
@@ -1284,7 +1284,7 @@ void OctreeElement::removeUpdateHook(OctreeElementUpdateHook* hook) {
 }

 void OctreeElement::notifyUpdateHooks() {
-    for (int i = 0; i < _updateHooks.size(); i++) {
+    for (unsigned int i = 0; i < _updateHooks.size(); i++) {
         _updateHooks[i]->elementUpdated(this);
     }
 }
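The OctreeElement hunks apply the same warning fix with unsigned int, matching the unsigned value that the hook containers' size() returns. One subtlety worth noting: erasing by an iterator computed from the index is safe in removeDeleteHook and removeUpdateHook only because the loop breaks or returns immediately afterward. A standalone sketch of the pattern, with a hypothetical Hook type standing in for the real hook interfaces:

    #include <vector>

    struct Hook { /* hypothetical stand-in for OctreeElementDeleteHook */ };

    // Remove the first occurrence of hook; the unsigned counter matches
    // std::vector::size(), so MSVC raises no signed/unsigned mismatch.
    void removeHook(std::vector<Hook*>& hooks, Hook* hook) {
        for (unsigned int i = 0; i < hooks.size(); i++) {
            if (hooks[i] == hook) {
                hooks.erase(hooks.begin() + i);
                break; // indices past the erase are stale, so stop here
            }
        }
    }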