Merge branch 'master' of https://github.com/highfidelity/hifi into model_browsing

Commit a10a3f6f29: 50 changed files with 468 additions and 474 deletions
@@ -40,13 +40,13 @@ bool waitForVoxelServer = true;
 const int ANIMATION_LISTEN_PORT = 40107;
 int ANIMATE_FPS = 60;
 double ANIMATE_FPS_IN_MILLISECONDS = 1000.0/ANIMATE_FPS; // determines FPS from our desired FPS
-int ANIMATE_VOXELS_INTERVAL_USECS = (ANIMATE_FPS_IN_MILLISECONDS * 1000.0); // converts from milliseconds to usecs
+quint64 ANIMATE_VOXELS_INTERVAL_USECS = (ANIMATE_FPS_IN_MILLISECONDS * 1000); // converts from milliseconds to usecs

 int PROCESSING_FPS = 60;
 double PROCESSING_FPS_IN_MILLISECONDS = 1000.0/PROCESSING_FPS; // determines FPS from our desired FPS
 int FUDGE_USECS = 650; // a little bit of fudge to actually do some processing
-int PROCESSING_INTERVAL_USECS = (PROCESSING_FPS_IN_MILLISECONDS * 1000.0) - FUDGE_USECS; // converts from milliseconds to usecs
+quint64 PROCESSING_INTERVAL_USECS = (PROCESSING_FPS_IN_MILLISECONDS * 1000) - FUDGE_USECS; // converts from milliseconds to usecs

 bool wantLocalDomain = false;

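For reference, a minimal standalone sketch (plain C++; uint64_t stands in for Qt's quint64) of the interval math these constants encode, and of why the printf format strings later in this commit change from %d to %llu:

#include <cstdint>
#include <cstdio>

int main() {
    const int ANIMATE_FPS = 60;
    const double ANIMATE_FPS_IN_MILLISECONDS = 1000.0 / ANIMATE_FPS;   // ~16.667 ms per frame
    const uint64_t ANIMATE_VOXELS_INTERVAL_USECS =
        static_cast<uint64_t>(ANIMATE_FPS_IN_MILLISECONDS * 1000);     // ~16666 usecs per frame

    // %llu matches the widened unsigned 64-bit type
    std::printf("interval = %llu usecs\n",
                static_cast<unsigned long long>(ANIMATE_VOXELS_INTERVAL_USECS));
    return 0;
}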
@@ -611,9 +611,6 @@ void* animateVoxels(void* args) {
     }
     lastProcessTime = usecTimestampNow();

-    int packetsStarting = 0;
-    int packetsEnding = 0;
-
     // The while loop will be running at PROCESSING_FPS, but we only want to call these animation functions at
     // ANIMATE_FPS. So we check out last animate time and only call these if we've elapsed that time.
     quint64 now = usecTimestampNow();

@@ -627,8 +624,6 @@ void* animateVoxels(void* args) {
         animateLoopsPerAnimate++;

         lastAnimateTime = now;
-        packetsStarting = ::voxelEditPacketSender->packetsToSendCount();
-
         // some animations
         //sendVoxelBlinkMessage();

@@ -652,8 +647,6 @@ void* animateVoxels(void* args) {
         doBuildStreet();
    }

-    packetsEnding = ::voxelEditPacketSender->packetsToSendCount();
-
    if (animationElapsed > ANIMATE_VOXELS_INTERVAL_USECS) {
        animationElapsed -= ANIMATE_VOXELS_INTERVAL_USECS; // credit ourselves one animation frame
    } else {

@@ -670,9 +663,9 @@ void* animateVoxels(void* args) {
        processesPerAnimate++;
    }
    // dynamically sleep until we need to fire off the next set of voxels
-    quint64 usecToSleep = PROCESSING_INTERVAL_USECS - (usecTimestampNow() - lastProcessTime);
-    if (usecToSleep > PROCESSING_INTERVAL_USECS) {
-        usecToSleep = PROCESSING_INTERVAL_USECS;
+    quint64 usecToSleep = ::PROCESSING_INTERVAL_USECS - (usecTimestampNow() - lastProcessTime);
+    if (usecToSleep > ::PROCESSING_INTERVAL_USECS) {
+        usecToSleep = ::PROCESSING_INTERVAL_USECS;
    }

    if (usecToSleep > 0) {
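Beyond the `::` qualification, the pattern in this hunk is a self-correcting frame sleep. A small sketch of the same logic under std::chrono, with the unsigned wrap-around case called out:

#include <chrono>
#include <cstdint>
#include <thread>

static uint64_t usecNow() {
    using namespace std::chrono;
    return (uint64_t) duration_cast<microseconds>(
        steady_clock::now().time_since_epoch()).count();
}

void sleepUntilNextFrame(uint64_t lastProcessTime, uint64_t intervalUsecs) {
    uint64_t usecToSleep = intervalUsecs - (usecNow() - lastProcessTime);
    if (usecToSleep > intervalUsecs) {
        // unsigned subtraction wrapped around: the frame overran its budget,
        // so clamp instead of "sleeping" for an astronomically large value
        usecToSleep = intervalUsecs;
    }
    if (usecToSleep > 0) {
        std::this_thread::sleep_for(std::chrono::microseconds(usecToSleep));
    }
}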
@@ -758,7 +751,7 @@ AnimationServer::AnimationServer(int &argc, char **argv) :
         }
     }
     printf("ANIMATE_FPS=%d\n",ANIMATE_FPS);
-    printf("ANIMATE_VOXELS_INTERVAL_USECS=%d\n",ANIMATE_VOXELS_INTERVAL_USECS);
+    printf("ANIMATE_VOXELS_INTERVAL_USECS=%llu\n",ANIMATE_VOXELS_INTERVAL_USECS);

    const char* processingFPSCommand = getCmdOption(argc, (const char**) argv, "--ProcessingFPS");
    const char* processingIntervalCommand = getCmdOption(argc, (const char**) argv, "--ProcessingInterval");

@@ -774,7 +767,7 @@ AnimationServer::AnimationServer(int &argc, char **argv) :
        }
    }
    printf("PROCESSING_FPS=%d\n",PROCESSING_FPS);
-    printf("PROCESSING_INTERVAL_USECS=%d\n",PROCESSING_INTERVAL_USECS);
+    printf("PROCESSING_INTERVAL_USECS=%llu\n",PROCESSING_INTERVAL_USECS);

    nodeList->linkedDataCreateCallback = NULL; // do we need a callback?

@@ -6,6 +6,7 @@
 // Copyright (c) 2013 HighFidelity, Inc. All rights reserved.
 //

+#include <mmintrin.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <fstream>
@@ -61,10 +62,9 @@ void attachNewBufferToNode(Node *newNode) {
 }

 AudioMixer::AudioMixer(const QByteArray& packet) :
-    ThreadedAssignment(packet),
-    _clientMixBuffer(NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio), 0)
+    ThreadedAssignment(packet)
 {
-    connect(NodeList::getInstance(), &NodeList::uuidChanged, this, &AudioMixer::receivedSessionUUID);

 }

 void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
@@ -73,13 +73,10 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
     float attenuationCoefficient = 1.0f;
     int numSamplesDelay = 0;
     float weakChannelAmplitudeRatio = 1.0f;

-    const int PHASE_DELAY_AT_90 = 20;
-
-
     if (bufferToAdd != listeningNodeBuffer) {
         // if the two buffer pointers do not match then these are different buffers
         glm::vec3 listenerPosition = listeningNodeBuffer->getPosition();
         glm::vec3 relativePosition = bufferToAdd->getPosition() - listeningNodeBuffer->getPosition();
         glm::quat inverseOrientation = glm::inverse(listeningNodeBuffer->getOrientation());
@@ -149,7 +146,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
         // figure out the number of samples of delay and the ratio of the amplitude
         // in the weak channel for audio spatialization
         float sinRatio = fabsf(sinf(bearingRelativeAngleToSource));
-        numSamplesDelay = PHASE_DELAY_AT_90 * sinRatio;
+        numSamplesDelay = SAMPLE_PHASE_DELAY_AT_90 * sinRatio;
         weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
     }
 }
@@ -157,26 +154,130 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
     // if the bearing relative angle to source is > 0 then the delayed channel is the right one
     int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
     int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;

     const int16_t* nextOutputStart = bufferToAdd->getNextOutput();

+    const int16_t* bufferStart = bufferToAdd->getBuffer();
+    int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
+
-    for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 2) {
-        if ((s / 2) < numSamplesDelay) {
-            // pull the earlier sample for the delayed channel
-            int earlierSample = (*bufferToAdd)[(s / 2) - numSamplesDelay] * attenuationCoefficient * weakChannelAmplitudeRatio;
-            _clientSamples[s + delayedChannelOffset] = glm::clamp(_clientSamples[s + delayedChannelOffset] + earlierSample,
-                                                                  MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
-        }
+    int16_t correctBufferSample[2], delayBufferSample[2];
+    int delayedChannelIndex = 0;
+
+    const int SINGLE_STEREO_OFFSET = 2;
+
+    for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
+
+        // setup the int16_t variables for the two sample sets
+        correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient;
+        correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient;
+
+        delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
+
+        delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
+        delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
+
+        __m64 bufferSamples = _mm_set_pi16(_clientSamples[s + goodChannelOffset],
+                                           _clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET],
+                                           _clientSamples[delayedChannelIndex],
+                                           _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET]);
+        __m64 addedSamples = _mm_set_pi16(correctBufferSample[0], correctBufferSample[1],
+                                          delayBufferSample[0], delayBufferSample[1]);
+
+        // perform the MMX add (with saturation) of two correct and delayed samples
+        __m64 mmxResult = _mm_adds_pi16(bufferSamples, addedSamples);
+        int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
+
+        // assign the results from the result of the mmx arithmetic
+        _clientSamples[s + goodChannelOffset] = shortResults[3];
+        _clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] = shortResults[2];
+        _clientSamples[delayedChannelIndex] = shortResults[1];
+        _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] = shortResults[0];
+    }
+
+    // The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
+    // too many conditionals in handling the delay samples at the beginning of _clientSamples.
+    // Basically we try to take the samples in batches of four, and then handle the remainder
+    // conditionally to get rid of the rest.
+
+    const int DOUBLE_STEREO_OFFSET = 4;
+    const int TRIPLE_STEREO_OFFSET = 6;
+
+    if (numSamplesDelay > 0) {
+        // if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
+        // to stick at the beginning
+        float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
+        const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay;
+        if (delayNextOutputStart < bufferStart) {
+            delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay;
+        }

-        // pull the current sample for the good channel
-        int16_t currentSample = (*bufferToAdd)[s / 2] * attenuationCoefficient;
-        _clientSamples[s + goodChannelOffset] = glm::clamp(_clientSamples[s + goodChannelOffset] + currentSample,
-                                                           MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
-
-        if ((s / 2) + numSamplesDelay < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
-            // place the current sample at the right spot in the delayed channel
-            int16_t clampedSample = glm::clamp((int) (_clientSamples[s + (numSamplesDelay * 2) + delayedChannelOffset]
-                                                      + (currentSample * weakChannelAmplitudeRatio)),
-                                               MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
-            _clientSamples[s + (numSamplesDelay * 2) + delayedChannelOffset] = clampedSample;
-        }
-    }
+        int i = 0;
+
+        while (i + 3 < numSamplesDelay) {
+            // handle the first cases where we can MMX add four samples at once
+            int parentIndex = i * 2;
+            __m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
+                                               _clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
+                                               _clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
+                                               _clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset]);
+            __m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
+                                            delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
+                                            delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
+                                            delayNextOutputStart[i + 3] * attenuationAndWeakChannelRatio);
+            __m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
+            int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
+
+            _clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
+            _clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
+            _clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
+            _clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[0];
+
+            // push the index
+            i += 4;
+        }
+
+        int parentIndex = i * 2;
+
+        if (i + 2 < numSamplesDelay) {
+            // MMX add only three delayed samples
+            __m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
+                                               _clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
+                                               _clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
+                                               0);
+            __m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
+                                            delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
+                                            delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
+                                            0);
+            __m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
+            int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
+
+            _clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
+            _clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
+            _clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
+
+        } else if (i + 1 < numSamplesDelay) {
+            // MMX add two delayed samples
+            __m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
+                                               _clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset], 0, 0);
+            __m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
+                                            delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio, 0, 0);
+
+            __m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
+            int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
+
+            _clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
+            _clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
+
+        } else if (i < numSamplesDelay) {
+            // MMX add a single delayed sample
+            __m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset], 0, 0, 0);
+            __m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio, 0, 0, 0);
+
+            __m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
+            int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
+
+            _clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
+        }
+    }
 }
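The new mixing loop leans on one intrinsic, _mm_adds_pi16, which adds four int16 lanes at once with saturation, replacing the per-sample glm::clamp calls of the old loop. A standalone sketch of just that operation (assuming an x86 target with MMX; note _mm_set_pi16 places its first argument in the highest lane, which is why the mixer reads shortResults[3] back into the first slot):

#include <mmintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
    int16_t mix[4] = { 30000, -30000, 1000, -1000 };   // existing client mix
    int16_t add[4] = { 10000, -10000, 2000, -2000 };   // samples to mix in

    __m64 a = _mm_set_pi16(mix[0], mix[1], mix[2], mix[3]);
    __m64 b = _mm_set_pi16(add[0], add[1], add[2], add[3]);
    __m64 r = _mm_adds_pi16(a, b);                     // saturating add: clamps to [-32768, 32767]

    const int16_t* out = reinterpret_cast<const int16_t*>(&r);
    for (int i = 0; i < 4; i++) {
        // out[3 - i] undoes the highest-lane-first ordering of _mm_set_pi16
        std::printf("%d + %d -> %d\n", mix[i], add[i], out[3 - i]);
    }
    _mm_empty();                                       // leave the MMX/x87 state clean
    return 0;
}

The first lane prints 32767 rather than 40000, which is exactly the clamping behavior the old scalar code paid for with branches.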
@@ -185,7 +286,7 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
     AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();

     // zero out the client mix for this node
-    memset(_clientSamples, 0, sizeof(_clientSamples));
+    memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);

     // loop through all other nodes that have sufficient audio to mix
     foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
@@ -199,7 +300,8 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {

             if ((*otherNode != *node
                  || otherNodeBuffer->shouldLoopbackForNode())
-                && otherNodeBuffer->willBeAddedToMix()) {
+                && otherNodeBuffer->willBeAddedToMix()
+                && otherNodeClientData->getNextOutputLoudness() > 0) {
                 addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
             }
         }
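The new getNextOutputLoudness() > 0 test lets the mixer skip sources with nothing audible before doing any spatialization work. A minimal sketch of the idea, with illustrative names rather than hifi's actual ring-buffer API:

#include <cstdint>
#include <cstdlib>

// mean absolute amplitude of the samples about to be mixed
float averageLoudness(const int16_t* samples, int numSamples) {
    float total = 0.0f;
    for (int i = 0; i < numSamples; i++) {
        total += std::abs(samples[i]);
    }
    return numSamples > 0 ? total / numSamples : 0.0f;
}

// in the mix loop: only pay for attenuation, delay, and saturating adds
// when the source actually carries signal, e.g.
//     if (averageLoudness(next, samplesPerFrame) > 0) { addBufferToMix(...); }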
@@ -219,7 +321,8 @@ void AudioMixer::readPendingDatagrams() {
             PacketType mixerPacketType = packetTypeForPacket(receivedPacket);
             if (mixerPacketType == PacketTypeMicrophoneAudioNoEcho
                 || mixerPacketType == PacketTypeMicrophoneAudioWithEcho
-                || mixerPacketType == PacketTypeInjectAudio) {
+                || mixerPacketType == PacketTypeInjectAudio
+                || mixerPacketType == PacketTypeSilentAudioFrame) {

                 nodeList->findNodeAndUpdateWithDataFromPacket(receivedPacket);
             } else {
@@ -230,10 +333,6 @@ void AudioMixer::readPendingDatagrams() {
         }
     }
 }

-void AudioMixer::receivedSessionUUID(const QUuid& sessionUUID) {
-    populatePacketHeader(_clientMixBuffer, PacketTypeMixedAudio);
-}
-
 void AudioMixer::run() {

     commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);
@@ -248,17 +347,12 @@ void AudioMixer::run() {
     timeval startTime;

     gettimeofday(&startTime, NULL);

-    int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio);
+    char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO
+                                     + numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio)];

     while (!_isFinished) {

-        QCoreApplication::processEvents();
-
-        if (_isFinished) {
-            break;
-        }
-
         foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
             if (node->getLinkedData()) {
                 ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(JITTER_BUFFER_SAMPLES);
@@ -269,9 +363,11 @@ void AudioMixer::run() {
             if (node->getType() == NodeType::Agent && node->getActiveSocket() && node->getLinkedData()
                 && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) {
                 prepareMixForListeningNode(node.data());

-                memcpy(_clientMixBuffer.data() + numBytesPacketHeader, _clientSamples, sizeof(_clientSamples));
-                nodeList->writeDatagram(_clientMixBuffer, node);
+                int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
+
+                memcpy(clientMixBuffer + numBytesPacketHeader, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
+                nodeList->writeDatagram(clientMixBuffer, NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader, node);
             }
         }
@@ -281,6 +377,12 @@ void AudioMixer::run() {
                 ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend();
             }
         }

+        QCoreApplication::processEvents();
+
+        if (_isFinished) {
+            break;
+        }
+
         int usecToSleep = usecTimestamp(&startTime) + (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - usecTimestampNow();
@@ -291,4 +393,6 @@ void AudioMixer::run() {
         }

     }
+
+    delete[] clientMixBuffer;
 }
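With the _clientMixBuffer member gone, run() now assembles each datagram by hand: header written first, samples copied in after it, and the send length computed explicitly. A sketch of that layout, with a stand-in header writer in place of hifi's populatePacketHeader:

#include <cstdint>
#include <cstring>

constexpr int SAMPLES_STEREO = 512;                           // illustrative frame size
constexpr int BYTES_STEREO = SAMPLES_STEREO * (int) sizeof(int16_t);

// stand-in for populatePacketHeader(): writes a 1-byte type, returns its size
static int writeHeader(char* packet, uint8_t packetType) {
    packet[0] = (char) packetType;
    return 1;
}

// returns the total byte count to pass to the datagram send call
static int buildMixPacket(char* packet, const int16_t* samples, uint8_t type) {
    int numBytesPacketHeader = writeHeader(packet, type);
    std::memcpy(packet + numBytesPacketHeader, samples, BYTES_STEREO);
    return BYTES_STEREO + numBytesPacketHeader;
}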
@@ -16,6 +16,8 @@
 class PositionalAudioRingBuffer;
 class AvatarAudioRingBuffer;

+const int SAMPLE_PHASE_DELAY_AT_90 = 20;
+
 /// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
 class AudioMixer : public ThreadedAssignment {
     Q_OBJECT
@@ -26,8 +28,6 @@ public slots:
     void run();

     void readPendingDatagrams();
-private slots:
-    void receivedSessionUUID(const QUuid& sessionUUID);
 private:
     /// adds one buffer to the mix for a listening node
     void addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
@@ -36,9 +36,8 @@ private:
     /// prepares and sends a mix to one Node
     void prepareMixForListeningNode(Node* node);

-    QByteArray _clientMixBuffer;
-
-    int16_t _clientSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO];
+    // client samples capacity is larger than what will be sent to optimize mixing
+    int16_t _clientSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + SAMPLE_PHASE_DELAY_AT_90];
 };

 #endif /* defined(__hifi__AudioMixer__) */
@@ -13,6 +13,13 @@

 #include "AudioMixerClientData.h"

+AudioMixerClientData::AudioMixerClientData() :
+    _ringBuffers(),
+    _nextOutputLoudness(0)
+{
+
+}
+
 AudioMixerClientData::~AudioMixerClientData() {
     for (unsigned int i = 0; i < _ringBuffers.size(); i++) {
         // delete this attached PositionalAudioRingBuffer
@@ -34,7 +41,8 @@ AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const {
 int AudioMixerClientData::parseData(const QByteArray& packet) {
     PacketType packetType = packetTypeForPacket(packet);
     if (packetType == PacketTypeMicrophoneAudioWithEcho
-        || packetType == PacketTypeMicrophoneAudioNoEcho) {
+        || packetType == PacketTypeMicrophoneAudioNoEcho
+        || packetType == PacketTypeSilentAudioFrame) {

         // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
         AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
@@ -80,6 +88,10 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(int jitterBufferLengthSam
             // this is a ring buffer that is ready to go
             // set its flag so we know to push its buffer when all is said and done
             _ringBuffers[i]->setWillBeAddedToMix(true);
+
+            // calculate the average loudness for the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
+            // that would be mixed in
+            _nextOutputLoudness = _ringBuffers[i]->averageLoudnessForBoundarySamples(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
         }
     }
 }
@@ -18,16 +18,20 @@

 class AudioMixerClientData : public NodeData {
 public:
+    AudioMixerClientData();
     ~AudioMixerClientData();

     const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
     AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;

+    float getNextOutputLoudness() const { return _nextOutputLoudness; }
+
     int parseData(const QByteArray& packet);
     void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples);
     void pushBuffersAfterFrameSend();
 private:
     std::vector<PositionalAudioRingBuffer*> _ringBuffers;
+    float _nextOutputLoudness;
 };

 #endif /* defined(__hifi__AudioMixerClientData__) */
@@ -62,7 +62,8 @@ void broadcastAvatarData() {
             mixedAvatarByteArray.resize(numPacketHeaderBytes);

             AvatarMixerClientData* myData = reinterpret_cast<AvatarMixerClientData*>(node->getLinkedData());
-            glm::vec3 myPosition = myData->getPosition();
+            AvatarData& avatar = myData->getAvatar();
+            glm::vec3 myPosition = avatar.getPosition();

             // this is an AGENT we have received head data from
             // send back a packet with other active node data to this node
@@ -70,7 +71,8 @@ void broadcastAvatarData() {
                 if (otherNode->getLinkedData() && otherNode->getUUID() != node->getUUID()) {

                     AvatarMixerClientData* otherNodeData = reinterpret_cast<AvatarMixerClientData*>(otherNode->getLinkedData());
-                    glm::vec3 otherPosition = otherNodeData->getPosition();
+                    AvatarData& otherAvatar = otherNodeData->getAvatar();
+                    glm::vec3 otherPosition = otherAvatar.getPosition();
                     float distanceToAvatar = glm::length(myPosition - otherPosition);
                     // The full rate distance is the distance at which EVERY update will be sent for this avatar
                     // at a distance of twice the full rate distance, there will be a 50% chance of sending this avatar's update
@@ -79,7 +81,7 @@ void broadcastAvatarData() {
                     if ((distanceToAvatar == 0.f) || (randFloat() < FULL_RATE_DISTANCE / distanceToAvatar)) {
                         QByteArray avatarByteArray;
                         avatarByteArray.append(otherNode->getUUID().toRfc4122());
-                        avatarByteArray.append(otherNodeData->toByteArray());
+                        avatarByteArray.append(otherAvatar.toByteArray());

                         if (avatarByteArray.size() + mixedAvatarByteArray.size() > MAX_PACKET_SIZE) {
                             nodeList->writeDatagram(mixedAvatarByteArray, node);
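The condition above implements the probabilistic throttle the comments describe: inside FULL_RATE_DISTANCE every update is sent; beyond it, the send probability falls off as FULL_RATE_DISTANCE / distance, giving a 50% chance at twice the distance and 25% at four times. A minimal sketch (the FULL_RATE_DISTANCE value here is illustrative, not necessarily hifi's):

#include <cstdlib>

static float randFloat() { return rand() / (float) RAND_MAX; }

bool shouldSendUpdate(float distanceToAvatar) {
    const float FULL_RATE_DISTANCE = 2.0f; // meters; inside this, always send
    return (distanceToAvatar == 0.0f)
        || (randFloat() < FULL_RATE_DISTANCE / distanceToAvatar);
}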
@@ -110,7 +112,8 @@ void broadcastIdentityPacket() {
         if (node->getLinkedData() && node->getType() == NodeType::Agent) {

             AvatarMixerClientData* nodeData = reinterpret_cast<AvatarMixerClientData*>(node->getLinkedData());
-            QByteArray individualData = nodeData->identityByteArray();
+            AvatarData& avatar = nodeData->getAvatar();
+            QByteArray individualData = avatar.identityByteArray();
             individualData.replace(0, NUM_BYTES_RFC4122_UUID, node->getUUID().toRfc4122());

             if (avatarIdentityPacket.size() + individualData.size() > MAX_PACKET_SIZE) {
@@ -135,9 +138,10 @@ void broadcastIdentityPacket() {

 void broadcastBillboardPacket(const SharedNodePointer& sendingNode) {
     AvatarMixerClientData* nodeData = static_cast<AvatarMixerClientData*>(sendingNode->getLinkedData());
+    AvatarData& avatar = nodeData->getAvatar();
     QByteArray packet = byteArrayWithPopulatedHeader(PacketTypeAvatarBillboard);
     packet.append(sendingNode->getUUID().toRfc4122());
-    packet.append(nodeData->getBillboard());
+    packet.append(avatar.getBillboard());

     NodeList* nodeList = NodeList::getInstance();
     foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
@@ -190,12 +194,13 @@ void AvatarMixer::readPendingDatagrams() {

                 if (avatarNode && avatarNode->getLinkedData()) {
                     AvatarMixerClientData* nodeData = reinterpret_cast<AvatarMixerClientData*>(avatarNode->getLinkedData());
-                    if (nodeData->hasIdentityChangedAfterParsing(receivedPacket)
+                    AvatarData& avatar = nodeData->getAvatar();
+                    if (avatar.hasIdentityChangedAfterParsing(receivedPacket)
                         && !nodeData->hasSentIdentityBetweenKeyFrames()) {
                         // this avatar changed their identity in some way and we haven't sent a packet in this keyframe
                         QByteArray identityPacket = byteArrayWithPopulatedHeader(PacketTypeAvatarIdentity);

-                        QByteArray individualByteArray = nodeData->identityByteArray();
+                        QByteArray individualByteArray = avatar.identityByteArray();
                         individualByteArray.replace(0, NUM_BYTES_RFC4122_UUID, avatarNode->getUUID().toRfc4122());

                         identityPacket.append(individualByteArray);
@@ -213,7 +218,8 @@ void AvatarMixer::readPendingDatagrams() {

                 if (avatarNode && avatarNode->getLinkedData()) {
                     AvatarMixerClientData* nodeData = static_cast<AvatarMixerClientData*>(avatarNode->getLinkedData());
-                    if (nodeData->hasBillboardChangedAfterParsing(receivedPacket)
+                    AvatarData& avatar = nodeData->getAvatar();
+                    if (avatar.hasBillboardChangedAfterParsing(receivedPacket)
                         && !nodeData->hasSentBillboardBetweenKeyFrames()) {
                         // this avatar changed their billboard and we haven't sent a packet in this keyframe
                         broadcastBillboardPacket(avatarNode);
@@ -9,8 +9,15 @@
 #include "AvatarMixerClientData.h"

 AvatarMixerClientData::AvatarMixerClientData() :
+    NodeData(),
     _hasSentIdentityBetweenKeyFrames(false),
     _hasSentBillboardBetweenKeyFrames(false)
 {

 }
+
+int AvatarMixerClientData::parseData(const QByteArray& packet) {
+    // compute the offset to the data payload
+    int offset = numBytesForPacketHeader(packet);
+    return _avatar.parseDataAtOffset(packet, offset);
+}
@@ -12,11 +12,14 @@
 #include <QtCore/QUrl>

 #include <AvatarData.h>
+#include <NodeData.h>

-class AvatarMixerClientData : public AvatarData {
+class AvatarMixerClientData : public NodeData {
     Q_OBJECT
 public:
     AvatarMixerClientData();

+    int parseData(const QByteArray& packet);
+
     bool hasSentIdentityBetweenKeyFrames() const { return _hasSentIdentityBetweenKeyFrames; }
     void setHasSentIdentityBetweenKeyFrames(bool hasSentIdentityBetweenKeyFrames)
@@ -25,11 +28,14 @@ public:
     bool hasSentBillboardBetweenKeyFrames() const { return _hasSentBillboardBetweenKeyFrames; }
     void setHasSentBillboardBetweenKeyFrames(bool hasSentBillboardBetweenKeyFrames)
         { _hasSentBillboardBetweenKeyFrames = hasSentBillboardBetweenKeyFrames; }

+    AvatarData& getAvatar() { return _avatar; }
+
 private:

     bool _hasSentIdentityBetweenKeyFrames;
     bool _hasSentBillboardBetweenKeyFrames;
+    AvatarData _avatar;
 };

 #endif /* defined(__hifi__AvatarMixerClientData__) */
@@ -156,7 +156,7 @@ void OctreeQueryNode::resetOctreePacket(bool lastWasSurpressed) {
     _octreePacketWaiting = false;
 }

-void OctreeQueryNode::writeToPacket(const unsigned char* buffer, int bytes) {
+void OctreeQueryNode::writeToPacket(const unsigned char* buffer, unsigned int bytes) {
     // compressed packets include lead bytes which contain compressed size, this allows packing of
     // multiple compressed portions together
     if (_currentPacketIsCompressed) {

@@ -31,16 +31,16 @@ public:

     void resetOctreePacket(bool lastWasSurpressed = false); // resets octree packet to after "V" header

-    void writeToPacket(const unsigned char* buffer, int bytes); // writes to end of packet
+    void writeToPacket(const unsigned char* buffer, unsigned int bytes); // writes to end of packet

     const unsigned char* getPacket() const { return _octreePacket; }
-    int getPacketLength() const { return (MAX_PACKET_SIZE - _octreePacketAvailableBytes); }
+    unsigned int getPacketLength() const { return (MAX_PACKET_SIZE - _octreePacketAvailableBytes); }
     bool isPacketWaiting() const { return _octreePacketWaiting; }

     bool packetIsDuplicate() const;
     bool shouldSuppressDuplicatePacket();

-    int getAvailable() const { return _octreePacketAvailableBytes; }
+    unsigned int getAvailable() const { return _octreePacketAvailableBytes; }
     int getMaxSearchLevel() const { return _maxSearchLevel; }
     void resetMaxSearchLevel() { _maxSearchLevel = 1; }
     void incrementMaxSearchLevel() { _maxSearchLevel++; }
@@ -92,11 +92,11 @@ private:
     bool _viewSent;
     unsigned char* _octreePacket;
     unsigned char* _octreePacketAt;
-    int _octreePacketAvailableBytes;
+    unsigned int _octreePacketAvailableBytes;
     bool _octreePacketWaiting;

     unsigned char* _lastOctreePacket;
-    int _lastOctreePacketLength;
+    unsigned int _lastOctreePacketLength;
     int _duplicatePacketCount;
     quint64 _firstSuppressedPacket;

@@ -54,15 +54,13 @@ bool OctreeSendThread::process() {

             nodeData = (OctreeQueryNode*) node->getLinkedData();

-            int packetsSent = 0;
-
             // Sometimes the node data has not yet been linked, in which case we can't really do anything
             if (nodeData) {
                 bool viewFrustumChanged = nodeData->updateCurrentViewFrustum();
                 if (_myServer->wantsDebugSending() && _myServer->wantsVerboseDebug()) {
                     printf("nodeData->updateCurrentViewFrustum() changed=%s\n", debug::valueOf(viewFrustumChanged));
                 }
-                packetsSent = packetDistributor(node, nodeData, viewFrustumChanged);
+                packetDistributor(node, nodeData, viewFrustumChanged);
             }
         } else {
             _nodeMissingCount++;
@@ -143,9 +141,10 @@ int OctreeSendThread::handlePacketSend(const SharedNodePointer& node, OctreeQuer
         // Send the stats message to the client
         unsigned char* statsMessage = nodeData->stats.getStatsMessage();
         int statsMessageLength = nodeData->stats.getStatsMessageLength();
+        int piggyBackSize = nodeData->getPacketLength() + statsMessageLength;

         // If the size of the stats message and the voxel message will fit in a packet, then piggyback them
-        if (nodeData->getPacketLength() + statsMessageLength < MAX_PACKET_SIZE) {
+        if (piggyBackSize < MAX_PACKET_SIZE) {

             // copy voxel message to back of stats message
             memcpy(statsMessage + statsMessageLength, nodeData->getPacket(), nodeData->getPacketLength());
@@ -494,7 +493,7 @@ int OctreeSendThread::packetDistributor(const SharedNodePointer& node, OctreeQue
                     // if for some reason the finalized size is greater than our available size, then probably the "compressed"
                     // form actually inflated beyond our padding, and in this case we will send the current packet, then
                     // write to out new packet...
-                    int writtenSize = _packetData.getFinalizedSize()
+                    unsigned int writtenSize = _packetData.getFinalizedSize()
                             + (nodeData->getCurrentPacketIsCompressed() ? sizeof(OCTREE_PACKET_INTERNAL_SECTION_SIZE) : 0);

@@ -508,7 +507,7 @@ int OctreeSendThread::packetDistributor(const SharedNodePointer& node, OctreeQue
                     }

                     if (forceDebugging || (_myServer->wantsDebugSending() && _myServer->wantsVerboseDebug())) {
-                        qDebug(">>>>>> calling writeToPacket() available=%d compressedSize=%d uncompressedSize=%d target=%d",
+                        qDebug(">>>>>> calling writeToPacket() available=%d compressedSize=%d uncompressedSize=%d target=%u",
                             nodeData->getAvailable(), _packetData.getFinalizedSize(),
                             _packetData.getUncompressedSize(), _packetData.getTargetSize());
                     }

@@ -160,7 +160,7 @@ void fsBinaryStream::received(long int sz, const char *data) {
         new_end = m_end + sz;
     }

-    if (new_end > Size(m_buffer.size())) m_buffer.resize(1.5*new_end);
+    if (new_end > Size(m_buffer.size())) m_buffer.resize((int)(1.5f * (float)new_end)); // HIFI: to get 1.5 without warnings

     memcpy(&m_buffer[0] + m_end, data, sz);
     m_end += sz;
@@ -172,7 +172,7 @@ static bool decodeInfo(fsTrackingData & _trackingData, const std::string &buffer
     success &= read_pod<double>(_trackingData.m_timestamp, buffer, start);
     unsigned char tracking_successfull = 0;
     success &= read_pod<unsigned char>( tracking_successfull, buffer, start );
-    _trackingData.m_trackingSuccessful = bool(tracking_successfull);
+    _trackingData.m_trackingSuccessful = bool(tracking_successfull != 0); // HIFI: get rid of windows warning
     return success;
 }

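The resize in fsBinaryStream::received grows the buffer by 1.5x rather than to the exact size needed. A short sketch of why that matters (geometric growth makes a long run of appends cost amortized O(1) per byte, where exact-fit resizing would be O(n^2) overall); the explicit casts mirror the warning fix in the hunk:

#include <cstring>
#include <vector>

void appendBytes(std::vector<char>& buffer, long& end, const char* data, long sz) {
    long newEnd = end + sz;
    if (newEnd > (long) buffer.size()) {
        buffer.resize((size_t)(1.5f * (float) newEnd)); // 1.5x growth, not exact-fit
    }
    std::memcpy(buffer.data() + end, data, (size_t) sz);
    end = newEnd;
}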
@@ -436,9 +436,8 @@ void Audio::handleAudioInput() {
             }
         }
         if (!_noiseGateOpen) {
-            for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
-                monoAudioSamples[i] = 0;
-            }
+            memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
            _lastInputLoudness = 0;
        }
    }

@@ -451,7 +450,7 @@ void Audio::handleAudioInput() {
         // our input loudness is 0, since we're muted
         _lastInputLoudness = 0;
     }
-
+
     // add procedural effects to the appropriate input samples
     addProceduralSounds(monoAudioSamples,
                         NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
@@ -482,9 +481,26 @@ void Audio::handleAudioInput() {

         // we need the amount of bytes in the buffer + 1 for type
         // + 12 for 3 floats for position + float for bearing + 1 attenuation byte

-        PacketType packetType = Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)
-            ? PacketTypeMicrophoneAudioWithEcho : PacketTypeMicrophoneAudioNoEcho;
+        int numAudioBytes = 0;
+
+        PacketType packetType;
+        if (_lastInputLoudness == 0) {
+            packetType = PacketTypeSilentAudioFrame;
+
+            // we need to indicate how many silent samples this is to the audio mixer
+            monoAudioSamples[0] = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
+            numAudioBytes = sizeof(int16_t);
+
+        } else {
+            numAudioBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
+
+            if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
+                packetType = PacketTypeMicrophoneAudioWithEcho;
+            } else {
+                packetType = PacketTypeMicrophoneAudioNoEcho;
+            }
+        }

         char* currentPacketPtr = monoAudioDataPacket + populatePacketHeader(monoAudioDataPacket, packetType);

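PacketTypeSilentAudioFrame replaces a full zero-filled buffer on the wire with a single int16 sample count. A sketch of the framing idea; the type values and frame size here are illustrative, not hifi's actual wire format:

#include <cstdint>
#include <cstring>

constexpr int SAMPLES_PER_CHANNEL = 512;
constexpr uint8_t MIC_AUDIO_FRAME = 1;   // hypothetical packet type values
constexpr uint8_t SILENT_FRAME = 2;

// returns the number of bytes to transmit, starting at the 1-byte type
size_t encodeAudioPayload(char* out, const int16_t* samples, float loudness) {
    if (loudness == 0.0f) {
        out[0] = (char) SILENT_FRAME;
        int16_t silentSamples = SAMPLES_PER_CHANNEL; // receiver advances its ring buffer by this many
        std::memcpy(out + 1, &silentSamples, sizeof(silentSamples));
        return 1 + sizeof(silentSamples);            // 3 bytes instead of 1025
    }
    out[0] = (char) MIC_AUDIO_FRAME;
    std::memcpy(out + 1, samples, SAMPLES_PER_CHANNEL * sizeof(int16_t));
    return 1 + SAMPLES_PER_CHANNEL * sizeof(int16_t);
}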
@@ -495,13 +511,11 @@ void Audio::handleAudioInput() {
             // memcpy our orientation
             memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
             currentPacketPtr += sizeof(headOrientation);

-            nodeList->writeDatagram(monoAudioDataPacket,
-                                    NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes,
-                                    audioMixer);
+            nodeList->writeDatagram(monoAudioDataPacket, numAudioBytes + leadingBytes, audioMixer);

             Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
-                .updateValue(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
+                .updateValue(numAudioBytes + leadingBytes);
         }
         delete[] inputAudioSamples;
     }
@@ -638,7 +652,14 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {

             int16_t collisionSample = (int16_t) sample;

+            _lastInputLoudness = 0;
+
             monoInput[i] = glm::clamp(monoInput[i] + collisionSample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);

+            _lastInputLoudness += fabsf(monoInput[i]);
+            _lastInputLoudness /= numSamples;
+            _lastInputLoudness /= MAX_SAMPLE_VALUE;
+
             _localProceduralSamples[i] = glm::clamp(_localProceduralSamples[i] + collisionSample,
                                                     MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);

@@ -662,7 +683,14 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {

             int16_t collisionSample = (int16_t) sample;

+            _lastInputLoudness = 0;
+
             monoInput[i] = glm::clamp(monoInput[i] + collisionSample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);

+            _lastInputLoudness += fabsf(monoInput[i]);
+            _lastInputLoudness /= numSamples;
+            _lastInputLoudness /= MAX_SAMPLE_VALUE;
+
             _localProceduralSamples[i] = glm::clamp(_localProceduralSamples[i] + collisionSample,
                                                     MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);

@@ -185,10 +185,11 @@ int MetavoxelSystem::SimulateVisitor::visit(MetavoxelInfo& info) {
     }
     QRgb color = info.inputValues.at(0).getInlineValue<QRgb>();
     QRgb normal = info.inputValues.at(1).getInlineValue<QRgb>();
-    int alpha = qAlpha(color);
+    quint8 alpha = qAlpha(color);
     if (alpha > 0) {
         Point point = { glm::vec4(info.minimum + glm::vec3(info.size, info.size, info.size) * 0.5f, info.size),
-            { qRed(color), qGreen(color), qBlue(color), alpha }, { qRed(normal), qGreen(normal), qBlue(normal) } };
+            { quint8(qRed(color)), quint8(qGreen(color)), quint8(qBlue(color)), alpha },
+            { quint8(qRed(normal)), quint8(qGreen(normal)), quint8(qBlue(normal)) } };
         _points.append(point);
     }
     return STOP_RECURSION;
@@ -535,7 +535,7 @@ void runTimingTests() {
     }
     gettimeofday(&endTime, NULL);
     elapsedMsecs = diffclock(&startTime, &endTime);
-    qDebug("rand() stored in array usecs: %f", 1000.0f * elapsedMsecs / (float) numTests);
+    qDebug("rand() stored in array usecs: %f, first result:%d", 1000.0f * elapsedMsecs / (float) numTests, iResults[0]);

     // Random number generation using randFloat()
     gettimeofday(&startTime, NULL);
@@ -544,7 +544,7 @@ void runTimingTests() {
     }
     gettimeofday(&endTime, NULL);
     elapsedMsecs = diffclock(&startTime, &endTime);
-    qDebug("randFloat() stored in array usecs: %f", 1000.0f * elapsedMsecs / (float) numTests);
+    qDebug("randFloat() stored in array usecs: %f, first result: %f", 1000.0f * elapsedMsecs / (float) numTests, fResults[0]);

     // PowF function
     fTest = 1145323.2342f;
@@ -567,8 +567,8 @@ void runTimingTests() {
     }
     gettimeofday(&endTime, NULL);
     elapsedMsecs = diffclock(&startTime, &endTime);
-    qDebug("vector math usecs: %f [%f msecs total for %d tests]",
-           1000.0f * elapsedMsecs / (float) numTests, elapsedMsecs, numTests);
+    qDebug("vector math usecs: %f [%f msecs total for %d tests], last result:%f",
+           1000.0f * elapsedMsecs / (float) numTests, elapsedMsecs, numTests, distance);

     // Vec3 test
     glm::vec3 vecA(randVector()), vecB(randVector());
@@ -581,7 +581,7 @@ void runTimingTests() {
     }
     gettimeofday(&endTime, NULL);
     elapsedMsecs = diffclock(&startTime, &endTime);
-    qDebug("vec3 assign and dot() usecs: %f", 1000.0f * elapsedMsecs / (float) numTests);
+    qDebug("vec3 assign and dot() usecs: %f, last result:%f", 1000.0f * elapsedMsecs / (float) numTests, result);
 }

 float loadSetting(QSettings* settings, const char* name, float defaultValue) {

@@ -243,7 +243,7 @@ VoxelSystem::~VoxelSystem() {

 // This is called by the main application thread on both the initialization of the application and when
 // the preferences dialog box is called/saved
-void VoxelSystem::setMaxVoxels(int maxVoxels) {
+void VoxelSystem::setMaxVoxels(unsigned long maxVoxels) {
     if (maxVoxels == _maxVoxels) {
         return;
     }
@@ -550,7 +550,7 @@ int VoxelSystem::parseData(const QByteArray& packet) {
     int flightTime = arrivedAt - sentAt;

     OCTREE_PACKET_INTERNAL_SECTION_SIZE sectionLength = 0;
-    int dataBytes = packet.size() - (numBytesPacketHeader + OCTREE_PACKET_EXTRA_HEADERS_SIZE);
+    unsigned int dataBytes = packet.size() - (numBytesPacketHeader + OCTREE_PACKET_EXTRA_HEADERS_SIZE);

     int subsection = 1;
     while (dataBytes > 0) {
@@ -576,7 +576,7 @@ int VoxelSystem::parseData(const QByteArray& packet) {
                 packetData.loadFinalizedContent(dataAt, sectionLength);
                 if (Application::getInstance()->getLogger()->extraDebugging()) {
                     qDebug("VoxelSystem::parseData() ... Got Packet Section"
-                           " color:%s compressed:%s sequence: %u flight:%d usec size:%d data:%d"
+                           " color:%s compressed:%s sequence: %u flight:%d usec size:%d data:%u"
                            " subsection:%d sectionLength:%d uncompressed:%d",
                            debug::valueOf(packetIsColored), debug::valueOf(packetIsCompressed),
                            sequence, flightTime, packet.size(), dataBytes, subsection, sectionLength,
@@ -919,13 +919,11 @@ void VoxelSystem::copyWrittenDataSegmentToReadArrays(glBufferIndex segmentStart,
     // Depending on if we're using per vertex normals, we will need more or less vertex points per voxel
     int vertexPointsPerVoxel = GLOBAL_NORMALS_VERTEX_POINTS_PER_VOXEL;

-    GLintptr segmentStartAt = segmentStart * vertexPointsPerVoxel * sizeof(GLfloat);
     GLsizeiptr segmentSizeBytes = segmentLength * vertexPointsPerVoxel * sizeof(GLfloat);
     GLfloat* readVerticesAt = _readVerticesArray + (segmentStart * vertexPointsPerVoxel);
     GLfloat* writeVerticesAt = _writeVerticesArray + (segmentStart * vertexPointsPerVoxel);
     memcpy(readVerticesAt, writeVerticesAt, segmentSizeBytes);

-    segmentStartAt = segmentStart * vertexPointsPerVoxel * sizeof(GLubyte);
     segmentSizeBytes = segmentLength * vertexPointsPerVoxel * sizeof(GLubyte);
     GLubyte* readColorsAt = _readColorsArray + (segmentStart * vertexPointsPerVoxel);
     GLubyte* writeColorsAt = _writeColorsArray + (segmentStart * vertexPointsPerVoxel);
@@ -1958,10 +1956,8 @@ bool VoxelSystem::showAllSubTreeOperation(OctreeElement* element, void* extraDat
     // If we've culled at least once, then we will use the status of this voxel in the last culled frustum to determine
     // how to proceed. If we've never culled, then we just consider all these voxels to be UNKNOWN so that we will not
     // consider that case.
-    ViewFrustum::location inLastCulledFrustum;
-
     if (args->culledOnce && args->wantDeltaFrustums) {
-        inLastCulledFrustum = voxel->inFrustum(args->lastViewFrustum);
+        ViewFrustum::location inLastCulledFrustum = voxel->inFrustum(args->lastViewFrustum);

         // if this node is fully inside our last culled view frustum, then we don't need to recurse further
         if (inLastCulledFrustum == ViewFrustum::INSIDE) {
@@ -2001,7 +1997,7 @@ bool VoxelSystem::hideOutOfViewOperation(OctreeElement* element, void* extraData
     // If we've culled at least once, then we will use the status of this voxel in the last culled frustum to determine
     // how to proceed. If we've never culled, then we just consider all these voxels to be UNKNOWN so that we will not
     // consider that case.
-    ViewFrustum::location inLastCulledFrustum;
+    ViewFrustum::location inLastCulledFrustum = ViewFrustum::OUTSIDE; // assume outside, but should get reset to actual value

     if (args->culledOnce && args->wantDeltaFrustums) {
         inLastCulledFrustum = voxel->inFrustum(args->lastViewFrustum);

@@ -67,7 +67,7 @@ public:

     ViewFrustum* getLastCulledViewFrustum() { return &_lastCulledViewFrustum; }

-    void setMaxVoxels(int maxVoxels);
+    void setMaxVoxels(unsigned long maxVoxels);
     long int getMaxVoxels() const { return _maxVoxels; }
     unsigned long getVoxelMemoryUsageRAM() const { return _memoryUsageRAM; }
     unsigned long getVoxelMemoryUsageVBO() const { return _memoryUsageVBO; }
@@ -263,7 +263,7 @@ private:
     bool _usePrimitiveRenderer; ///< Flag primitive renderer for use
     PrimitiveRenderer* _renderer; ///< Voxel renderer

-    static const int _sNumOctantsPerHemiVoxel = 4;
+    static const unsigned int _sNumOctantsPerHemiVoxel = 4;
     static int _sCorrectedChildIndex[8];
     static unsigned short _sSwizzledOcclusionBits[64]; ///< Swizzle value of bit pairs of the value of index
     static unsigned char _sOctantIndexToBitMask[8]; ///< Map octant index to partition mask

@@ -189,55 +189,60 @@ static TextRenderer* textRenderer(TextRendererType type) {
     return displayNameRenderer;
 }

-void Avatar::render(bool forShadowMap) {
+void Avatar::render(const glm::vec3& cameraPosition, bool forShadowMap) {
     // simple frustum check
     float boundingRadius = getBillboardSize();
-    if (Application::getInstance()->getViewFrustum()->sphereInFrustum(_position, boundingRadius) == ViewFrustum::OUTSIDE) {
+    if (Application::getInstance()->getViewFrustum()->sphereInFrustum(cameraPosition, boundingRadius) == ViewFrustum::OUTSIDE) {
         return;
     }

-    glm::vec3 toTarget = _position - Application::getInstance()->getAvatar()->getPosition();
-    float lengthToTarget = glm::length(toTarget);
+    glm::vec3 toTarget = cameraPosition - Application::getInstance()->getAvatar()->getPosition();
+    float distanceToTarget = glm::length(toTarget);

     {
-        // glow when moving in the distance
-
+        // glow when moving far away
         const float GLOW_DISTANCE = 20.0f;
-        Glower glower(_moving && lengthToTarget > GLOW_DISTANCE && !forShadowMap ? 1.0f : 0.0f);
+        Glower glower(_moving && distanceToTarget > GLOW_DISTANCE && !forShadowMap ? 1.0f : 0.0f);

         // render body
+        if (Menu::getInstance()->isOptionChecked(MenuOption::Avatars)) {
+            renderBody(forShadowMap);
+        }
         if (Menu::getInstance()->isOptionChecked(MenuOption::RenderSkeletonCollisionProxies)) {
             _skeletonModel.renderCollisionProxies(0.7f);
         }
         if (Menu::getInstance()->isOptionChecked(MenuOption::RenderHeadCollisionProxies)) {
             getHead()->getFaceModel().renderCollisionProxies(0.7f);
         }
-        if (Menu::getInstance()->isOptionChecked(MenuOption::Avatars)) {
-            renderBody();
-        }

-        // render voice intensity sphere for avatars that are farther away
-        const float MAX_SPHERE_ANGLE = 10.f * RADIANS_PER_DEGREE;
-        const float MIN_SPHERE_ANGLE = 1.f * RADIANS_PER_DEGREE;
-        const float MIN_SPHERE_SIZE = 0.01f;
-        const float SPHERE_LOUDNESS_SCALING = 0.0005f;
-        const float SPHERE_COLOR[] = { 0.5f, 0.8f, 0.8f };
-        float height = getSkeletonHeight();
-        glm::vec3 delta = height * (getHead()->getCameraOrientation() * IDENTITY_UP) / 2.f;
-        float angle = abs(angleBetween(toTarget + delta, toTarget - delta));
-        float sphereRadius = getHead()->getAverageLoudness() * SPHERE_LOUDNESS_SCALING;
-
-        if (!forShadowMap && (sphereRadius > MIN_SPHERE_SIZE) && (angle < MAX_SPHERE_ANGLE) && (angle > MIN_SPHERE_ANGLE)) {
-            glColor4f(SPHERE_COLOR[0], SPHERE_COLOR[1], SPHERE_COLOR[2], 1.f - angle / MAX_SPHERE_ANGLE);
-            glPushMatrix();
-            glTranslatef(_position.x, _position.y, _position.z);
-            glScalef(height, height, height);
-            glutSolidSphere(sphereRadius, 15, 15);
-            glPopMatrix();
+        // quick check before falling into the code below:
+        // (a 10 degree breadth of an almost 2 meter avatar kicks in at about 12m)
+        const float MIN_VOICE_SPHERE_DISTANCE = 12.f;
+        if (distanceToTarget > MIN_VOICE_SPHERE_DISTANCE) {
+            // render voice intensity sphere for avatars that are farther away
+            const float MAX_SPHERE_ANGLE = 10.f * RADIANS_PER_DEGREE;
+            const float MIN_SPHERE_ANGLE = 1.f * RADIANS_PER_DEGREE;
+            const float MIN_SPHERE_SIZE = 0.01f;
+            const float SPHERE_LOUDNESS_SCALING = 0.0005f;
+            const float SPHERE_COLOR[] = { 0.5f, 0.8f, 0.8f };
+            float height = getSkeletonHeight();
+            glm::vec3 delta = height * (getHead()->getCameraOrientation() * IDENTITY_UP) / 2.f;
+            float angle = abs(angleBetween(toTarget + delta, toTarget - delta));
+            float sphereRadius = getHead()->getAverageLoudness() * SPHERE_LOUDNESS_SCALING;
+
+            if (!forShadowMap && (sphereRadius > MIN_SPHERE_SIZE) && (angle < MAX_SPHERE_ANGLE) && (angle > MIN_SPHERE_ANGLE)) {
+                glColor4f(SPHERE_COLOR[0], SPHERE_COLOR[1], SPHERE_COLOR[2], 1.f - angle / MAX_SPHERE_ANGLE);
+                glPushMatrix();
+                glTranslatef(_position.x, _position.y, _position.z);
+                glScalef(height, height, height);
+                glutSolidSphere(sphereRadius, 15, 15);
+                glPopMatrix();
+            }
         }
     }

     const float DISPLAYNAME_DISTANCE = 10.0f;
-    setShowDisplayName(!forShadowMap && lengthToTarget < DISPLAYNAME_DISTANCE);
+    setShowDisplayName(!forShadowMap && distanceToTarget < DISPLAYNAME_DISTANCE);
     if (forShadowMap) {
         return;
     }
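A quick check of the "kicks in at about 12m" comment: the angle subtended by an avatar of height h at distance d is roughly 2*atan(h / (2d)), so a ~1.9 m avatar (the "almost 2 meter" figure is illustrative) drops below the 10 degree MAX_SPHERE_ANGLE near 11-12 m:

#include <cmath>
#include <cstdio>

int main() {
    const float height = 1.9f; // meters
    for (float d = 8.0f; d <= 16.0f; d += 2.0f) {
        // subtended angle in degrees
        float degrees = 2.0f * std::atan(height / (2.0f * d)) * 180.0f / 3.14159265f;
        std::printf("distance %4.1f m -> %4.1f degrees\n", d, degrees);
    }
    return 0;
}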
@@ -257,7 +262,6 @@ void Avatar::render(bool forShadowMap) {
     glm::vec3 chatAxis = glm::axis(chatRotation);
     glRotatef(glm::degrees(glm::angle(chatRotation)), chatAxis.x, chatAxis.y, chatAxis.z);

-
     glColor3f(0.f, 0.8f, 0.f);
     glRotatef(180.f, 0.f, 1.f, 0.f);
     glRotatef(180.f, 0.f, 0.f, 1.f);

@@ -302,9 +306,12 @@ glm::quat Avatar::computeRotationFromBodyToWorldUp(float proportion) const {
     return glm::angleAxis(angle * proportion, axis);
 }

-void Avatar::renderBody() {
+void Avatar::renderBody(bool forShadowMap) {
     if (_shouldRenderBillboard || !(_skeletonModel.isRenderable() && getHead()->getFaceModel().isRenderable())) {
         // render the billboard until both models are loaded
+        if (forShadowMap) {
+            return;
+        }
         renderBillboard();
         return;
     }
@@ -626,11 +633,11 @@ void Avatar::setBillboard(const QByteArray& billboard) {
     _billboardTexture.reset();
 }

-int Avatar::parseData(const QByteArray& packet) {
+int Avatar::parseDataAtOffset(const QByteArray& packet, int offset) {
     // change in position implies movement
     glm::vec3 oldPosition = _position;

-    int bytesRead = AvatarData::parseData(packet);
+    int bytesRead = AvatarData::parseDataAtOffset(packet, offset);

     const float MOVE_DISTANCE_THRESHOLD = 0.001f;
     _moving = glm::distance(oldPosition, _position) > MOVE_DISTANCE_THRESHOLD;

@@ -74,7 +74,7 @@ public:

     void init();
     void simulate(float deltaTime);
-    void render(bool forShadowMap = false);
+    virtual void render(const glm::vec3& cameraPosition, bool forShadowMap);

     //setters
     void setDisplayingLookatVectors(bool displayingLookatVectors) { getHead()->setRenderLookatVectors(displayingLookatVectors); }
@@ -133,7 +133,7 @@ public:

     void setShowDisplayName(bool showDisplayName);

-    int parseData(const QByteArray& packet);
+    int parseDataAtOffset(const QByteArray& packet, int offset);

     static void renderJointConnectingCone(glm::vec3 position1, glm::vec3 position2, float radius1, float radius2);

|
@ -181,6 +181,7 @@ protected:
|
|||
float getPelvisToHeadLength() const;
|
||||
|
||||
void renderDisplayName();
|
||||
virtual void renderBody(bool forShadowMap);
|
||||
|
||||
private:
|
||||
|
||||
|
@ -189,7 +190,6 @@ private:
|
|||
bool _shouldRenderBillboard;
|
||||
bool _modelsDirty;
|
||||
|
||||
void renderBody();
|
||||
void renderBillboard();
|
||||
|
||||
float getBillboardSize() const;
|
||||
|
|
|
@@ -77,7 +77,7 @@ void AvatarManager::renderAvatars(bool forShadowMapOrMirror, bool selfAvatarOnly
         "Application::renderAvatars()");
     bool renderLookAtVectors = Menu::getInstance()->isOptionChecked(MenuOption::LookAtVectors);
-
+    glm::vec3 cameraPosition = Application::getInstance()->getCamera()->getPosition();

     if (!selfAvatarOnly) {
         foreach (const AvatarSharedPointer& avatarPointer, _avatarHash) {
@@ -85,17 +85,13 @@ void AvatarManager::renderAvatars(bool forShadowMapOrMirror, bool selfAvatarOnly
             if (!avatar->isInitialized()) {
                 continue;
             }
-            if (avatar == static_cast<Avatar*>(_myAvatar.data())) {
-                _myAvatar->render(forShadowMapOrMirror);
-            } else {
-                avatar->render(forShadowMapOrMirror);
-            }
+            avatar->render(cameraPosition, forShadowMapOrMirror);
             avatar->setDisplayingLookatVectors(renderLookAtVectors);
         }
-        renderAvatarFades(forShadowMapOrMirror);
+        renderAvatarFades(cameraPosition, forShadowMapOrMirror);
     } else {
         // just render myAvatar
-        _myAvatar->render(forShadowMapOrMirror);
+        _myAvatar->render(cameraPosition, forShadowMapOrMirror);
         _myAvatar->setDisplayingLookatVectors(renderLookAtVectors);
     }
 }
@@ -118,13 +114,15 @@ void AvatarManager::simulateAvatarFades(float deltaTime) {
     }
 }

-void AvatarManager::renderAvatarFades(bool forShadowMap) {
+void AvatarManager::renderAvatarFades(const glm::vec3& cameraPosition, bool forShadowMap) {
     // render avatar fades
     Glower glower(forShadowMap ? 0.0f : 1.0f);

     foreach(const AvatarSharedPointer& fadingAvatar, _avatarFades) {
         Avatar* avatar = static_cast<Avatar*>(fadingAvatar.data());
-        avatar->render(forShadowMap);
+        if (avatar != static_cast<Avatar*>(_myAvatar.data())) {
+            avatar->render(cameraPosition, forShadowMap);
+        }
     }
 }

@@ -150,14 +148,11 @@ void AvatarManager::processAvatarMixerDatagram(const QByteArray& datagram, const
 void AvatarManager::processAvatarDataPacket(const QByteArray &datagram, const QWeakPointer<Node> &mixerWeakPointer) {
     int bytesRead = numBytesForPacketHeader(datagram);

-    QByteArray dummyAvatarByteArray = byteArrayWithPopulatedHeader(PacketTypeAvatarData);
-    int numDummyHeaderBytes = dummyAvatarByteArray.size();
-    int numDummyHeaderBytesWithoutUUID = numDummyHeaderBytes - NUM_BYTES_RFC4122_UUID;
-
     // enumerate over all of the avatars in this packet
     // only add them if mixerWeakPointer points to something (meaning that mixer is still around)
     while (bytesRead < datagram.size() && mixerWeakPointer.data()) {
         QUuid nodeUUID = QUuid::fromRfc4122(datagram.mid(bytesRead, NUM_BYTES_RFC4122_UUID));
         bytesRead += NUM_BYTES_RFC4122_UUID;

         AvatarSharedPointer matchingAvatar = _avatarHash.value(nodeUUID);

@@ -173,16 +168,9 @@ void AvatarManager::processAvatarDataPacket(const QByteArray &datagram, const QW
             qDebug() << "Adding avatar with UUID" << nodeUUID << "to AvatarManager hash.";
         }

-        // copy the rest of the packet to the avatarData holder so we can read the next Avatar from there
-        dummyAvatarByteArray.resize(numDummyHeaderBytesWithoutUUID);
-
-        // make this Avatar's UUID the UUID in the packet and tack the remaining data onto the end
-        dummyAvatarByteArray.append(datagram.mid(bytesRead));
-
         // have the matching (or new) avatar parse the data from the packet
-        bytesRead += matchingAvatar->parseData(dummyAvatarByteArray) - numDummyHeaderBytesWithoutUUID;
+        bytesRead += matchingAvatar->parseDataAtOffset(datagram, bytesRead);
     }
 }

 void AvatarManager::processAvatarIdentityPacket(const QByteArray &packet) {
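The parseDataAtOffset refactor removes the dummy-header splice: each avatar record is now parsed in place and reports how many bytes it consumed, so one datagram holding several avatars is walked with no intermediate copies. A sketch of the walk, with a hypothetical stand-in parser rather than hifi's AvatarData API:

#include <QByteArray>
#include <QUuid>

// stand-in per-avatar parser: consumes one record, returns bytes read;
// here we pretend every record is a fixed 32 bytes
static int parseAvatarAtOffset(const QByteArray& datagram, int offset) {
    return qMin(32, datagram.size() - offset);
}

void processAvatarDataPacket(const QByteArray& datagram, int headerBytes) {
    const int UUID_BYTES = 16; // RFC 4122 UUIDs are 16 bytes
    int bytesRead = headerBytes;
    while (bytesRead < datagram.size()) {
        QUuid nodeUUID = QUuid::fromRfc4122(datagram.mid(bytesRead, UUID_BYTES));
        bytesRead += UUID_BYTES;
        bytesRead += parseAvatarAtOffset(datagram, bytesRead); // parse in place, no copy
    }
}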
@@ -45,7 +45,7 @@ private:
     void processKillAvatar(const QByteArray& datagram);

     void simulateAvatarFades(float deltaTime);
-    void renderAvatarFades(bool forShadowMap);
+    void renderAvatarFades(const glm::vec3& cameraPosition, bool forShadowMap);

     // virtual override
     AvatarHash::iterator erase(const AvatarHash::iterator& iterator);

@@ -206,8 +206,8 @@ void Hand::collideAgainstOurself() {
         }
         // ignoring everything below the parent of the parent of the last free joint
         int skipIndex = skeletonModel.getParentJointIndex(skeletonModel.getParentJointIndex(
-                skeletonModel.getLastFreeJointIndex(((int)i == leftPalmIndex) ? skeletonModel.getLeftHandJointIndex() :
-                    ((int)i == rightPalmIndex) ? skeletonModel.getRightHandJointIndex() : -1)));
+                skeletonModel.getLastFreeJointIndex((int(i) == leftPalmIndex) ? skeletonModel.getLeftHandJointIndex() :
+                    (int(i) == rightPalmIndex) ? skeletonModel.getRightHandJointIndex() : -1)));
 
         handCollisions.clear();
         if (_owningAvatar->findSphereCollisions(palm.getPosition(), scaledPalmRadius, handCollisions, skipIndex)) {
@@ -90,8 +90,7 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
     _saccade += (_saccadeTarget - _saccade) * 0.50f;
 
     const float AUDIO_AVERAGING_SECS = 0.05f;
-    _averageLoudness = (1.f - deltaTime / AUDIO_AVERAGING_SECS) * _averageLoudness +
-        (deltaTime / AUDIO_AVERAGING_SECS) * _audioLoudness;
+    _averageLoudness = glm::mix(_averageLoudness, _audioLoudness, glm::min(deltaTime / AUDIO_AVERAGING_SECS, 1.0f));
 
     // Detect transition from talking to not; force blink after that and a delay
     bool forceBlink = false;
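A note on the change above: the old two-term average is an exponential smoother whose blend factor deltaTime / AUDIO_AVERAGING_SECS can exceed 1 on a slow frame, flipping the first coefficient negative and overshooting the target. glm::mix with the factor clamped by glm::min is the same smoother made safe. A standalone sketch (not from this changeset) of the difference:

    #include <algorithm>
    #include <cstdio>

    int main() {
        const float AUDIO_AVERAGING_SECS = 0.05f;
        float average = 0.0f;      // smoothed loudness so far
        float current = 1.0f;      // this frame's loudness
        float dt = 0.1f;           // a slow frame: dt / AUDIO_AVERAGING_SECS == 2

        float blend = dt / AUDIO_AVERAGING_SECS;
        float unclamped = (1.f - blend) * average + blend * current;           // overshoots to 2.0
        float clamped = average + (current - average) * std::min(blend, 1.f);  // lands on 1.0, like glm::mix
        printf("unclamped: %f clamped: %f\n", unclamped, clamped);
        return 0;
    }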
@@ -85,6 +85,7 @@ void MyAvatar::reset() {
 
     setVelocity(glm::vec3(0,0,0));
     setThrust(glm::vec3(0,0,0));
+    setOrientation(glm::quat(glm::vec3(0,0,0)));
 }
 
 void MyAvatar::setMoveTarget(const glm::vec3 moveTarget) {

@@ -453,68 +454,14 @@ void MyAvatar::renderDebugBodyPoints() {
 
 
 }
-void MyAvatar::render(bool forShadowMapOrMirror) {
-
+// virtual
+void MyAvatar::render(const glm::vec3& cameraPosition, bool forShadowMapOrMirror) {
     // don't render if we've been asked to disable local rendering
     if (!_shouldRender) {
         return; // exit early
     }
-
-    if (Menu::getInstance()->isOptionChecked(MenuOption::Avatars)) {
-        renderBody(forShadowMapOrMirror);
-    }
-    if (Menu::getInstance()->isOptionChecked(MenuOption::RenderSkeletonCollisionProxies)) {
-        _skeletonModel.renderCollisionProxies(0.8f);
-    }
-    if (Menu::getInstance()->isOptionChecked(MenuOption::RenderHeadCollisionProxies)) {
-        getHead()->getFaceModel().renderCollisionProxies(0.8f);
-    }
-    setShowDisplayName(!forShadowMapOrMirror);
-    if (forShadowMapOrMirror) {
-        return;
-    }
-    renderDisplayName();
-
-    if (!_chatMessage.empty()) {
-        int width = 0;
-        int lastWidth = 0;
-        for (string::iterator it = _chatMessage.begin(); it != _chatMessage.end(); it++) {
-            width += (lastWidth = textRenderer()->computeWidth(*it));
-        }
-        glPushMatrix();
-
-        glm::vec3 chatPosition = getHead()->getEyePosition() + getBodyUpDirection() * CHAT_MESSAGE_HEIGHT * _scale;
-        glTranslatef(chatPosition.x, chatPosition.y, chatPosition.z);
-        glm::quat chatRotation = Application::getInstance()->getCamera()->getRotation();
-        glm::vec3 chatAxis = glm::axis(chatRotation);
-        glRotatef(glm::degrees(glm::angle(chatRotation)), chatAxis.x, chatAxis.y, chatAxis.z);
-
-        glColor3f(0.f, 0.8f, 0.f);
-        glRotatef(180.f, 0.f, 1.f, 0.f);
-        glRotatef(180.f, 0.f, 0.f, 1.f);
-        glScalef(_scale * CHAT_MESSAGE_SCALE, _scale * CHAT_MESSAGE_SCALE, 1.0f);
-
-        glDisable(GL_LIGHTING);
-        glDepthMask(false);
-        if (_keyState == NO_KEY_DOWN) {
-            textRenderer()->draw(-width / 2.0f, 0, _chatMessage.c_str());
-
-        } else {
-            // rather than using substr and allocating a new string, just replace the last
-            // character with a null, then restore it
-            int lastIndex = _chatMessage.size() - 1;
-            char lastChar = _chatMessage[lastIndex];
-            _chatMessage[lastIndex] = '\0';
-            textRenderer()->draw(-width / 2.0f, 0, _chatMessage.c_str());
-            _chatMessage[lastIndex] = lastChar;
-            glColor3f(0.f, 1.f, 0.f);
-            textRenderer()->draw(width / 2.0f - lastWidth, 0, _chatMessage.c_str() + lastIndex);
-        }
-        glEnable(GL_LIGHTING);
-        glDepthMask(true);
-
-        glPopMatrix();
-    }
+    // render body
+    Avatar::render(cameraPosition, forShadowMapOrMirror);
 }
 
 void MyAvatar::renderHeadMouse() const {
@@ -1189,9 +1136,9 @@ void MyAvatar::goToLocationFromResponse(const QJsonObject& jsonObject) {
         NodeList::getInstance()->getDomainInfo().setHostname(domainHostnameString);
 
         // orient the user to face the target
-        glm::quat newOrientation = glm::quat(glm::vec3(orientationItems[0].toFloat(),
-                                                       orientationItems[1].toFloat(),
-                                                       orientationItems[2].toFloat()))
+        glm::quat newOrientation = glm::quat(glm::radians(glm::vec3(orientationItems[0].toFloat(),
+                                                                    orientationItems[1].toFloat(),
+                                                                    orientationItems[2].toFloat())))
             * glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f));
         setOrientation(newOrientation);
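The added glm::radians call matters because glm::quat's Euler-angle constructor interprets its vec3 in radians, while the orientation items in the location response are degrees. A standalone sketch (not from this changeset) of the conversion:

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // degrees off the wire -> quaternion; without glm::radians the rotation is wildly wrong
    glm::quat orientationFromDegrees(const glm::vec3& eulerDegrees) {
        return glm::quat(glm::radians(eulerDegrees));
    }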
@@ -35,7 +35,8 @@ public:
     void simulate(float deltaTime);
     void updateFromGyros(float deltaTime);
 
-    void render(bool forShadowMapOrMirror = false);
+    void render(const glm::vec3& cameraPosition, bool forShadowMapOrMirror = false);
+    void renderBody(bool forceRenderHead);
     void renderDebugBodyPoints();
     void renderHeadMouse() const;
 

@@ -120,7 +121,6 @@ private:
     bool _billboardValid;
 
     // private methods
-    void renderBody(bool forceRenderHead);
     void updateThrust(float deltaTime);
     void updateHandMovementAndTouching(float deltaTime);
     void updateCollisionWithAvatars(float deltaTime);
@@ -140,7 +140,7 @@ void SkeletonModel::applyPalmData(int jointIndex, const QVector<int>& fingerJoin
             direction += fingerVector / length;
         }
         fingerVector = glm::inverse(palmRotation) * fingerVector * -sign;
-        IndexValue indexValue = { i, atan2f(fingerVector.z, fingerVector.x) };
+        IndexValue indexValue = { int(i), atan2f(fingerVector.z, fingerVector.x) };
         fingerIndices.append(indexValue);
     }
     qSort(fingerIndices.begin(), fingerIndices.end());
@@ -41,7 +41,7 @@ void LocalVoxelsOverlay::update(float deltatime) {
     }
 
     _tree->lockForRead();
-    if (_visible && _voxelCount != (int)_tree->getOctreeElementsCount()) {
+    if (_visible && _voxelCount != _tree->getOctreeElementsCount()) {
         _voxelCount = _tree->getOctreeElementsCount();
         _voxelSystem->forceRedrawEntireTree();
     }

@@ -43,7 +43,7 @@ private:
 
     QString _treeName;
     StrongVoxelTreePointer _tree; // so that the tree doesn't get freed
-    int _voxelCount;
+    unsigned long _voxelCount;
     StrongVoxelSystemPointer _voxelSystem;
 };
@@ -55,6 +55,20 @@ int AudioRingBuffer::parseData(const QByteArray& packet) {
     return writeData(packet.data() + numBytesPacketHeader, packet.size() - numBytesPacketHeader);
 }
 
+float AudioRingBuffer::averageLoudnessForBoundarySamples(int numSamples) {
+    // ForBoundarySamples means that we expect the number of samples not to roll off the end of the ring buffer
+    float averageLoudness = 0;
+
+    for (int i = 0; i < numSamples; ++i) {
+        averageLoudness += fabsf(_nextOutput[i]);
+    }
+
+    averageLoudness /= numSamples;
+    averageLoudness /= MAX_SAMPLE_VALUE;
+
+    return averageLoudness;
+}
+
 qint64 AudioRingBuffer::readSamples(int16_t* destination, qint64 maxSamples) {
     return readData((char*) destination, maxSamples * sizeof(int16_t));
 }
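averageLoudnessForBoundarySamples above is a mean of absolute sample values scaled into roughly [0, 1]. A standalone sketch (not from this changeset) of the same computation on a flat buffer; MAX_SAMPLE_VALUE is assumed here to be the 16-bit full-scale value:

    #include <cmath>
    #include <cstdint>

    float averageLoudness(const int16_t* samples, int numSamples) {
        const float MAX_SAMPLE_VALUE = 32767.0f; // assumed full scale for signed 16-bit audio
        float sum = 0.0f;
        for (int i = 0; i < numSamples; ++i) {
            sum += fabsf(samples[i]);                 // accumulate absolute amplitude
        }
        return (sum / numSamples) / MAX_SAMPLE_VALUE; // mean, normalized to [0, 1]
    }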
@@ -121,9 +135,6 @@ qint64 AudioRingBuffer::writeData(const char* data, qint64 maxSize) {
 }
 
 int16_t& AudioRingBuffer::operator[](const int index) {
-    // make sure this is a valid index
-    assert(index > -_sampleCapacity && index < _sampleCapacity);
-
     return *shiftedPositionAccomodatingWrap(_nextOutput, index);
 }
 

@@ -145,6 +156,21 @@ unsigned int AudioRingBuffer::samplesAvailable() const {
     }
 }
 
+void AudioRingBuffer::addSilentFrame(int numSilentSamples) {
+    // memset zeroes into the buffer, accommodate a wrap around the end
+    // push the _endOfLastWrite to the correct spot
+    if (_endOfLastWrite + numSilentSamples <= _buffer + _sampleCapacity) {
+        memset(_endOfLastWrite, 0, numSilentSamples * sizeof(int16_t));
+        _endOfLastWrite += numSilentSamples;
+    } else {
+        int numSamplesToEnd = (_buffer + _sampleCapacity) - _endOfLastWrite;
+        memset(_endOfLastWrite, 0, numSamplesToEnd * sizeof(int16_t));
+        memset(_buffer, 0, (numSilentSamples - numSamplesToEnd) * sizeof(int16_t));
+
+        _endOfLastWrite = _buffer + (numSilentSamples - numSamplesToEnd);
+    }
+}
+
 bool AudioRingBuffer::isNotStarvedOrHasMinimumSamples(unsigned int numRequiredSamples) const {
     if (!_isStarved) {
         return true;
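addSilentFrame splits the zero-fill into at most two memsets: one to the end of the backing array and one from its start, exactly as a ring buffer requires. A standalone sketch (not from this changeset) of the same wrap-around fill on a self-contained struct, easier to unit-test than the real buffer:

    #include <cstdint>
    #include <cstring>

    struct Ring {
        int16_t* buffer;   // backing array
        int16_t* end;      // current write position
        int capacity;      // capacity in samples
    };

    void addSilence(Ring& ring, int numSamples) {
        if (ring.end + numSamples <= ring.buffer + ring.capacity) {
            // fits without wrapping: one contiguous zero-fill
            memset(ring.end, 0, numSamples * sizeof(int16_t));
            ring.end += numSamples;
        } else {
            // wraps: zero the tail, then the head, and land the write position past the wrap
            int toEnd = (int)((ring.buffer + ring.capacity) - ring.end);
            memset(ring.end, 0, toEnd * sizeof(int16_t));
            memset(ring.buffer, 0, (numSamples - toEnd) * sizeof(int16_t));
            ring.end = ring.buffer + (numSamples - toEnd);
        }
    }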
@@ -45,6 +45,12 @@ public:
     int getSampleCapacity() const { return _sampleCapacity; }
 
     int parseData(const QByteArray& packet);
+
+    // assume callers using this will never wrap around the end
+    const int16_t* getNextOutput() { return _nextOutput; }
+    const int16_t* getBuffer() { return _buffer; }
+
+    float averageLoudnessForBoundarySamples(int numSamples);
 
     qint64 readSamples(int16_t* destination, qint64 maxSamples);
     qint64 writeSamples(const int16_t* source, qint64 maxSamples);

@@ -64,6 +70,8 @@ public:
     void setIsStarved(bool isStarved) { _isStarved = isStarved; }
 
     bool hasStarted() const { return _hasStarted; }
+
+    void addSilentFrame(int numSilentSamples);
 
 protected:
     // disallow copying of AudioRingBuffer objects
     AudioRingBuffer(const AudioRingBuffer&);
@@ -38,16 +38,27 @@ PositionalAudioRingBuffer::~PositionalAudioRingBuffer() {
 }
 
 int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
-    QDataStream packetStream(packet);
-
-    // skip the packet header (includes the source UUID)
-    packetStream.skipRawData(numBytesForPacketHeader(packet));
-
-    packetStream.skipRawData(parsePositionalData(packet.mid(packetStream.device()->pos())));
-    packetStream.skipRawData(writeData(packet.data() + packetStream.device()->pos(),
-                                       packet.size() - packetStream.device()->pos()));
-
-    return packetStream.device()->pos();
+    int readBytes = numBytesForPacketHeader(packet);
+
+    readBytes += parsePositionalData(packet.mid(readBytes));
+
+    if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) {
+        // this source had no audio to send us, but this counts as a packet
+        // write silence equivalent to the number of silent samples they just sent us
+        int16_t numSilentSamples;
+
+        memcpy(&numSilentSamples, packet.data() + readBytes, sizeof(int16_t));
+        readBytes += sizeof(int16_t);
+
+        addSilentFrame(numSilentSamples);
+    } else {
+        // there is audio data to read
+        readBytes += writeData(packet.data() + readBytes, packet.size() - readBytes);
+    }
+
+    return readBytes;
 }
 
 int PositionalAudioRingBuffer::parsePositionalData(const QByteArray& positionalByteArray) {
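The rewritten parseData accepts two wire layouts behind the same header and positional block: a normal audio packet carries raw int16_t samples, while a PacketTypeSilentAudioFrame carries only a two-byte count of silent samples, which still advances the stream so the mixer's timing holds. A standalone sketch (hypothetical helper, not from this changeset) of pulling that count out safely:

    #include <cstdint>
    #include <cstring>

    // Layouts handled above:
    //   audio frame:  [ header | positional data | int16_t samples ... ]
    //   silent frame: [ header | positional data | int16_t numSilentSamples ]
    int16_t readSilentSampleCount(const char* packetData, int offset) {
        int16_t numSilentSamples;
        memcpy(&numSilentSamples, packetData + offset, sizeof(int16_t)); // memcpy avoids unaligned reads
        return numSilentSamples;
    }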
@@ -29,7 +29,6 @@ using namespace std;
 QNetworkAccessManager* AvatarData::networkAccessManager = NULL;
 
 AvatarData::AvatarData() :
-    NodeData(),
     _handPosition(0,0,0),
     _bodyYaw(-90.f),
     _bodyPitch(0.0f),

@@ -89,23 +88,6 @@ QByteArray AvatarData::toByteArray() {
     // Body scale
     destinationBuffer += packFloatRatioToTwoByte(destinationBuffer, _targetScale);
 
-    // Head rotation (NOTE: This needs to become a quaternion to save two bytes)
-    destinationBuffer += packFloatAngleToTwoByte(destinationBuffer, _headData->getTweakedYaw());
-    destinationBuffer += packFloatAngleToTwoByte(destinationBuffer, _headData->getTweakedPitch());
-    destinationBuffer += packFloatAngleToTwoByte(destinationBuffer, _headData->getTweakedRoll());
-
-    // Head lean X,Z (head lateral and fwd/back motion relative to torso)
-    memcpy(destinationBuffer, &_headData->_leanSideways, sizeof(_headData->_leanSideways));
-    destinationBuffer += sizeof(_headData->_leanSideways);
-    memcpy(destinationBuffer, &_headData->_leanForward, sizeof(_headData->_leanForward));
-    destinationBuffer += sizeof(_headData->_leanForward);
-
-    // Hand Position - is relative to body position
-    glm::vec3 handPositionRelative = _handPosition - _position;
-    memcpy(destinationBuffer, &handPositionRelative, sizeof(float) * 3);
-    destinationBuffer += sizeof(float) * 3;
-
     // Lookat Position
     memcpy(destinationBuffer, &_headData->_lookAtPosition, sizeof(_headData->_lookAtPosition));
     destinationBuffer += sizeof(_headData->_lookAtPosition);
@@ -178,14 +160,11 @@ QByteArray AvatarData::toByteArray() {
         }
     }
 
-    // hand data
-    destinationBuffer += HandData::encodeData(_handData, destinationBuffer);
-
     return avatarDataByteArray.left(destinationBuffer - startPosition);
 }
 
-// called on the other nodes - assigns it to my views of the others
-int AvatarData::parseData(const QByteArray& packet) {
+// read data in packet starting at byte offset and return number of bytes parsed
+int AvatarData::parseDataAtOffset(const QByteArray& packet, int offset) {
 
     // lazily allocate memory for HeadData in case we're not an Avatar instance
     if (!_headData) {

@@ -197,9 +176,8 @@ int AvatarData::parseData(const QByteArray& packet) {
         _handData = new HandData(this);
     }
 
-    // increment to push past the packet header
     const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(packet.data());
-    const unsigned char* sourceBuffer = startPosition + numBytesForPacketHeader(packet);
+    const unsigned char* sourceBuffer = startPosition + offset;
 
     // Body world position
     memcpy(&_position, sourceBuffer, sizeof(float) * 3);

@@ -213,28 +191,6 @@ int AvatarData::parseData(const QByteArray& packet) {
     // Body scale
     sourceBuffer += unpackFloatRatioFromTwoByte(sourceBuffer, _targetScale);
 
-    // Head rotation (NOTE: This needs to become a quaternion to save two bytes)
-    float headYaw, headPitch, headRoll;
-    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &headYaw);
-    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &headPitch);
-    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &headRoll);
-
-    _headData->setYaw(headYaw);
-    _headData->setPitch(headPitch);
-    _headData->setRoll(headRoll);
-
-    // Head position relative to pelvis
-    memcpy(&_headData->_leanSideways, sourceBuffer, sizeof(_headData->_leanSideways));
-    sourceBuffer += sizeof(float);
-    memcpy(&_headData->_leanForward, sourceBuffer, sizeof(_headData->_leanForward));
-    sourceBuffer += sizeof(_headData->_leanForward);
-
-    // Hand Position - is relative to body position
-    glm::vec3 handPositionRelative;
-    memcpy(&handPositionRelative, sourceBuffer, sizeof(float) * 3);
-    _handPosition = _position + handPositionRelative;
-    sourceBuffer += sizeof(float) * 3;
-
     // Lookat Position
     memcpy(&_headData->_lookAtPosition, sourceBuffer, sizeof(_headData->_lookAtPosition));
     sourceBuffer += sizeof(_headData->_lookAtPosition);
@@ -288,13 +244,13 @@ int AvatarData::parseData(const QByteArray& packet) {
     // joint data
     int jointCount = *sourceBuffer++;
     _jointData.resize(jointCount);
-    unsigned char validity;
+    unsigned char validity = 0;
     int validityBit = 0;
     for (int i = 0; i < jointCount; i++) {
         if (validityBit == 0) {
             validity = *sourceBuffer++;
         }
-        _jointData[i].valid = validity & (1 << validityBit);
+        _jointData[i].valid = (bool)(validity & (1 << validityBit));
         validityBit = (validityBit + 1) % BITS_IN_BYTE;
     }
     for (int i = 0; i < jointCount; i++) {
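The loop above decodes one validity byte per eight joints, least-significant bit first; initializing validity to 0 just silences the may-be-used-uninitialized warning, since the first iteration always refills it. A standalone sketch (not from this changeset) of the same unpacking on plain types:

    #include <cstdint>
    #include <vector>

    std::vector<bool> unpackValidity(const uint8_t* source, int jointCount) {
        const int BITS_IN_BYTE = 8;
        std::vector<bool> valid(jointCount);
        uint8_t validity = 0;
        int validityBit = 0;
        for (int i = 0; i < jointCount; i++) {
            if (validityBit == 0) {
                validity = *source++;                        // refill every eight joints
            }
            valid[i] = (validity & (1 << validityBit)) != 0; // LSB-first flag for joint i
            validityBit = (validityBit + 1) % BITS_IN_BYTE;
        }
        return valid;
    }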
@@ -304,12 +260,6 @@ int AvatarData::parseData(const QByteArray& packet) {
         }
     }
 
-    // hand data
-    if (sourceBuffer - startPosition < packet.size()) {
-        // check passed, bytes match
-        sourceBuffer += _handData->decodeRemoteData(packet.mid(sourceBuffer - startPosition));
-    }
-
     return sourceBuffer - startPosition;
 }
 

@@ -40,7 +40,6 @@ typedef unsigned long long quint64;
 
 #include <CollisionInfo.h>
 #include <RegisteredMetaTypes.h>
-#include <NodeData.h>
 
 #include "HeadData.h"
 #include "HandData.h"

@@ -74,7 +73,7 @@ class QNetworkAccessManager;
 
 class JointData;
 
-class AvatarData : public NodeData {
+class AvatarData : public QObject {
     Q_OBJECT
 
     Q_PROPERTY(glm::vec3 position READ getPosition WRITE setPosition)

@@ -97,7 +96,7 @@ class AvatarData : public NodeData {
     Q_PROPERTY(QString billboardURL READ getBillboardURL WRITE setBillboardFromURL)
 public:
     AvatarData();
-    ~AvatarData();
+    virtual ~AvatarData();
 
     const glm::vec3& getPosition() const { return _position; }
     void setPosition(const glm::vec3 position) { _position = position; }

@@ -106,7 +105,11 @@ public:
     void setHandPosition(const glm::vec3& handPosition);
 
     QByteArray toByteArray();
-    int parseData(const QByteArray& packet);
+
+    /// \param packet byte array of data
+    /// \param offset number of bytes into packet where data starts
+    /// \return number of bytes parsed
+    virtual int parseDataAtOffset(const QByteArray& packet, int offset);
 
     // Body Rotation (degrees)
     float getBodyYaw() const { return _bodyYaw; }
@@ -109,122 +109,6 @@ _owningHandData(owningHandData)
     setTrailLength(standardTrailLength);
 }
 
-// static
-int HandData::encodeData(HandData* hand, unsigned char* destinationBuffer) {
-    if (hand) {
-        return hand->encodeRemoteData(destinationBuffer);
-    }
-    // else encode empty data:
-    // One byte for zero hands
-    // One byte for error checking.
-    *destinationBuffer = 0;
-    *(destinationBuffer + 1) = 1;
-    return 2;
-}
-
-int HandData::encodeRemoteData(unsigned char* destinationBuffer) {
-    const unsigned char* startPosition = destinationBuffer;
-
-    unsigned int numPalms = 0;
-    for (unsigned int handIndex = 0; handIndex < getNumPalms(); ++handIndex) {
-        PalmData& palm = getPalms()[handIndex];
-        if (palm.isActive()) {
-            numPalms++;
-        }
-    }
-    *destinationBuffer++ = numPalms;
-
-    for (unsigned int handIndex = 0; handIndex < getNumPalms(); ++handIndex) {
-        PalmData& palm = getPalms()[handIndex];
-        if (palm.isActive()) {
-            destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, palm.getRawPosition(), fingerVectorRadix);
-            destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, palm.getRawNormal(), fingerVectorRadix);
-
-            unsigned int numFingers = 0;
-            for (unsigned int fingerIndex = 0; fingerIndex < palm.getNumFingers(); ++fingerIndex) {
-                FingerData& finger = palm.getFingers()[fingerIndex];
-                if (finger.isActive()) {
-                    numFingers++;
-                }
-            }
-            *destinationBuffer++ = numFingers;
-
-            for (unsigned int fingerIndex = 0; fingerIndex < palm.getNumFingers(); ++fingerIndex) {
-                FingerData& finger = palm.getFingers()[fingerIndex];
-                if (finger.isActive()) {
-                    destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, finger.getTipRawPosition(), fingerVectorRadix);
-                    destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, finger.getRootRawPosition(), fingerVectorRadix);
-                }
-            }
-        }
-    }
-    // One byte for error checking safety.
-    size_t checkLength = destinationBuffer - startPosition;
-    *destinationBuffer++ = (unsigned char)checkLength;
-
-    // just a double-check, while tracing a crash.
-    // decodeRemoteData(destinationBuffer - (destinationBuffer - startPosition));
-
-    return destinationBuffer - startPosition;
-}
-
-int HandData::decodeRemoteData(const QByteArray& dataByteArray) {
-    const unsigned char* startPosition;
-    const unsigned char* sourceBuffer = startPosition = reinterpret_cast<const unsigned char*>(dataByteArray.data());
-    unsigned int numPalms = *sourceBuffer++;
-
-    for (unsigned int handIndex = 0; handIndex < numPalms; ++handIndex) {
-        if (handIndex >= (unsigned int)getNumPalms())
-            addNewPalm();
-        PalmData& palm = getPalms()[handIndex];
-
-        glm::vec3 handPosition;
-        glm::vec3 handNormal;
-        sourceBuffer += unpackFloatVec3FromSignedTwoByteFixed(sourceBuffer, handPosition, fingerVectorRadix);
-        sourceBuffer += unpackFloatVec3FromSignedTwoByteFixed(sourceBuffer, handNormal, fingerVectorRadix);
-        unsigned int numFingers = *sourceBuffer++;
-
-        palm.setRawPosition(handPosition);
-        palm.setRawNormal(handNormal);
-        palm.setActive(true);
-
-        // For received data, set the sixense controller ID to match the order initialized and sent - 0 Left, 1 Right
-        palm.setSixenseID(handIndex);
-
-        for (unsigned int fingerIndex = 0; fingerIndex < numFingers; ++fingerIndex) {
-            if (fingerIndex < (unsigned int)palm.getNumFingers()) {
-                FingerData& finger = palm.getFingers()[fingerIndex];
-
-                glm::vec3 tipPosition;
-                glm::vec3 rootPosition;
-                sourceBuffer += unpackFloatVec3FromSignedTwoByteFixed(sourceBuffer, tipPosition, fingerVectorRadix);
-                sourceBuffer += unpackFloatVec3FromSignedTwoByteFixed(sourceBuffer, rootPosition, fingerVectorRadix);
-
-                finger.setRawTipPosition(tipPosition);
-                finger.setRawRootPosition(rootPosition);
-                finger.setActive(true);
-            }
-        }
-        // Turn off any fingers which weren't used.
-        for (unsigned int fingerIndex = numFingers; fingerIndex < palm.getNumFingers(); ++fingerIndex) {
-            FingerData& finger = palm.getFingers()[fingerIndex];
-            finger.setActive(false);
-        }
-    }
-    // Turn off any hands which weren't used.
-    for (unsigned int handIndex = numPalms; handIndex < getNumPalms(); ++handIndex) {
-        PalmData& palm = getPalms()[handIndex];
-        palm.setActive(false);
-    }
-
-    // One byte for error checking safety.
-    unsigned char requiredLength = (unsigned char)(sourceBuffer - startPosition);
-    unsigned char checkLength = *sourceBuffer++;
-    assert(checkLength == requiredLength);
-
-    return sourceBuffer - startPosition;
-}
-
 void HandData::setFingerTrailLength(unsigned int length) {
     for (size_t i = 0; i < getNumPalms(); ++i) {
         PalmData& palm = getPalms()[i];

@@ -63,12 +63,6 @@ public:
     void setFingerTrailLength(unsigned int length);
     void updateFingerTrails();
 
-    static int encodeData(HandData* hand, unsigned char* destinationBuffer);
-
-    // Use these for sending and receiving hand data
-    int encodeRemoteData(unsigned char* destinationBuffer);
-    int decodeRemoteData(const QByteArray& dataByteArray);
-
    /// Checks for penetration between the described sphere and the hand.
    /// \param penetratorCenter the center of the penetration test sphere
    /// \param penetratorRadius the radius of the penetration test sphere
@@ -1096,7 +1096,7 @@ QScriptValue ScriptedMetavoxelGuide::visit(QScriptContext* context, QScriptEngin
     QScriptValue minimum = infoValue.property(guide->_minimumHandle);
     MetavoxelInfo info = {
         glm::vec3(minimum.property(0).toNumber(), minimum.property(1).toNumber(), minimum.property(2).toNumber()),
-        infoValue.property(guide->_sizeHandle).toNumber(), guide->_visitation->info.inputValues,
+        float(infoValue.property(guide->_sizeHandle).toNumber()), guide->_visitation->info.inputValues,
         guide->_visitation->info.outputValues, infoValue.property(guide->_isLeafHandle).toBool() };
 
     // extract and convert the values provided by the script
@@ -807,13 +807,14 @@ int Octree::encodeTreeBitstream(OctreeElement* node,
 
     // write the octal code
     bool roomForOctalCode = false; // assume the worst
-    int codeLength;
+    int codeLength = 1; // assume root
     if (params.chopLevels) {
         unsigned char* newCode = chopOctalCode(node->getOctalCode(), params.chopLevels);
         roomForOctalCode = packetData->startSubTree(newCode);
 
         if (newCode) {
-            delete newCode;
+            codeLength = numberOfThreeBitSectionsInCode(newCode);
         } else {
-            codeLength = 1;
         }

@@ -1559,14 +1560,13 @@ void Octree::copySubTreeIntoNewTree(OctreeElement* startNode, Octree* destinatio
     }
 
     static OctreePacketData packetData;
-    int bytesWritten = 0;
 
     while (!nodeBag.isEmpty()) {
         OctreeElement* subTree = nodeBag.extract();
         packetData.reset(); // reset the packet between usage
         // ask our tree to write a bitstream
         EncodeBitstreamParams params(INT_MAX, IGNORE_VIEW_FRUSTUM, WANT_COLOR, NO_EXISTS_BITS, chopLevels);
-        bytesWritten = encodeTreeBitstream(subTree, &packetData, nodeBag, params);
+        encodeTreeBitstream(subTree, &packetData, nodeBag, params);
         // ask destination tree to read the bitstream
         ReadBitstreamToTreeParams args(WANT_COLOR, NO_EXISTS_BITS);
         destinationTree->readBitstreamToTree(packetData.getUncompressedData(), packetData.getUncompressedSize(), args);

@@ -1595,7 +1595,6 @@ void Octree::copyFromTreeIntoSubTree(Octree* sourceTree, OctreeElement* destinat
     nodeBag.insert(sourceTree->_rootNode);
 
     static OctreePacketData packetData;
-    int bytesWritten = 0;
 
     while (!nodeBag.isEmpty()) {
         OctreeElement* subTree = nodeBag.extract();

@@ -1604,7 +1603,7 @@ void Octree::copyFromTreeIntoSubTree(Octree* sourceTree, OctreeElement* destinat
 
         // ask our tree to write a bitstream
         EncodeBitstreamParams params(INT_MAX, IGNORE_VIEW_FRUSTUM, WANT_COLOR, NO_EXISTS_BITS);
-        bytesWritten = sourceTree->encodeTreeBitstream(subTree, &packetData, nodeBag, params);
+        sourceTree->encodeTreeBitstream(subTree, &packetData, nodeBag, params);
 
         // ask destination tree to read the bitstream
         bool wantImportProgress = true;
@@ -23,7 +23,7 @@ OctreePacketData::OctreePacketData(bool enableCompression, int targetSize) {
     changeSettings(enableCompression, targetSize); // does reset...
 }
 
-void OctreePacketData::changeSettings(bool enableCompression, int targetSize) {
+void OctreePacketData::changeSettings(bool enableCompression, unsigned int targetSize) {
     _enableCompression = enableCompression;
     _targetSize = std::min(MAX_OCTREE_UNCOMRESSED_PACKET_SIZE, targetSize);
     reset();

@@ -369,7 +369,7 @@ bool OctreePacketData::compressContent() {
 
     QByteArray compressedData = qCompress(uncompressedData, uncompressedSize, MAX_COMPRESSION);
 
-    if (compressedData.size() < MAX_OCTREE_PACKET_DATA_SIZE) {
+    if (compressedData.size() < (int)MAX_OCTREE_PACKET_DATA_SIZE) {
         _compressedBytes = compressedData.size();
         for (int i = 0; i < _compressedBytes; i++) {
             _compressed[i] = compressedData[i];
@@ -30,15 +30,15 @@ typedef uint16_t OCTREE_PACKET_INTERNAL_SECTION_SIZE;
 const int MAX_OCTREE_PACKET_SIZE = MAX_PACKET_SIZE;
 
 // this is overly conservative - sizeof(PacketType) is 8 bytes but a packed PacketType could be as small as one byte
-const int OCTREE_PACKET_EXTRA_HEADERS_SIZE = sizeof(OCTREE_PACKET_FLAGS)
+const unsigned int OCTREE_PACKET_EXTRA_HEADERS_SIZE = sizeof(OCTREE_PACKET_FLAGS)
     + sizeof(OCTREE_PACKET_SEQUENCE) + sizeof(OCTREE_PACKET_SENT_TIME);
 
-const int MAX_OCTREE_PACKET_DATA_SIZE = MAX_PACKET_SIZE - (MAX_PACKET_HEADER_BYTES + OCTREE_PACKET_EXTRA_HEADERS_SIZE);
+const unsigned int MAX_OCTREE_PACKET_DATA_SIZE = MAX_PACKET_SIZE - (MAX_PACKET_HEADER_BYTES + OCTREE_PACKET_EXTRA_HEADERS_SIZE);
 
-const int MAX_OCTREE_UNCOMRESSED_PACKET_SIZE = MAX_OCTREE_PACKET_DATA_SIZE;
+const unsigned int MAX_OCTREE_UNCOMRESSED_PACKET_SIZE = MAX_OCTREE_PACKET_DATA_SIZE;
 
-const int MINIMUM_ATTEMPT_MORE_PACKING = sizeof(OCTREE_PACKET_INTERNAL_SECTION_SIZE) + 40;
-const int COMPRESS_PADDING = 15;
+const unsigned int MINIMUM_ATTEMPT_MORE_PACKING = sizeof(OCTREE_PACKET_INTERNAL_SECTION_SIZE) + 40;
+const unsigned int COMPRESS_PADDING = 15;
 const int REASONABLE_NUMBER_OF_PACKING_ATTEMPTS = 5;
 
 const int PACKET_IS_COLOR_BIT = 0;
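Making these constants unsigned keeps them the same type as the values they are clamped against; std::min deduces a single template parameter, so mixed signedness fails to compile rather than silently converting. A standalone sketch (not from this changeset; the cap value is hypothetical):

    #include <algorithm>

    unsigned int clampTargetSize(unsigned int requested) {
        const unsigned int MAX_UNCOMPRESSED = 1400; // stand-in for MAX_OCTREE_UNCOMRESSED_PACKET_SIZE
        // std::min(int, unsigned int) would not compile; matching types need no cast
        return std::min(MAX_UNCOMPRESSED, requested);
    }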
@@ -69,7 +69,7 @@ public:
     ~OctreePacketData();
 
     /// change compression and target size settings
-    void changeSettings(bool enableCompression = false, int targetSize = MAX_OCTREE_PACKET_DATA_SIZE);
+    void changeSettings(bool enableCompression = false, unsigned int targetSize = MAX_OCTREE_PACKET_DATA_SIZE);
 
     /// reset completely, all data is discarded
     void reset();

@@ -168,7 +168,7 @@ public:
     bool isCompressed() const { return _enableCompression; }
 
     /// returns the target uncompressed size
-    int getTargetSize() const { return _targetSize; }
+    unsigned int getTargetSize() const { return _targetSize; }
 
     /// displays contents for debugging
     void debugContent();

@@ -186,7 +186,7 @@ private:
     /// append a single byte, might fail if byte would cause packet to be too large
     bool append(unsigned char byte);
 
-    int _targetSize;
+    unsigned int _targetSize;
     bool _enableCompression;
 
     unsigned char _uncompressed[MAX_OCTREE_UNCOMRESSED_PACKET_SIZE];
@@ -57,9 +57,9 @@ void OctreeRenderer::processDatagram(const QByteArray& dataByteArray, const Shar
     bool showTimingDetails = false; // Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);
     PerformanceWarning warn(showTimingDetails, "OctreeRenderer::processDatagram()",showTimingDetails);
 
-    int packetLength = dataByteArray.size();
+    unsigned int packetLength = dataByteArray.size();
     PacketType command = packetTypeForPacket(dataByteArray);
-    int numBytesPacketHeader = numBytesForPacketHeader(dataByteArray);
+    unsigned int numBytesPacketHeader = numBytesForPacketHeader(dataByteArray);
     QUuid sourceUUID = uuidFromPacketHeader(dataByteArray);
     PacketType expectedType = getExpectedPacketType();
 

@@ -86,11 +86,11 @@ void OctreeRenderer::processDatagram(const QByteArray& dataByteArray, const Shar
     int flightTime = arrivedAt - sentAt + clockSkew;
 
     OCTREE_PACKET_INTERNAL_SECTION_SIZE sectionLength = 0;
-    int dataBytes = packetLength - (numBytesPacketHeader + OCTREE_PACKET_EXTRA_HEADERS_SIZE);
+    unsigned int dataBytes = packetLength - (numBytesPacketHeader + OCTREE_PACKET_EXTRA_HEADERS_SIZE);
 
     if (extraDebugging) {
         qDebug("OctreeRenderer::processDatagram() ... Got Packet Section"
-               " color:%s compressed:%s sequence: %u flight:%d usec size:%d data:%d",
+               " color:%s compressed:%s sequence: %u flight:%d usec size:%u data:%u",
               debug::valueOf(packetIsColored), debug::valueOf(packetIsCompressed),
               sequence, flightTime, packetLength, dataBytes);
     }

@@ -119,7 +119,7 @@ void OctreeRenderer::processDatagram(const QByteArray& dataByteArray, const Shar
             packetData.loadFinalizedContent(dataAt, sectionLength);
             if (extraDebugging) {
                 qDebug("OctreeRenderer::processDatagram() ... Got Packet Section"
-                       " color:%s compressed:%s sequence: %u flight:%d usec size:%d data:%d"
+                       " color:%s compressed:%s sequence: %u flight:%d usec size:%u data:%u"
                       " subsection:%d sectionLength:%d uncompressed:%d",
                       debug::valueOf(packetIsColored), debug::valueOf(packetIsCompressed),
                       sequence, flightTime, packetLength, dataBytes, subsection, sectionLength,
@@ -919,13 +919,13 @@ void OctreeSceneStats::trackIncomingOctreePacket(const QByteArray& packet,
             if (wantExtraDebugging) {
                 qDebug() << "too many _missingSequenceNumbers:" << _missingSequenceNumbers.size();
             }
-            foreach(unsigned int missingItem, _missingSequenceNumbers) {
+            foreach(uint16_t missingItem, _missingSequenceNumbers) {
                 if (wantExtraDebugging) {
                     qDebug() << "checking item:" << missingItem << "is it in need of pruning?";
                     qDebug() << "(_incomingLastSequence - MAX_MISSING_SEQUENCE_OLD_AGE):"
                              << (_incomingLastSequence - MAX_MISSING_SEQUENCE_OLD_AGE);
                 }
-                if (missingItem <= (unsigned int)std::max(0, (int)_incomingLastSequence - (int)MAX_MISSING_SEQUENCE_OLD_AGE)) {
+                if (missingItem <= std::max(0, _incomingLastSequence - MAX_MISSING_SEQUENCE_OLD_AGE)) {
                     if (wantExtraDebugging) {
                         qDebug() << "pruning really old missing sequence:" << missingItem;
                     }

@@ -269,7 +269,7 @@ private:
     unsigned int _incomingLate; /// out of order later than expected
     unsigned int _incomingReallyLate; /// out of order and later than MAX_MISSING_SEQUENCE_OLD_AGE late
     unsigned int _incomingPossibleDuplicate; /// out of order possibly a duplicate
-    QSet<unsigned int> _missingSequenceNumbers;
+    QSet<uint16_t> _missingSequenceNumbers;
     SimpleMovingAverage _incomingFlightTimeAverage;
 
     // features related items
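Storing the missing-sequence set as uint16_t matches what actually travels in the packet header, so inserts and lookups compare like with like; the pruning test then only needs an int promotion to keep the subtraction from wrapping below zero. A standalone sketch (not from this changeset; the wire sequence type is assumed to be 16-bit here):

    #include <algorithm>
    #include <cstdint>

    bool isPrunablyOld(uint16_t missingItem, uint16_t incomingLastSequence, int maxAge) {
        // uint16_t promotes to int, so the subtraction cannot wrap; clamp at zero
        return missingItem <= std::max(0, incomingLastSequence - maxAge);
    }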
@@ -267,15 +267,36 @@ void ScriptEngine::run() {
 
         if (_avatarAudioBuffer && _numAvatarAudioBufferSamples > 0) {
             // if have an avatar audio stream then send it out to our audio-mixer
-            QByteArray audioPacket = byteArrayWithPopulatedHeader(PacketTypeMicrophoneAudioNoEcho);
+
+            bool silentFrame = true;
+
+            // check if all of the _numAvatarAudioBufferSamples to be sent are silence
+            for (int i = 0; i < _numAvatarAudioBufferSamples; ++i) {
+                if (_avatarAudioBuffer[i] != 0) {
+                    silentFrame = false;
+                    break;
+                }
+            }
+
+            QByteArray audioPacket = byteArrayWithPopulatedHeader(silentFrame
+                                                                  ? PacketTypeSilentAudioFrame
+                                                                  : PacketTypeMicrophoneAudioNoEcho);
             QDataStream packetStream(&audioPacket, QIODevice::Append);
 
             // use the orientation and position of this avatar for the source of this audio
             packetStream.writeRawData(reinterpret_cast<const char*>(&_avatarData->getPosition()), sizeof(glm::vec3));
             glm::quat headOrientation = _avatarData->getHeadOrientation();
             packetStream.writeRawData(reinterpret_cast<const char*>(&headOrientation), sizeof(glm::quat));
-            packetStream.writeRawData(reinterpret_cast<const char*>(_avatarAudioBuffer),
-                                      _numAvatarAudioBufferSamples * sizeof(int16_t));
 
+            if (silentFrame) {
+                // write the number of silent samples so the audio-mixer can uphold timing
+                int16_t numSilentSamples = _numAvatarAudioBufferSamples;
+                packetStream.writeRawData(reinterpret_cast<const char*>(&numSilentSamples), sizeof(int16_t));
+            } else {
+                // write the raw audio data
+                packetStream.writeRawData(reinterpret_cast<const char*>(_avatarAudioBuffer),
+                                          _numAvatarAudioBufferSamples * sizeof(int16_t));
+            }
 
             nodeList->broadcastToNodes(audioPacket, NodeSet() << NodeType::AudioMixer);
         }
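The silence scan added above bails out at the first non-zero sample, which is the cheap common case for live audio. A standalone sketch (not from this changeset) phrasing the same check with the standard library:

    #include <algorithm>
    #include <cstdint>

    bool isSilentFrame(const int16_t* samples, int numSamples) {
        // equivalent to the loop above: true only if every sample is exactly zero
        return std::all_of(samples, samples + numSamples,
                           [](int16_t sample) { return sample == 0; });
    }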
@@ -16,7 +16,7 @@
 
 #include "HifiSockAddr.h"
 
-const QString DEFAULT_DOMAIN_HOSTNAME = "root.highfidelity.io";
+const QString DEFAULT_DOMAIN_HOSTNAME = "alpha.highfidelity.io";
 const unsigned short DEFAULT_DOMAIN_SERVER_PORT = 40102;
 
 class DomainInfo : public QObject {

@@ -267,7 +267,9 @@ void NodeList::processNodeData(const HifiSockAddr& senderSockAddr, const QByteAr
             // the node decided not to do anything with this packet
             // if it comes from a known source we should keep that node alive
             SharedNodePointer matchingNode = sendingNodeForPacket(packet);
-            matchingNode->setLastHeardMicrostamp(usecTimestampNow());
+            if (matchingNode) {
+                matchingNode->setLastHeardMicrostamp(usecTimestampNow());
+            }
 
             break;
         }

@@ -348,7 +350,7 @@ void NodeList::addSetOfNodeTypesToNodeInterestSet(const NodeSet& setOfNodeTypes)
 
 const uint32_t RFC_5389_MAGIC_COOKIE = 0x2112A442;
 const int NUM_BYTES_STUN_HEADER = 20;
-const int NUM_STUN_REQUESTS_BEFORE_FALLBACK = 5;
+const unsigned int NUM_STUN_REQUESTS_BEFORE_FALLBACK = 5;
 
 void NodeList::sendSTUNRequest() {
     const char STUN_SERVER_HOSTNAME[] = "stun.highfidelity.io";

@@ -45,7 +45,7 @@ int packArithmeticallyCodedValue(int value, char* destination) {
 PacketVersion versionForPacketType(PacketType type) {
     switch (type) {
         case PacketTypeAvatarData:
-            return 1;
+            return 2;
         case PacketTypeParticleData:
             return 1;
         case PacketTypeDomainList:
@@ -32,7 +32,7 @@ enum PacketType {
     PacketTypeMicrophoneAudioNoEcho,
     PacketTypeMicrophoneAudioWithEcho,
     PacketTypeBulkAvatarData,
-    PacketTypeTransmitterData, // usable
+    PacketTypeSilentAudioFrame,
     PacketTypeEnvironmentData,
     PacketTypeDomainListRequest,
     PacketTypeRequestAssignment,
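Renaming the retired PacketTypeTransmitterData entry in place, rather than deleting it, keeps every later enumerator's implicit value unchanged, so the surviving packet types still agree across peers built before and after the change. A standalone sketch (not from this changeset; names are illustrative):

    enum ExamplePacketType {
        TypeA,        // 0
        TypeRecycled, // 1 -- swap the name of a dead entry instead of removing the line
        TypeB         // still 2 either way
    };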
@@ -313,7 +313,7 @@ bool capsuleCapsule(const CapsuleShape* capsuleA, const CapsuleShape* capsuleB,
 
     // Since there are only three comparisons to do we unroll the sort algorithm...
     // and use a fifth slot as temp during swap.
-    if (points[4] > points[2]) {
+    if (points[1] > points[2]) {
         points[4] = points[1];
         points[1] = points[2];
         points[2] = points[4];
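The one-character fix above is easy to misread: points[4] is only a temp slot for swaps, so comparing it means comparing stale data; the live values being ordered are points[1] and points[2]. A standalone sketch (not from this changeset) of an unrolled three-comparison sort with one temp, showing the pattern done correctly:

    // sorts a, b, c ascending using three comparisons and a temp --
    // every comparison reads the live variables, never the temp
    void sort3(float& a, float& b, float& c) {
        float temp;
        if (a > b) { temp = a; a = b; b = temp; }
        if (b > c) { temp = b; b = c; c = temp; }
        if (a > b) { temp = a; a = b; b = temp; }
    }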
@@ -631,8 +631,7 @@ void debug::setDeadBeef(void* memoryVoid, int size) {
 }
 
 void debug::checkDeadBeef(void* memoryVoid, int size) {
-    unsigned char* memoryAt = (unsigned char*)memoryVoid;
-    assert(memcmp(memoryAt, DEADBEEF, std::min(size, DEADBEEF_SIZE)) != 0);
+    assert(memcmp((unsigned char*)memoryVoid, DEADBEEF, std::min(size, DEADBEEF_SIZE)) != 0);
 }
 
 // Safe version of glm::eulerAngles; uses the factorization method described in David Eberly's

@@ -178,13 +178,14 @@ void VoxelTreeElement::calculateAverageFromChildren() {
 bool VoxelTreeElement::collapseChildren() {
     // scan children, verify that they are ALL present and accounted for
     bool allChildrenMatch = true; // assume the best (ottimista)
-    int red,green,blue;
+    int red = 0;
+    int green = 0;
+    int blue = 0;
     for (int i = 0; i < NUMBER_OF_CHILDREN; i++) {
         VoxelTreeElement* childAt = getChildAtIndex(i);
         // if no child, child isn't a leaf, or child doesn't have a color
         if (!childAt || !childAt->isLeaf() || !childAt->isColored()) {
-            allChildrenMatch=false;
-            //qDebug("SADNESS child missing or not colored! i=%d\n",i);
+            allChildrenMatch = false;
+            break;
         } else {
             if (i==0) {