merge upstream/master into andrew/ragdoll

Andrew Meadows 2014-06-10 15:26:16 -07:00
commit e0ebc61b25
40 changed files with 1199 additions and 527 deletions


@ -173,134 +173,151 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
}
}
// if the bearing relative angle to source is > 0 then the delayed channel is the right one
int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
const int16_t* nextOutputStart = bufferToAdd->getNextOutput();
const int16_t* bufferStart = bufferToAdd->getBuffer();
int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
int16_t correctBufferSample[2], delayBufferSample[2];
int delayedChannelIndex = 0;
const int SINGLE_STEREO_OFFSET = 2;
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
if (!bufferToAdd->isStereo()) {
// this is a mono buffer, which means it gets full attenuation and spatialization
// setup the int16_t variables for the two sample sets
correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient;
correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient;
// if the bearing relative angle to source is > 0 then the delayed channel is the right one
int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
const int16_t* bufferStart = bufferToAdd->getBuffer();
int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
int16_t correctBufferSample[2], delayBufferSample[2];
int delayedChannelIndex = 0;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[s + goodChannelOffset],
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET],
_clientSamples[delayedChannelIndex],
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET]);
__m64 addedSamples = _mm_set_pi16(correctBufferSample[0], correctBufferSample[1],
delayBufferSample[0], delayBufferSample[1]);
const int SINGLE_STEREO_OFFSET = 2;
// perform the MMX add (with saturation) of two correct and delayed samples
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addedSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
// assign the results from the result of the mmx arithmetic
_clientSamples[s + goodChannelOffset] = shortResults[3];
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] = shortResults[2];
_clientSamples[delayedChannelIndex] = shortResults[1];
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] = shortResults[0];
}
// The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
// too many conditionals in handling the delay samples at the beginning of _clientSamples.
// Basically we try to take the samples in batches of four, and then handle the remainder
// conditionally to get rid of the rest.
const int DOUBLE_STEREO_OFFSET = 4;
const int TRIPLE_STEREO_OFFSET = 6;
if (numSamplesDelay > 0) {
// if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
// to stick at the beginning
float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay;
if (delayNextOutputStart < bufferStart) {
delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay;
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
// setup the int16_t variables for the two sample sets
correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient;
correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient;
delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[s + goodChannelOffset],
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET],
_clientSamples[delayedChannelIndex],
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET]);
__m64 addedSamples = _mm_set_pi16(correctBufferSample[0], correctBufferSample[1],
delayBufferSample[0], delayBufferSample[1]);
// perform the MMX add (with saturation) of two correct and delayed samples
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addedSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
// assign the results from the result of the mmx arithmetic
_clientSamples[s + goodChannelOffset] = shortResults[3];
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] = shortResults[2];
_clientSamples[delayedChannelIndex] = shortResults[1];
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] = shortResults[0];
}
int i = 0;
// The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
// too many conditionals in handling the delay samples at the beginning of _clientSamples.
// Basically we try to take the samples in batches of four, and then handle the remainder
// conditionally to get rid of the rest.
while (i + 3 < numSamplesDelay) {
// handle the first cases where we can MMX add four samples at once
const int DOUBLE_STEREO_OFFSET = 4;
const int TRIPLE_STEREO_OFFSET = 6;
if (numSamplesDelay > 0) {
// if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
// to stick at the beginning
float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay;
if (delayNextOutputStart < bufferStart) {
delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay;
}
int i = 0;
while (i + 3 < numSamplesDelay) {
// handle the first cases where we can MMX add four samples at once
int parentIndex = i * 2;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset]);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 3] * attenuationAndWeakChannelRatio);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[0];
// push the index
i += 4;
}
int parentIndex = i * 2;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset]);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 3] * attenuationAndWeakChannelRatio);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[0];
// push the index
i += 4;
if (i + 2 < numSamplesDelay) {
// MMX add only three delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
} else if (i + 1 < numSamplesDelay) {
// MMX add two delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset], 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
} else if (i < numSamplesDelay) {
// MMX add a single delayed sample
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset], 0, 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio, 0, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
}
}
int parentIndex = i * 2;
if (i + 2 < numSamplesDelay) {
// MMX add only three delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
} else if (i + 1 < numSamplesDelay) {
// MMX add two delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset], 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
} else if (i < numSamplesDelay) {
// MMX add a single delayed sample
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset], 0, 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio, 0, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
} else {
// stereo buffer - do attenuation but no sample delay for spatialization
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
// use MMX to clamp four additions at a time
_clientSamples[s] = glm::clamp(_clientSamples[s] + (int) (nextOutputStart[s] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
_clientSamples[s + 1] = glm::clamp(_clientSamples[s + 1] + (int) (nextOutputStart[s + 1] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
_clientSamples[s + 2] = glm::clamp(_clientSamples[s + 2] + (int) (nextOutputStart[s + 2] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
_clientSamples[s + 3] = glm::clamp(_clientSamples[s + 3] + (int) (nextOutputStart[s + 3] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
}
}
}
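A note on the intrinsics used throughout this hunk: _mm_set_pi16 packs its first argument into the highest 16-bit lane of the __m64, which is why every block above reads its results back from shortResults[3] down to shortResults[0]. A standalone sketch (not part of the commit; the sample values are made up) of the saturating add:

#include <mmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main() {
    int16_t mix[4] = { 30000, -20000, 100, -100 }; // existing client samples
    int16_t add[4] = { 10000, -20000, 50, 50 };    // attenuated samples to mix in
    __m64 a = _mm_set_pi16(mix[0], mix[1], mix[2], mix[3]); // mix[0] lands in lane 3
    __m64 b = _mm_set_pi16(add[0], add[1], add[2], add[3]);
    __m64 r = _mm_adds_pi16(a, b); // saturates at INT16_MAX/INT16_MIN instead of wrapping
    int16_t* out = reinterpret_cast<int16_t*>(&r);
    // out[3] pairs with mix[0], so this prints: 32767 -32768 150 -50
    printf("%d %d %d %d\n", out[3], out[2], out[1], out[0]);
    _mm_empty(); // clear MMX state before any later floating-point code
    return 0;
}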


@ -50,10 +50,22 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
// grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
// read the first byte after the header to see if this is a stereo or mono buffer
quint8 channelFlag = packet.at(numBytesForPacketHeader(packet));
bool isStereo = channelFlag == 1;
if (avatarRingBuffer && avatarRingBuffer->isStereo() != isStereo) {
// there's a mismatch in the buffer channels for the incoming and current buffer
// so delete our current buffer and create a new one
_ringBuffers.removeOne(avatarRingBuffer);
avatarRingBuffer->deleteLater();
avatarRingBuffer = NULL;
}
if (!avatarRingBuffer) {
// we don't have an AvatarAudioRingBuffer yet, so add it
avatarRingBuffer = new AvatarAudioRingBuffer();
avatarRingBuffer = new AvatarAudioRingBuffer(isStereo);
_ringBuffers.push_back(avatarRingBuffer);
}
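For context, the channel flag read above is the quint8 that Audio::handleAudioInput() (later in this commit) writes immediately after the packet header, ahead of the listener's position and orientation. A sketch of the packet prefix as implied by that packing code (illustrative summary, not an authoritative format spec):

// audio packet prefix, as implied by the packing code in Audio::handleAudioInput():
//   [packet header]
//   [quint8 isStereo]            1 = stereo, 0 = mono
//   [glm::vec3 headPosition]     three floats
//   [glm::quat headOrientation]
//   [audio payload]              NETWORK_BUFFER_LENGTH_BYTES_STEREO or ..._PER_CHANNEL bytes;
//                                a PacketTypeSilentAudioFrame instead carries one int16_t sample count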
@ -106,7 +118,8 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
PositionalAudioRingBuffer* audioBuffer = _ringBuffers[i];
if (audioBuffer->willBeAddedToMix()) {
audioBuffer->shiftReadPosition(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
audioBuffer->shiftReadPosition(audioBuffer->isStereo()
? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
audioBuffer->setWillBeAddedToMix(false);
} else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector


@ -24,14 +24,14 @@ public:
AudioMixerClientData();
~AudioMixerClientData();
const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
const QList<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
int parseData(const QByteArray& packet);
void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples);
void pushBuffersAfterFrameSend();
private:
std::vector<PositionalAudioRingBuffer*> _ringBuffers;
QList<PositionalAudioRingBuffer*> _ringBuffers;
};
#endif // hifi_AudioMixerClientData_h


@ -13,8 +13,8 @@
#include "AvatarAudioRingBuffer.h"
AvatarAudioRingBuffer::AvatarAudioRingBuffer() :
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone) {
AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo) :
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo) {
}


@ -18,7 +18,7 @@
class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
public:
AvatarAudioRingBuffer();
AvatarAudioRingBuffer(bool isStereo = false);
int parseData(const QByteArray& packet);
private:


@ -106,7 +106,7 @@ int ModelServer::sendSpecialPacket(OctreeQueryNode* queryNode, const SharedNodeP
//qDebug() << "sending PacketType_MODEL_ERASE packetLength:" << packetLength;
NodeList::getInstance()->writeDatagram((char*) outputBuffer, packetLength, SharedNodePointer(node));
queryNode->incrementSequenceNumber();
queryNode->packetSent(outputBuffer, packetLength);
}
nodeData->setLastDeletedModelsSentAt(deletePacketSentAt);


@ -41,7 +41,8 @@ OctreeQueryNode::OctreeQueryNode() :
_sequenceNumber(0),
_lastRootTimestamp(0),
_myPacketType(PacketTypeUnknown),
_isShuttingDown(false)
_isShuttingDown(false),
_sentPacketHistory(1000)
{
}
@ -362,3 +363,45 @@ void OctreeQueryNode::dumpOutOfView() {
}
}
}
void OctreeQueryNode::octreePacketSent() {
packetSent(_octreePacket, getPacketLength());
}
void OctreeQueryNode::packetSent(unsigned char* packet, int packetLength) {
packetSent(QByteArray((char*)packet, packetLength));
}
void OctreeQueryNode::packetSent(const QByteArray& packet) {
_sentPacketHistory.packetSent(_sequenceNumber, packet);
_sequenceNumber++;
}
bool OctreeQueryNode::hasNextNackedPacket() const {
return !_nackedSequenceNumbers.isEmpty();
}
const QByteArray* OctreeQueryNode::getNextNackedPacket() {
if (!_nackedSequenceNumbers.isEmpty()) {
// could return null if packet is not in the history
return _sentPacketHistory.getPacket(_nackedSequenceNumbers.dequeue());
}
return NULL;
}
void OctreeQueryNode::parseNackPacket(QByteArray& packet) {
int numBytesPacketHeader = numBytesForPacketHeader(packet);
const unsigned char* dataAt = reinterpret_cast<const unsigned char*>(packet.data()) + numBytesPacketHeader;
// read number of sequence numbers
uint16_t numSequenceNumbers = (*(uint16_t*)dataAt);
dataAt += sizeof(uint16_t);
// read sequence numbers
for (int i = 0; i < numSequenceNumbers; i++) {
OCTREE_PACKET_SEQUENCE sequenceNumber = (*(OCTREE_PACKET_SEQUENCE*)dataAt);
_nackedSequenceNumbers.enqueue(sequenceNumber);
dataAt += sizeof(OCTREE_PACKET_SEQUENCE);
}
}
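The payload parsed here is symmetric with what Application::sendNack() (later in this commit) packs: a uint16_t count followed by that many sequence numbers. A minimal, hypothetical writer for the same layout (writeNackPayload and its QList argument are illustrative, not part of the commit):

#include <cstring> // memcpy
#include <QList>

// payload layout: [uint16_t numSequenceNumbers][numSequenceNumbers x OCTREE_PACKET_SEQUENCE]
void writeNackPayload(unsigned char* dataAt, const QList<OCTREE_PACKET_SEQUENCE>& nacked) {
    uint16_t numSequenceNumbers = (uint16_t)nacked.size();
    memcpy(dataAt, &numSequenceNumbers, sizeof(uint16_t)); // write the count
    dataAt += sizeof(uint16_t);
    for (int i = 0; i < nacked.size(); i++) {              // then the sequence numbers
        memcpy(dataAt, &nacked[i], sizeof(OCTREE_PACKET_SEQUENCE));
        dataAt += sizeof(OCTREE_PACKET_SEQUENCE);
    }
}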


@ -23,6 +23,8 @@
#include <OctreeQuery.h>
#include <OctreeSceneStats.h>
#include <ThreadedAssignment.h> // for SharedAssignmentPointer
#include "SentPacketHistory.h"
#include <qqueue.h>
class OctreeSendThread;
@ -100,10 +102,16 @@ public:
void forceNodeShutdown();
bool isShuttingDown() const { return _isShuttingDown; }
void incrementSequenceNumber() { _sequenceNumber++; }
void octreePacketSent();
void packetSent(unsigned char* packet, int packetLength);
void packetSent(const QByteArray& packet);
OCTREE_PACKET_SEQUENCE getSequenceNumber() const { return _sequenceNumber; }
void parseNackPacket(QByteArray& packet);
bool hasNextNackedPacket() const;
const QByteArray* getNextNackedPacket();
private slots:
void sendThreadFinished();
@ -146,6 +154,9 @@ private:
PacketType _myPacketType;
bool _isShuttingDown;
SentPacketHistory _sentPacketHistory;
QQueue<OCTREE_PACKET_SEQUENCE> _nackedSequenceNumbers;
};
#endif // hifi_OctreeQueryNode_h


@ -85,6 +85,7 @@ bool OctreeSendThread::process() {
if (nodeData && !nodeData->isShuttingDown()) {
bool viewFrustumChanged = nodeData->updateCurrentViewFrustum();
packetDistributor(nodeData, viewFrustumChanged);
resendNackedPackets(nodeData);
}
}
}
@ -214,8 +215,7 @@ int OctreeSendThread::handlePacketSend(OctreeQueryNode* nodeData, int& trueBytes
packetsSent++;
OctreeServer::didCallWriteDatagram(this);
NodeList::getInstance()->writeDatagram((char*) nodeData->getPacket(), nodeData->getPacketLength(), _node);
NodeList::getInstance()->writeDatagram((char*)nodeData->getPacket(), nodeData->getPacketLength(), _node);
packetSent = true;
thisWastedBytes = MAX_PACKET_SIZE - nodeData->getPacketLength();
@ -244,7 +244,7 @@ int OctreeSendThread::handlePacketSend(OctreeQueryNode* nodeData, int& trueBytes
if (nodeData->isPacketWaiting() && !nodeData->isShuttingDown()) {
// just send the voxel packet
OctreeServer::didCallWriteDatagram(this);
NodeList::getInstance()->writeDatagram((char*) nodeData->getPacket(), nodeData->getPacketLength(), _node);
NodeList::getInstance()->writeDatagram((char*)nodeData->getPacket(), nodeData->getPacketLength(), _node);
packetSent = true;
int thisWastedBytes = MAX_PACKET_SIZE - nodeData->getPacketLength();
@ -274,13 +274,33 @@ int OctreeSendThread::handlePacketSend(OctreeQueryNode* nodeData, int& trueBytes
trueBytesSent += nodeData->getPacketLength();
truePacketsSent++;
packetsSent++;
nodeData->incrementSequenceNumber();
nodeData->octreePacketSent();
nodeData->resetOctreePacket();
}
return packetsSent;
}
int OctreeSendThread::resendNackedPackets(OctreeQueryNode* nodeData) {
const int MAX_PACKETS_RESEND = 10;
int packetsSent = 0;
const QByteArray* packet;
while (nodeData->hasNextNackedPacket() && packetsSent < MAX_PACKETS_RESEND) {
packet = nodeData->getNextNackedPacket();
if (packet) {
NodeList::getInstance()->writeDatagram(*packet, _node);
packetsSent++;
_totalBytes += packet->size();
_totalPackets++;
_totalWastedBytes += MAX_PACKET_SIZE - packet->size();
}
}
return packetsSent;
}
/// Version of voxel distributor that sends the deepest LOD level at once
int OctreeSendThread::packetDistributor(OctreeQueryNode* nodeData, bool viewFrustumChanged) {


@ -55,6 +55,9 @@ private:
int _nodeMissingCount;
bool _isShuttingDown;
int resendNackedPackets(OctreeQueryNode* nodeData);
};
#endif // hifi_OctreeSendThread_h


@ -832,14 +832,13 @@ void OctreeServer::readPendingDatagrams() {
PacketType packetType = packetTypeForPacket(receivedPacket);
SharedNodePointer matchingNode = nodeList->sendingNodeForPacket(receivedPacket);
if (packetType == getMyQueryMessageType()) {
// If we got a query packet, then we're talking to an agent, and we
// need to make sure we have it in our nodeList.
if (matchingNode) {
nodeList->updateNodeWithDataFromPacket(matchingNode, receivedPacket);
OctreeQueryNode* nodeData = (OctreeQueryNode*) matchingNode->getLinkedData();
OctreeQueryNode* nodeData = (OctreeQueryNode*)matchingNode->getLinkedData();
if (nodeData && !nodeData->isOctreeSendThreadInitalized()) {
// NOTE: this is an important aspect of the proper ref counting. The send threads/node data need to
// know that the OctreeServer/Assignment will not get deleted on it while it's still active. The
// solution is to get the shared pointer for the current assignment. We need to make sure this is the
@ -848,6 +847,16 @@ void OctreeServer::readPendingDatagrams() {
nodeData->initializeOctreeSendThread(sharedAssignment, matchingNode);
}
}
} else if (packetType == PacketTypeOctreeDataNack) {
// If we got a nack packet, then we're talking to an agent, and we
// need to make sure we have it in our nodeList.
if (matchingNode) {
nodeList->updateNodeWithDataFromPacket(matchingNode, receivedPacket);
OctreeQueryNode* nodeData = (OctreeQueryNode*)matchingNode->getLinkedData();
if (nodeData) {
nodeData->parseNackPacket(receivedPacket);
}
}
} else if (packetType == PacketTypeJurisdictionRequest) {
_jurisdictionSender->queueReceivedPacket(matchingNode, receivedPacket);
} else if (_octreeInboundPacketProcessor && getOctree()->handlesEditPacketType(packetType)) {


@ -0,0 +1,44 @@
//
// SentPacketHistory.cpp
// assignment-client/src/octree
//
// Created by Yixin Wang on 6/5/2014
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "SentPacketHistory.h"
SentPacketHistory::SentPacketHistory(int size)
: _sentPackets(size),
_newestPacketAt(0),
_numExistingPackets(0),
_newestSequenceNumber(0)
{
}
void SentPacketHistory::packetSent(OCTREE_PACKET_SEQUENCE sequenceNumber, const QByteArray& packet) {
_newestSequenceNumber = sequenceNumber;
// increment _newestPacketAt cyclically, insert new packet there.
// this will overwrite the oldest packet in the buffer
_newestPacketAt = (_newestPacketAt == _sentPackets.size() - 1) ? 0 : _newestPacketAt + 1;
_sentPackets[_newestPacketAt] = packet;
if (_numExistingPackets < _sentPackets.size()) {
_numExistingPackets++;
}
}
const QByteArray* SentPacketHistory::getPacket(OCTREE_PACKET_SEQUENCE sequenceNumber) const {
OCTREE_PACKET_SEQUENCE seqDiff = _newestSequenceNumber - sequenceNumber;
if (!(seqDiff >= 0 && seqDiff < _numExistingPackets)) {
return NULL;
}
int packetAt = _newestPacketAt - seqDiff;
if (packetAt < 0) {
packetAt += _sentPackets.size();
}
return &_sentPackets.at(packetAt);
}
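A quick usage sketch of the circular history above (the size and sequence numbers are made-up values) showing when getPacket() starts returning NULL:

SentPacketHistory history(3);                    // remembers the 3 most recently sent packets
history.packetSent(10, QByteArray("a"));
history.packetSent(11, QByteArray("b"));
history.packetSent(12, QByteArray("c"));
history.packetSent(13, QByteArray("d"));         // overwrites the slot that held sequence 10
const QByteArray* hit = history.getPacket(11);   // seqDiff = 13 - 11 = 2 < 3: returns "b"
const QByteArray* miss = history.getPacket(10);  // seqDiff = 3 >= _numExistingPackets: NULL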


@ -0,0 +1,35 @@
//
// SentPacketHistory.h
// assignment-client/src/octree
//
// Created by Yixin Wang on 6/5/2014
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_SentPacketHistory_h
#define hifi_SentPacketHistory_h
#include <qbytearray.h>
#include <qvector.h>
#include "OctreePacketData.h"
class SentPacketHistory {
public:
SentPacketHistory(int size);
void packetSent(OCTREE_PACKET_SEQUENCE sequenceNumber, const QByteArray& packet);
const QByteArray* getPacket(OCTREE_PACKET_SEQUENCE sequenceNumber) const;
private:
QVector<QByteArray> _sentPackets; // circular buffer
int _newestPacketAt;
int _numExistingPackets;
OCTREE_PACKET_SEQUENCE _newestSequenceNumber;
};
#endif


@ -106,7 +106,7 @@ int ParticleServer::sendSpecialPacket(OctreeQueryNode* queryNode, const SharedNo
//qDebug() << "sending PacketType_PARTICLE_ERASE packetLength:" << packetLength;
NodeList::getInstance()->writeDatagram((char*) outputBuffer, packetLength, SharedNodePointer(node));
queryNode->incrementSequenceNumber();
queryNode->packetSent(outputBuffer, packetLength);
}
nodeData->setLastDeletedParticlesSentAt(deletePacketSentAt);


@ -75,7 +75,7 @@ int VoxelServer::sendSpecialPacket(OctreeQueryNode* queryNode, const SharedNodeP
}
NodeList::getInstance()->writeDatagram((char*) _tempOutputBuffer, envPacketLength, SharedNodePointer(node));
queryNode->incrementSequenceNumber();
queryNode->packetSent(_tempOutputBuffer, envPacketLength);
return envPacketLength;
}


@ -18,7 +18,8 @@ var toolWidth = 50;
var LASER_WIDTH = 4;
var LASER_COLOR = { red: 255, green: 0, blue: 0 };
var LASER_LENGTH_FACTOR = 5;
var LASER_LENGTH_FACTOR = 500;
var LEFT = 0;
var RIGHT = 1;
@ -42,6 +43,8 @@ var toolBar;
var jointList = MyAvatar.getJointNames();
var mode = 0;
function isLocked(properties) {
// special case to lock the ground plane model in hq.
if (location.hostname == "hq.highfidelity.io" &&
@ -57,6 +60,7 @@ function controller(wichSide) {
this.palm = 2 * wichSide;
this.tip = 2 * wichSide + 1;
this.trigger = wichSide;
this.bumper = 6 * wichSide + 5;
this.oldPalmPosition = Controller.getSpatialControlPosition(this.palm);
this.palmPosition = Controller.getSpatialControlPosition(this.palm);
@ -77,6 +81,7 @@ function controller(wichSide) {
this.rotation = this.oldRotation;
this.triggerValue = Controller.getTriggerValue(this.trigger);
this.bumperValue = Controller.isButtonPressed(this.bumper);
this.pressed = false; // is trigger pressed
this.pressing = false; // is trigger being pressed (is pressed now but wasn't previously)
@ -88,6 +93,11 @@ function controller(wichSide) {
this.oldModelPosition;
this.oldModelRadius;
this.positionAtGrab;
this.rotationAtGrab;
this.modelPositionAtGrab;
this.modelRotationAtGrab;
this.jointsIntersectingFromStart = [];
this.laser = Overlays.addOverlay("line3d", {
@ -145,6 +155,11 @@ function controller(wichSide) {
this.oldModelRotation = properties.modelRotation;
this.oldModelRadius = properties.radius;
this.positionAtGrab = this.palmPosition;
this.rotationAtGrab = this.rotation;
this.modelPositionAtGrab = properties.position;
this.modelRotationAtGrab = properties.modelRotation;
this.jointsIntersectingFromStart = [];
for (var i = 0; i < jointList.length; i++) {
var distance = Vec3.distance(MyAvatar.getJointPosition(jointList[i]), this.oldModelPosition);
@ -152,6 +167,7 @@ function controller(wichSide) {
this.jointsIntersectingFromStart.push(i);
}
}
this.showLaser(false);
}
}
@ -169,12 +185,16 @@ function controller(wichSide) {
}
}
print("closestJoint: " + jointList[closestJointIndex]);
print("closestJointDistance (attach max distance): " + closestJointDistance + " (" + this.oldModelRadius + ")");
if (closestJointIndex != -1) {
print("closestJoint: " + jointList[closestJointIndex]);
print("closestJointDistance (attach max distance): " + closestJointDistance + " (" + this.oldModelRadius + ")");
}
if (closestJointDistance < this.oldModelRadius) {
if (this.jointsIntersectingFromStart.indexOf(closestJointIndex) != -1) {
if (this.jointsIntersectingFromStart.indexOf(closestJointIndex) != -1 ||
(leftController.grabbing && rightController.grabbing &&
leftController.modelID.id == rightController.modelID.id)) {
// Do nothing
} else {
print("Attaching to " + jointList[closestJointIndex]);
@ -188,6 +208,7 @@ function controller(wichSide) {
MyAvatar.attach(this.modelURL, jointList[closestJointIndex],
attachmentOffset, attachmentRotation, 2.0 * this.oldModelRadius,
true, false);
Models.deleteModel(this.modelID);
}
}
@ -196,6 +217,7 @@ function controller(wichSide) {
this.grabbing = false;
this.modelID.isKnownID = false;
this.jointsIntersectingFromStart = [];
this.showLaser(true);
}
this.checkTrigger = function () {
@ -246,6 +268,7 @@ function controller(wichSide) {
return { valid: false };
}
this.glowedIntersectingModel = { isKnownID: false };
this.moveLaser = function () {
// the overlays here are anchored to the avatar, which means they are specified in the avatar's local frame
@ -258,46 +281,99 @@ function controller(wichSide) {
Overlays.editOverlay(this.laser, {
position: startPosition,
end: endPosition,
visible: true
end: endPosition
});
Overlays.editOverlay(this.ball, {
position: endPosition,
visible: true
position: endPosition
});
Overlays.editOverlay(this.leftRight, {
position: Vec3.sum(endPosition, Vec3.multiply(this.right, 2 * this.guideScale)),
end: Vec3.sum(endPosition, Vec3.multiply(this.right, -2 * this.guideScale)),
visible: true
end: Vec3.sum(endPosition, Vec3.multiply(this.right, -2 * this.guideScale))
});
Overlays.editOverlay(this.topDown, {position: Vec3.sum(endPosition, Vec3.multiply(this.up, 2 * this.guideScale)),
end: Vec3.sum(endPosition, Vec3.multiply(this.up, -2 * this.guideScale)),
visible: true
end: Vec3.sum(endPosition, Vec3.multiply(this.up, -2 * this.guideScale))
});
this.showLaser(!this.grabbing || mode == 0);
if (this.glowedIntersectingModel.isKnownID) {
Models.editModel(this.glowedIntersectingModel, { glowLevel: 0.0 });
this.glowedIntersectingModel.isKnownID = false;
}
if (!this.grabbing) {
var intersection = Models.findRayIntersection({
origin: this.palmPosition,
direction: this.front
});
if (intersection.accurate && intersection.modelID.isKnownID) {
this.glowedIntersectingModel = intersection.modelID;
Models.editModel(this.glowedIntersectingModel, { glowLevel: 0.25 });
}
}
}
this.hideLaser = function() {
Overlays.editOverlay(this.laser, { visible: false });
Overlays.editOverlay(this.ball, { visible: false });
Overlays.editOverlay(this.leftRight, { visible: false });
Overlays.editOverlay(this.topDown, { visible: false });
this.showLaser = function(show) {
Overlays.editOverlay(this.laser, { visible: show });
Overlays.editOverlay(this.ball, { visible: show });
Overlays.editOverlay(this.leftRight, { visible: show });
Overlays.editOverlay(this.topDown, { visible: show });
}
this.moveModel = function () {
if (this.grabbing) {
var newPosition = Vec3.sum(this.palmPosition,
Vec3.multiply(this.front, this.x));
newPosition = Vec3.sum(newPosition,
Vec3.multiply(this.up, this.y));
newPosition = Vec3.sum(newPosition,
Vec3.multiply(this.right, this.z));
if (!this.modelID.isKnownID) {
print("Unknown grabbed ID " + this.modelID.id + ", isKnown: " + this.modelID.isKnownID);
this.modelID = Models.findRayIntersection({
origin: this.palmPosition,
direction: this.front
}).modelID;
print("Identified ID " + this.modelID.id + ", isKnown: " + this.modelID.isKnownID);
}
var newPosition;
var newRotation;
var newRotation = Quat.multiply(this.rotation,
Quat.inverse(this.oldRotation));
newRotation = Quat.multiply(newRotation,
this.oldModelRotation);
switch (mode) {
case 0:
newPosition = Vec3.sum(this.palmPosition,
Vec3.multiply(this.front, this.x));
newPosition = Vec3.sum(newPosition,
Vec3.multiply(this.up, this.y));
newPosition = Vec3.sum(newPosition,
Vec3.multiply(this.right, this.z));
newRotation = Quat.multiply(this.rotation,
Quat.inverse(this.oldRotation));
newRotation = Quat.multiply(newRotation,
this.oldModelRotation);
break;
case 1:
var forward = Vec3.multiplyQbyV(MyAvatar.orientation, { x: 0, y: 0, z: -1 });
var d = Vec3.dot(forward, MyAvatar.position);
var factor1 = Vec3.dot(forward, this.positionAtGrab) - d;
var factor2 = Vec3.dot(forward, this.modelPositionAtGrab) - d;
var vector = Vec3.subtract(this.palmPosition, this.positionAtGrab);
if (factor2 < 0) {
factor2 = 0;
}
if (factor1 <= 0) {
factor1 = 1;
factor2 = 1;
}
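// Illustrative reading of the scaling below: factor1 and factor2 are the signed distances
// of the hand and the model (both at grab time) from the avatar's view plane, so multiplying
// the hand's displacement by factor2 / factor1 moves a distant model proportionally farther
// than the hand itself moved.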
newPosition = Vec3.sum(this.modelPositionAtGrab,
Vec3.multiply(vector,
factor2 / factor1));
newRotation = Quat.multiply(this.rotation,
Quat.inverse(this.rotationAtGrab));
newRotation = Quat.multiply(newRotation,
this.modelRotationAtGrab);
break;
}
Models.editModel(this.modelID, {
position: newPosition,
@ -341,6 +417,21 @@ function controller(wichSide) {
this.triggerValue = Controller.getTriggerValue(this.trigger);
var bumperValue = Controller.isButtonPressed(this.bumper);
if (bumperValue && !this.bumperValue) {
if (mode == 0) {
mode = 1;
Overlays.editOverlay(leftController.laser, { color: { red: 0, green: 0, blue: 255 } });
Overlays.editOverlay(rightController.laser, { color: { red: 0, green: 0, blue: 255 } });
} else {
mode = 0;
Overlays.editOverlay(leftController.laser, { color: { red: 255, green: 0, blue: 0 } });
Overlays.editOverlay(rightController.laser, { color: { red: 255, green: 0, blue: 0 } });
}
}
this.bumperValue = bumperValue;
this.checkTrigger();
this.moveLaser();
@ -356,8 +447,12 @@ function controller(wichSide) {
var attachmentIndex = -1;
var attachmentX = LASER_LENGTH_FACTOR;
var newModel;
var newProperties;
for (var i = 0; i < attachments.length; ++i) {
var position = Vec3.sum(MyAvatar.getJointPosition(attachments[i].jointName), attachments[i].translation);
var position = Vec3.sum(MyAvatar.getJointPosition(attachments[i].jointName),
Vec3.multiplyQbyV(MyAvatar.getJointCombinedRotation(attachments[i].jointName), attachments[i].translation));
var scale = attachments[i].scale;
var A = this.palmPosition;
@ -375,53 +470,56 @@ function controller(wichSide) {
}
if (attachmentIndex != -1) {
print("Detaching: " + attachments[attachmentIndex].modelURL);
MyAvatar.detachOne(attachments[attachmentIndex].modelURL, attachments[attachmentIndex].jointName);
Models.addModel({
position: Vec3.sum(MyAvatar.getJointPosition(attachments[attachmentIndex].jointName),
attachments[attachmentIndex].translation),
modelRotation: Quat.multiply(MyAvatar.getJointCombinedRotation(attachments[attachmentIndex].jointName),
attachments[attachmentIndex].rotation),
radius: attachments[attachmentIndex].scale / 2.0,
modelURL: attachments[attachmentIndex].modelURL
});
}
// There is none so ...
// Checking model tree
Vec3.print("Looking at: ", this.palmPosition);
var pickRay = { origin: this.palmPosition,
direction: Vec3.normalize(Vec3.subtract(this.tipPosition, this.palmPosition)) };
var foundIntersection = Models.findRayIntersection(pickRay);
if(!foundIntersection.accurate) {
return;
}
var foundModel = foundIntersection.modelID;
if (!foundModel.isKnownID) {
var identify = Models.identifyModel(foundModel);
if (!identify.isKnownID) {
print("Unknown ID " + identify.id + " (update loop " + foundModel.id + ")");
return;
}
foundModel = identify;
}
var properties = Models.getModelProperties(foundModel);
print("foundModel.modelURL=" + properties.modelURL);
if (isLocked(properties)) {
print("Model locked " + properties.id);
newProperties = {
position: Vec3.sum(MyAvatar.getJointPosition(attachments[attachmentIndex].jointName),
Vec3.multiplyQbyV(MyAvatar.getJointCombinedRotation(attachments[attachmentIndex].jointName), attachments[attachmentIndex].translation)),
modelRotation: Quat.multiply(MyAvatar.getJointCombinedRotation(attachments[attachmentIndex].jointName),
attachments[attachmentIndex].rotation),
radius: attachments[attachmentIndex].scale / 2.0,
modelURL: attachments[attachmentIndex].modelURL
};
newModel = Models.addModel(newProperties);
} else {
print("Checking properties: " + properties.id + " " + properties.isKnownID);
var check = this.checkModel(properties);
if (check.valid) {
this.grab(foundModel, properties);
this.x = check.x;
this.y = check.y;
this.z = check.z;
// There is none so ...
// Checking model tree
Vec3.print("Looking at: ", this.palmPosition);
var pickRay = { origin: this.palmPosition,
direction: Vec3.normalize(Vec3.subtract(this.tipPosition, this.palmPosition)) };
var foundIntersection = Models.findRayIntersection(pickRay);
if(!foundIntersection.accurate) {
print("No accurate intersection");
return;
}
newModel = foundIntersection.modelID;
if (!newModel.isKnownID) {
var identify = Models.identifyModel(newModel);
if (!identify.isKnownID) {
print("Unknown ID " + identify.id + " (update loop " + newModel.id + ")");
return;
}
newModel = identify;
}
newProperties = Models.getModelProperties(newModel);
}
print("foundModel.modelURL=" + newProperties.modelURL);
if (isLocked(newProperties)) {
print("Model locked " + newProperties.id);
} else {
this.grab(newModel, newProperties);
var check = this.checkModel(newProperties);
this.x = check.x;
this.y = check.y;
this.z = check.z;
return;
}
}
}
@ -439,38 +537,74 @@ var rightController = new controller(RIGHT);
function moveModels() {
if (leftController.grabbing && rightController.grabbing && rightController.modelID.id == leftController.modelID.id) {
//print("Both controllers");
var oldLeftPoint = Vec3.sum(leftController.oldPalmPosition, Vec3.multiply(leftController.oldFront, leftController.x));
var oldRightPoint = Vec3.sum(rightController.oldPalmPosition, Vec3.multiply(rightController.oldFront, rightController.x));
var oldMiddle = Vec3.multiply(Vec3.sum(oldLeftPoint, oldRightPoint), 0.5);
var oldLength = Vec3.length(Vec3.subtract(oldLeftPoint, oldRightPoint));
var newPosition = leftController.oldModelPosition;
var rotation = leftController.oldModelRotation;
var ratio = 1;
var leftPoint = Vec3.sum(leftController.palmPosition, Vec3.multiply(leftController.front, leftController.x));
var rightPoint = Vec3.sum(rightController.palmPosition, Vec3.multiply(rightController.front, rightController.x));
var middle = Vec3.multiply(Vec3.sum(leftPoint, rightPoint), 0.5);
var length = Vec3.length(Vec3.subtract(leftPoint, rightPoint));
var ratio = length / oldLength;
var newPosition = Vec3.sum(middle,
Vec3.multiply(Vec3.subtract(leftController.oldModelPosition, oldMiddle), ratio));
//Vec3.print("Ratio : " + ratio + " New position: ", newPosition);
var rotation = Quat.multiply(leftController.rotation,
Quat.inverse(leftController.oldRotation));
rotation = Quat.multiply(rotation, leftController.oldModelRotation);
switch (mode) {
case 0:
var oldLeftPoint = Vec3.sum(leftController.oldPalmPosition, Vec3.multiply(leftController.oldFront, leftController.x));
var oldRightPoint = Vec3.sum(rightController.oldPalmPosition, Vec3.multiply(rightController.oldFront, rightController.x));
var oldMiddle = Vec3.multiply(Vec3.sum(oldLeftPoint, oldRightPoint), 0.5);
var oldLength = Vec3.length(Vec3.subtract(oldLeftPoint, oldRightPoint));
var leftPoint = Vec3.sum(leftController.palmPosition, Vec3.multiply(leftController.front, leftController.x));
var rightPoint = Vec3.sum(rightController.palmPosition, Vec3.multiply(rightController.front, rightController.x));
var middle = Vec3.multiply(Vec3.sum(leftPoint, rightPoint), 0.5);
var length = Vec3.length(Vec3.subtract(leftPoint, rightPoint));
ratio = length / oldLength;
newPosition = Vec3.sum(middle,
Vec3.multiply(Vec3.subtract(leftController.oldModelPosition, oldMiddle), ratio));
break;
case 1:
var u = Vec3.normalize(Vec3.subtract(rightController.oldPalmPosition, leftController.oldPalmPosition));
var v = Vec3.normalize(Vec3.subtract(rightController.palmPosition, leftController.palmPosition));
var cos_theta = Vec3.dot(u, v);
if (cos_theta > 1) {
cos_theta = 1;
}
var angle = Math.acos(cos_theta) / Math.PI * 180;
if (angle < 0.1) {
return;
}
var w = Vec3.normalize(Vec3.cross(u, v));
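// Illustrative note: u and v are the old and new hand-to-hand directions; rotating about
// their (normalized) cross product by the angle between them carries u onto v, and the
// grabbed model follows that same rotation.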
rotation = Quat.multiply(Quat.angleAxis(angle, w), leftController.oldModelRotation);
leftController.positionAtGrab = leftController.palmPosition;
leftController.rotationAtGrab = leftController.rotation;
leftController.modelPositionAtGrab = leftController.oldModelPosition;
leftController.modelRotationAtGrab = rotation;
rightController.positionAtGrab = rightController.palmPosition;
rightController.rotationAtGrab = rightController.rotation;
rightController.modelPositionAtGrab = rightController.oldModelPosition;
rightController.modelRotationAtGrab = rotation;
break;
}
Models.editModel(leftController.modelID, {
position: newPosition,
//modelRotation: rotation,
modelRotation: rotation,
radius: leftController.oldModelRadius * ratio
});
leftController.oldModelPosition = newPosition;
leftController.oldModelRotation = rotation;
leftController.oldModelRadius *= ratio;
rightController.oldModelPosition = newPosition;
rightController.oldModelRotation = rotation;
rightController.oldModelRadius *= ratio;
return;
}
@ -498,8 +632,8 @@ function checkController(deltaTime) {
if (hydraConnected) {
hydraConnected = false;
leftController.hideLaser();
rightController.hideLaser();
leftController.showLaser(false);
rightController.showLaser(false);
}
}
@ -510,7 +644,7 @@ function initToolBar() {
toolBar = new ToolBar(0, 0, ToolBar.VERTICAL);
// New Model
newModel = toolBar.addTool({
imageURL: toolIconUrl + "voxel-tool.svg",
imageURL: toolIconUrl + "add-model-tool.svg",
subImage: { x: 0, y: Tool.IMAGE_WIDTH, width: Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT },
width: toolWidth, height: toolHeight,
visible: true,


@ -167,7 +167,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
_applicationOverlay(),
_runningScriptsWidget(new RunningScriptsWidget(_window)),
_runningScriptsWidgetWasVisible(false),
_trayIcon(new QSystemTrayIcon(_window))
_trayIcon(new QSystemTrayIcon(_window)),
_lastNackTime(usecTimestampNow())
{
// read the ApplicationInfo.ini file for Name/Version/Domain information
QSettings applicationInfo(Application::resourcesPath() + "info/ApplicationInfo.ini", QSettings::IniFormat);
@ -2093,6 +2094,82 @@ void Application::updateMyAvatar(float deltaTime) {
_lastQueriedViewFrustum = _viewFrustum;
}
}
// send a nack packet containing the sequence numbers of missing packets
{
quint64 now = usecTimestampNow();
quint64 sinceLastNack = now - _lastNackTime;
const quint64 TOO_LONG_SINCE_LAST_NACK = 1 * USECS_PER_SECOND;
if (sinceLastNack > TOO_LONG_SINCE_LAST_NACK) {
_lastNackTime = now;
sendNack();
}
}
}
void Application::sendNack() {
if (Menu::getInstance()->isOptionChecked(MenuOption::DisableNackPackets)) {
return;
}
char packet[MAX_PACKET_SIZE];
NodeList* nodeList = NodeList::getInstance();
// iterate through all nodes in the NodeList
foreach(const SharedNodePointer& node, NodeList::getInstance()->getNodeHash()) {
if (node->getActiveSocket() &&
( node->getType() == NodeType::VoxelServer
|| node->getType() == NodeType::ParticleServer
|| node->getType() == NodeType::ModelServer)
) {
QUuid nodeUUID = node->getUUID();
_octreeSceneStatsLock.lockForRead();
// retrieve octree scene stats of this node
if (_octreeServerSceneStats.find(nodeUUID) == _octreeServerSceneStats.end()) {
_octreeSceneStatsLock.unlock();
continue;
}
OctreeSceneStats& stats = _octreeServerSceneStats[nodeUUID];
// check if there are any sequence numbers that need to be nacked
int numSequenceNumbersAvailable = stats.getNumSequenceNumbersToNack();
if (numSequenceNumbersAvailable == 0) {
_octreeSceneStatsLock.unlock();
continue;
}
char* dataAt = packet;
int bytesRemaining = MAX_PACKET_SIZE;
// pack header
int numBytesPacketHeader = populatePacketHeader(packet, PacketTypeOctreeDataNack);
dataAt += numBytesPacketHeader;
bytesRemaining -= numBytesPacketHeader;
int numSequenceNumbersRoomFor = (bytesRemaining - sizeof(uint16_t)) / sizeof(OCTREE_PACKET_SEQUENCE);
// calculate and pack the number of sequence numbers
uint16_t numSequenceNumbers = min(numSequenceNumbersAvailable, numSequenceNumbersRoomFor);
uint16_t* numSequenceNumbersAt = (uint16_t*)dataAt;
*numSequenceNumbersAt = numSequenceNumbers;
dataAt += sizeof(uint16_t);
// pack sequence numbers
for (int i = 0; i < numSequenceNumbers; i++) {
OCTREE_PACKET_SEQUENCE* sequenceNumberAt = (OCTREE_PACKET_SEQUENCE*)dataAt;
*sequenceNumberAt = stats.getNextSequenceNumberToNack();
dataAt += sizeof(OCTREE_PACKET_SEQUENCE);
}
_octreeSceneStatsLock.unlock();
nodeList->writeUnverifiedDatagram(packet, dataAt - packet, node);
}
}
}
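Back-of-envelope capacity for one of these nack packets (all three constants are assumptions for illustration: a 1450-byte MAX_PACKET_SIZE, a 24-byte packed header, and a 2-byte OCTREE_PACKET_SEQUENCE):

int bytesRemaining = 1450 - 24;         // after the packed header
int roomFor = (bytesRemaining - 2) / 2; // minus the uint16_t count field: 712 entries
// anything beyond numSequenceNumbersRoomFor waits for the next pass of the
// once-per-second timer in updateMyAvatar()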
void Application::queryOctree(NodeType_t serverType, PacketType packetType, NodeToJurisdictionMap& jurisdictions) {


@ -411,6 +411,8 @@ private:
static void attachNewHeadToNode(Node *newNode);
static void* networkReceive(void* args); // network receive thread
void sendNack();
MainWindow* _window;
GLCanvas* _glWidget; // our GLCanvas has a couple extra features
@ -580,6 +582,8 @@ private:
bool _runningScriptsWidgetWasVisible;
QSystemTrayIcon* _trayIcon;
quint64 _lastNackTime;
};
#endif // hifi_Application_h


@ -68,6 +68,7 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
_proceduralOutputDevice(NULL),
_inputRingBuffer(0),
_ringBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL),
_isStereoInput(false),
_averagedLatency(0.0),
_measuredJitter(0),
_jitterBufferSamples(initialJitterBufferSamples),
@ -289,20 +290,27 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
if (sourceToDestinationFactor >= 2) {
// we need to downsample from 48 to 24
// for now this only supports a mono output - this would be the case for audio input
for (unsigned int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
if (destinationAudioFormat.channelCount() == 1) {
for (unsigned int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
(sourceSamples[i - sourceAudioFormat.channelCount()] / 2)
+ (sourceSamples[i] / 2);
} else {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
} else {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
(sourceSamples[i - sourceAudioFormat.channelCount()] / 4)
+ (sourceSamples[i] / 2)
+ (sourceSamples[i + sourceAudioFormat.channelCount()] / 4);
}
}
} else {
// this is a 48 to 24 resampling but both source and destination are two channels
// squish two samples into one in each channel
for (int i = 0; i < numSourceSamples; i += 4) {
destinationSamples[i / 2] = (sourceSamples[i] / 2) + (sourceSamples[i + 2] / 2);
destinationSamples[(i / 2) + 1] = (sourceSamples[i + 1] / 2) + (sourceSamples[i + 3] / 2);
}
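// Worked example (illustrative): with interleaved 48kHz stereo input L0 R0 L1 R1 L2 R2 L3 R3,
// the loop above writes (L0 + L1)/2, (R0 + R1)/2, (L2 + L3)/2, (R2 + R3)/2, averaging
// adjacent same-channel samples down to 24kHz stereo.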
}
} else {
if (sourceAudioFormat.sampleRate() == destinationAudioFormat.sampleRate()) {
// mono to stereo, same sample rate
@ -405,12 +413,12 @@ bool Audio::switchOutputToAudioDevice(const QString& outputDeviceName) {
}
void Audio::handleAudioInput() {
static char monoAudioDataPacket[MAX_PACKET_SIZE];
static char audioDataPacket[MAX_PACKET_SIZE];
static int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMicrophoneAudioNoEcho);
static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat);
static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
static int16_t* monoAudioSamples = (int16_t*) (monoAudioDataPacket + leadingBytes);
static int16_t* networkAudioSamples = (int16_t*) (audioDataPacket + leadingBytes);
float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
@ -452,126 +460,139 @@ void Audio::handleAudioInput() {
int16_t* inputAudioSamples = new int16_t[inputSamplesRequired];
_inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);
int numNetworkBytes = _isStereoInput ? NETWORK_BUFFER_LENGTH_BYTES_STEREO : NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
int numNetworkSamples = _isStereoInput ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
// zero out the monoAudioSamples array and the locally injected audio
memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
memset(networkAudioSamples, 0, numNetworkBytes);
if (!_muted) {
// we aren't muted, downsample the input audio
linearResampling((int16_t*) inputAudioSamples,
monoAudioSamples,
inputSamplesRequired,
NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
linearResampling((int16_t*) inputAudioSamples, networkAudioSamples,
inputSamplesRequired, numNetworkSamples,
_inputFormat, _desiredInputFormat);
//
// Impose Noise Gate
//
// The Noise Gate is used to reject constant background noise by measuring the noise
// floor observed at the microphone and then opening the 'gate' to allow microphone
// signals to be transmitted when the microphone samples average level exceeds a multiple
// of the noise floor.
//
// NOISE_GATE_HEIGHT: How loud you have to speak relative to noise background to open the gate.
// Make this value lower for more sensitivity and less rejection of noise.
// NOISE_GATE_WIDTH: The number of samples in an audio frame for which the height must be exceeded
// to open the gate.
// NOISE_GATE_CLOSE_FRAME_DELAY: Once the noise is below the gate height for the frame, how many frames
// will we wait before closing the gate.
// NOISE_GATE_FRAMES_TO_AVERAGE: How many audio frames should we average together to compute noise floor.
// More means better rejection but also can reject continuous things like singing.
// NUMBER_OF_NOISE_SAMPLE_FRAMES: How often should we re-evaluate the noise floor?
float loudness = 0;
float thisSample = 0;
int samplesOverNoiseGate = 0;
const float NOISE_GATE_HEIGHT = 7.0f;
const int NOISE_GATE_WIDTH = 5;
const int NOISE_GATE_CLOSE_FRAME_DELAY = 5;
const int NOISE_GATE_FRAMES_TO_AVERAGE = 5;
const float DC_OFFSET_AVERAGING = 0.99f;
const float CLIPPING_THRESHOLD = 0.90f;
//
// Check clipping, adjust DC offset, and check if should open noise gate
//
float measuredDcOffset = 0.0f;
// Increment the time since the last clip
if (_timeSinceLastClip >= 0.0f) {
_timeSinceLastClip += (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE;
}
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
measuredDcOffset += monoAudioSamples[i];
monoAudioSamples[i] -= (int16_t) _dcOffset;
thisSample = fabsf(monoAudioSamples[i]);
if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
_timeSinceLastClip = 0.0f;
// only impose the noise gate and perform tone injection if we sending mono audio
if (!_isStereoInput) {
//
// Impose Noise Gate
//
// The Noise Gate is used to reject constant background noise by measuring the noise
// floor observed at the microphone and then opening the 'gate' to allow microphone
// signals to be transmitted when the microphone samples average level exceeds a multiple
// of the noise floor.
//
// NOISE_GATE_HEIGHT: How loud you have to speak relative to noise background to open the gate.
// Make this value lower for more sensitivity and less rejection of noise.
// NOISE_GATE_WIDTH: The number of samples in an audio frame for which the height must be exceeded
// to open the gate.
// NOISE_GATE_CLOSE_FRAME_DELAY: Once the noise is below the gate height for the frame, how many frames
// will we wait before closing the gate.
// NOISE_GATE_FRAMES_TO_AVERAGE: How many audio frames should we average together to compute noise floor.
// More means better rejection but also can reject continuous things like singing.
// NUMBER_OF_NOISE_SAMPLE_FRAMES: How often should we re-evaluate the noise floor?
float loudness = 0;
float thisSample = 0;
int samplesOverNoiseGate = 0;
const float NOISE_GATE_HEIGHT = 7.0f;
const int NOISE_GATE_WIDTH = 5;
const int NOISE_GATE_CLOSE_FRAME_DELAY = 5;
const int NOISE_GATE_FRAMES_TO_AVERAGE = 5;
const float DC_OFFSET_AVERAGING = 0.99f;
const float CLIPPING_THRESHOLD = 0.90f;
//
// Check clipping, adjust DC offset, and check if should open noise gate
//
float measuredDcOffset = 0.0f;
// Increment the time since the last clip
if (_timeSinceLastClip >= 0.0f) {
_timeSinceLastClip += (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE;
}
loudness += thisSample;
// Noise Reduction: Count peaks above the average loudness
if (_noiseGateEnabled && (thisSample > (_noiseGateMeasuredFloor * NOISE_GATE_HEIGHT))) {
samplesOverNoiseGate++;
}
}
measuredDcOffset /= NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
if (_dcOffset == 0.0f) {
// On first frame, copy over measured offset
_dcOffset = measuredDcOffset;
} else {
_dcOffset = DC_OFFSET_AVERAGING * _dcOffset + (1.0f - DC_OFFSET_AVERAGING) * measuredDcOffset;
}
// Add tone injection if enabled
const float TONE_FREQ = 220.0f / SAMPLE_RATE * TWO_PI;
const float QUARTER_VOLUME = 8192.0f;
if (_toneInjectionEnabled) {
loudness = 0.0f;
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
monoAudioSamples[i] = QUARTER_VOLUME * sinf(TONE_FREQ * (float)(i + _proceduralEffectSample));
loudness += fabsf(monoAudioSamples[i]);
}
}
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
// If Noise Gate is enabled, check and turn the gate on and off
if (!_toneInjectionEnabled && _noiseGateEnabled) {
float averageOfAllSampleFrames = 0.0f;
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
float smallestSample = FLT_MAX;
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i += NOISE_GATE_FRAMES_TO_AVERAGE) {
float thisAverage = 0.0f;
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
thisAverage += _noiseSampleFrames[j];
averageOfAllSampleFrames += _noiseSampleFrames[j];
}
thisAverage /= NOISE_GATE_FRAMES_TO_AVERAGE;
if (thisAverage < smallestSample) {
smallestSample = thisAverage;
}
measuredDcOffset += networkAudioSamples[i];
networkAudioSamples[i] -= (int16_t) _dcOffset;
thisSample = fabsf(networkAudioSamples[i]);
if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
_timeSinceLastClip = 0.0f;
}
loudness += thisSample;
// Noise Reduction: Count peaks above the average loudness
if (_noiseGateEnabled && (thisSample > (_noiseGateMeasuredFloor * NOISE_GATE_HEIGHT))) {
samplesOverNoiseGate++;
}
averageOfAllSampleFrames /= NUMBER_OF_NOISE_SAMPLE_FRAMES;
_noiseGateMeasuredFloor = smallestSample;
_noiseGateSampleCounter = 0;
}
if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
_noiseGateOpen = true;
_noiseGateFramesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
measuredDcOffset /= NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
if (_dcOffset == 0.0f) {
// On first frame, copy over measured offset
_dcOffset = measuredDcOffset;
} else {
if (--_noiseGateFramesToClose == 0) {
_noiseGateOpen = false;
_dcOffset = DC_OFFSET_AVERAGING * _dcOffset + (1.0f - DC_OFFSET_AVERAGING) * measuredDcOffset;
}
// Add tone injection if enabled
const float TONE_FREQ = 220.0f / SAMPLE_RATE * TWO_PI;
const float QUARTER_VOLUME = 8192.0f;
if (_toneInjectionEnabled) {
loudness = 0.0f;
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
networkAudioSamples[i] = QUARTER_VOLUME * sinf(TONE_FREQ * (float)(i + _proceduralEffectSample));
loudness += fabsf(networkAudioSamples[i]);
}
}
if (!_noiseGateOpen) {
memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
_lastInputLoudness = 0;
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
// If Noise Gate is enabled, check and turn the gate on and off
if (!_toneInjectionEnabled && _noiseGateEnabled) {
float averageOfAllSampleFrames = 0.0f;
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
float smallestSample = FLT_MAX;
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i += NOISE_GATE_FRAMES_TO_AVERAGE) {
float thisAverage = 0.0f;
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
thisAverage += _noiseSampleFrames[j];
averageOfAllSampleFrames += _noiseSampleFrames[j];
}
thisAverage /= NOISE_GATE_FRAMES_TO_AVERAGE;
if (thisAverage < smallestSample) {
smallestSample = thisAverage;
}
}
averageOfAllSampleFrames /= NUMBER_OF_NOISE_SAMPLE_FRAMES;
_noiseGateMeasuredFloor = smallestSample;
_noiseGateSampleCounter = 0;
}
if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
_noiseGateOpen = true;
_noiseGateFramesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
} else {
if (--_noiseGateFramesToClose == 0) {
_noiseGateOpen = false;
}
}
if (!_noiseGateOpen) {
memset(networkAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
_lastInputLoudness = 0;
}
}
} else {
float loudness = 0.0f;
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; i++) {
loudness += fabsf(networkAudioSamples[i]);
}
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
}
} else {
// our input loudness is 0, since we're muted
@ -580,19 +601,19 @@ void Audio::handleAudioInput() {
// at this point we have clean monoAudioSamples, which match our target output...
// this is what we should send to our interested listeners
if (_processSpatialAudio && !_muted && _audioOutput) {
QByteArray monoInputData((char*)monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
if (_processSpatialAudio && !_muted && !_isStereoInput && _audioOutput) {
QByteArray monoInputData((char*)networkAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
emit processLocalAudio(_spatialAudioStart, monoInputData, _desiredInputFormat);
}
if (_proceduralAudioOutput) {
processProceduralAudio(monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
if (!_isStereoInput && _proceduralAudioOutput) {
processProceduralAudio(networkAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
}
if (_scopeEnabled && !_scopeEnabledPause) {
if (!_isStereoInput && _scopeEnabled && !_scopeEnabledPause) {
unsigned int numMonoAudioChannels = 1;
unsigned int monoAudioChannel = 0;
addBufferToScope(_scopeInput, _scopeInputOffset, monoAudioSamples, monoAudioChannel, numMonoAudioChannels);
addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, monoAudioChannel, numMonoAudioChannels);
_scopeInputOffset += NETWORK_SAMPLES_PER_FRAME;
_scopeInputOffset %= _samplesPerScope;
}
@ -604,9 +625,7 @@ void Audio::handleAudioInput() {
MyAvatar* interfaceAvatar = Application::getInstance()->getAvatar();
glm::vec3 headPosition = interfaceAvatar->getHead()->getPosition();
glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientationInWorldFrame();
// we need the amount of bytes in the buffer + 1 for type
// + 12 for 3 floats for position + float for bearing + 1 attenuation byte
quint8 isStereo = _isStereoInput ? 1 : 0;
int numAudioBytes = 0;
@ -615,11 +634,12 @@ void Audio::handleAudioInput() {
packetType = PacketTypeSilentAudioFrame;
// we need to indicate how many silent samples this is to the audio mixer
monoAudioSamples[0] = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
audioDataPacket[0] = _isStereoInput
? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO
: NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
numAudioBytes = sizeof(int16_t);
} else {
numAudioBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
numAudioBytes = _isStereoInput ? NETWORK_BUFFER_LENGTH_BYTES_STEREO : NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
packetType = PacketTypeMicrophoneAudioWithEcho;
@ -628,7 +648,10 @@ void Audio::handleAudioInput() {
}
}
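// Sketch of the packet body assembled below (field order taken from the
// writes that follow; byte counts depend on glm's vec3/quat layout):
//   [header][quint8 isStereo][glm::vec3 position][glm::quat orientation]
//   [payload: one int16_t silent-sample count, or numAudioBytes of samples]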
char* currentPacketPtr = monoAudioDataPacket + populatePacketHeader(monoAudioDataPacket, packetType);
char* currentPacketPtr = audioDataPacket + populatePacketHeader(audioDataPacket, packetType);
// set the mono/stereo byte
*currentPacketPtr++ = isStereo;
// memcpy the three float positions
memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
@ -638,7 +661,7 @@ void Audio::handleAudioInput() {
memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
currentPacketPtr += sizeof(headOrientation);
nodeList->writeDatagram(monoAudioDataPacket, numAudioBytes + leadingBytes, audioMixer);
nodeList->writeDatagram(audioDataPacket, numAudioBytes + leadingBytes, audioMixer);
Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
.updateValue(numAudioBytes + leadingBytes);
@ -761,6 +784,24 @@ void Audio::toggleAudioNoiseReduction() {
_noiseGateEnabled = !_noiseGateEnabled;
}
void Audio::toggleStereoInput() {
int oldChannelCount = _desiredInputFormat.channelCount();
QAction* stereoAudioOption = Menu::getInstance()->getActionForOption(MenuOption::StereoAudio);
if (stereoAudioOption->isChecked()) {
_desiredInputFormat.setChannelCount(2);
_isStereoInput = true;
} else {
_desiredInputFormat.setChannelCount(1);
_isStereoInput = false;
}
if (oldChannelCount != _desiredInputFormat.channelCount()) {
// change in channel count for desired input format, restart the input device
switchInputToAudioDevice(_inputAudioDeviceName);
}
}
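// Hooked up in the Menu.cpp hunk below: the checkable MenuOption::StereoAudio
// item is connected to this slot, so toggling the menu entry flips the desired
// channel count and restarts the input device only when the count changed.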
void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
_ringBuffer.parseData(audioByteArray);
@ -1300,18 +1341,21 @@ bool Audio::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {
if (adjustedFormatForAudioDevice(inputDeviceInfo, _desiredInputFormat, _inputFormat)) {
qDebug() << "The format to be used for audio input is" << _inputFormat;
_audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
_numInputCallbackBytes = calculateNumberOfInputCallbackBytes(_inputFormat);
_audioInput->setBufferSize(_numInputCallbackBytes);
// how do we want to handle input working, but output not working?
int numFrameSamples = calculateNumberOfFrameSamples(_numInputCallbackBytes);
_inputRingBuffer.resizeForFrameSize(numFrameSamples);
_inputDevice = _audioInput->start();
connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));
supportedFormat = true;
// if the user wants stereo but this device can't provide it, then bail
if (!_isStereoInput || _inputFormat.channelCount() == 2) {
_audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
_numInputCallbackBytes = calculateNumberOfInputCallbackBytes(_inputFormat);
_audioInput->setBufferSize(_numInputCallbackBytes);
// how do we want to handle input working, but output not working?
int numFrameSamples = calculateNumberOfFrameSamples(_numInputCallbackBytes);
_inputRingBuffer.resizeForFrameSize(numFrameSamples);
_inputDevice = _audioInput->start();
connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));
supportedFormat = true;
}
}
}
return supportedFormat;
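// A minimal sketch of the gate open/close rule from handleAudioInput above,
// pulled out for clarity. The struct and names are hypothetical; width and
// closeDelay stand in for NOISE_GATE_WIDTH and NOISE_GATE_CLOSE_FRAME_DELAY.
// (Unlike the code above, this guards the countdown on the gate being open.)
struct NoiseGateSketch {
    bool open = false;
    int framesToClose = 0;
    void update(int samplesOverFloor, int width, int closeDelay) {
        if (samplesOverFloor > width) {
            // enough samples rose above the measured floor: open and re-arm
            open = true;
            framesToClose = closeDelay;
        } else if (open && --framesToClose == 0) {
            // quiet for closeDelay consecutive frames: shut the gate
            open = false;
        }
    }
};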

View file

@ -85,6 +85,7 @@ public slots:
void toggleScope();
void toggleScopePause();
void toggleAudioSpatialProcessing();
void toggleStereoInput();
void selectAudioScopeFiveFrames();
void selectAudioScopeTwentyFrames();
void selectAudioScopeFiftyFrames();
@ -127,6 +128,7 @@ private:
QIODevice* _proceduralOutputDevice;
AudioRingBuffer _inputRingBuffer;
AudioRingBuffer _ringBuffer;
bool _isStereoInput;
QString _inputAudioDeviceName;
QString _outputAudioDeviceName;

View file

@ -18,6 +18,7 @@
#include "Camera.h"
#include "Menu.h"
#include "Util.h"
#include "devices/OculusManager.h"
const float CAMERA_FIRST_PERSON_MODE_UP_SHIFT = 0.0f;
const float CAMERA_FIRST_PERSON_MODE_DISTANCE = 0.0f;
@ -264,7 +265,12 @@ PickRay CameraScriptableObject::computePickRay(float x, float y) {
float screenWidth = Application::getInstance()->getGLWidget()->width();
float screenHeight = Application::getInstance()->getGLWidget()->height();
PickRay result;
_viewFrustum->computePickRay(x / screenWidth, y / screenHeight, result.origin, result.direction);
if (OculusManager::isConnected()) {
result.origin = _camera->getPosition();
Application::getInstance()->getApplicationOverlay().computeOculusPickRay(x / screenWidth, y / screenHeight, result.direction);
} else {
_viewFrustum->computePickRay(x / screenWidth, y / screenHeight, result.origin, result.direction);
}
return result;
}

View file

@ -394,6 +394,8 @@ Menu::Menu() :
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::AlignForearmsWithWrists, 0, true);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::AlternateIK, 0, false);
addCheckableActionToQMenuAndActionHash(developerMenu, MenuOption::DisableNackPackets, 0, false);
addDisabledActionAndSeparator(developerMenu, "Testing");
QMenu* timingMenu = developerMenu->addMenu("Timing and Statistics Tools");
@ -433,6 +435,8 @@ Menu::Menu() :
SLOT(toggleAudioNoiseReduction()));
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoServerAudio);
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoLocalAudio);
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::StereoAudio, 0, false,
appInstance->getAudio(), SLOT(toggleStereoInput()));
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::MuteAudio,
Qt::CTRL | Qt::Key_M,
false,

View file

@ -321,6 +321,7 @@ namespace MenuOption {
const QString DecreaseAvatarSize = "Decrease Avatar Size";
const QString DecreaseVoxelSize = "Decrease Voxel Size";
const QString DisableAutoAdjustLOD = "Disable Automatically Adjusting LOD";
const QString DisableNackPackets = "Disable NACK Packets";
const QString DisplayFrustum = "Display Frustum";
const QString DisplayHands = "Display Hands";
const QString DisplayHandTargets = "Display Hand Targets";
@ -403,6 +404,7 @@ namespace MenuOption {
const QString StandOnNearbyFloors = "Stand on nearby floors";
const QString Stars = "Stars";
const QString Stats = "Stats";
const QString StereoAudio = "Stereo Audio";
const QString StopAllScripts = "Stop All Scripts";
const QString SuppressShortTimings = "Suppress Timings Less than 10ms";
const QString TestPing = "Test Ping";

View file

@ -168,9 +168,9 @@ void SkeletonModel::getBodyShapes(QVector<const Shape*>& shapes) const {
void SkeletonModel::renderIKConstraints() {
renderJointConstraints(getRightHandJointIndex());
renderJointConstraints(getLeftHandJointIndex());
if (isActive() && _owningAvatar->isMyAvatar()) {
renderRagDoll();
}
//if (isActive() && _owningAvatar->isMyAvatar()) {
// renderRagDoll();
//}
}
class IndexValue {
@ -217,45 +217,30 @@ void SkeletonModel::applyPalmData(int jointIndex, PalmData& palm) {
if (parentJointIndex == -1) {
return;
}
// rotate palm to align with its normal (normal points out of hand's palm)
glm::quat palmRotation;
glm::quat r0, r1;
if (!Menu::getInstance()->isOptionChecked(MenuOption::AlternateIK) &&
Menu::getInstance()->isOptionChecked(MenuOption::AlignForearmsWithWrists)) {
JointState parentState = _jointStates[parentJointIndex];
palmRotation = parentState.getRotationFromBindToModelFrame();
r0 = palmRotation;
} else {
JointState state = _jointStates[jointIndex];
palmRotation = state.getRotationFromBindToModelFrame();
}
glm::quat inverseRotation = glm::inverse(_rotation);
glm::vec3 palmNormal = inverseRotation * palm.getNormal();
palmRotation = rotationBetween(palmRotation * geometry.palmDirection, palmNormal) * palmRotation;
r1 = palmRotation;
// rotate palm to align with finger direction
glm::vec3 direction = inverseRotation * palm.getFingerDirection();
palmRotation = rotationBetween(palmRotation * glm::vec3(-sign, 0.0f, 0.0f), direction) * palmRotation;
// set hand position, rotation
glm::vec3 palmPosition = inverseRotation * (palm.getPosition() - _translation);
glm::vec3 palmNormal = inverseRotation * palm.getNormal();
glm::vec3 fingerDirection = inverseRotation * palm.getFingerDirection();
glm::quat palmRotation = rotationBetween(geometry.palmDirection, palmNormal);
palmRotation = rotationBetween(palmRotation * glm::vec3(-sign, 0.0f, 0.0f), fingerDirection) * palmRotation;
if (Menu::getInstance()->isOptionChecked(MenuOption::AlternateIK)) {
setHandPosition(jointIndex, palmPosition, palmRotation);
} else if (Menu::getInstance()->isOptionChecked(MenuOption::AlignForearmsWithWrists)) {
glm::vec3 forearmVector = palmRotation * glm::vec3(sign, 0.0f, 0.0f);
setJointPosition(parentJointIndex, palmPosition + forearmVector *
geometry.joints.at(jointIndex).distanceToParent * extractUniformScale(_scale),
float forearmLength = geometry.joints.at(jointIndex).distanceToParent * extractUniformScale(_scale);
glm::vec3 forearm = palmRotation * glm::vec3(sign * forearmLength, 0.0f, 0.0f);
setJointPosition(parentJointIndex, palmPosition + forearm,
glm::quat(), false, -1, false, glm::vec3(0.0f, -1.0f, 0.0f), PALM_PRIORITY);
JointState& parentState = _jointStates[parentJointIndex];
parentState.setRotationFromBindFrame(palmRotation, PALM_PRIORITY);
// lock hand to forearm by slamming its rotation (in parent-frame) to identity
_jointStates[jointIndex]._rotationInParentFrame = glm::quat();
} else {
setJointPosition(jointIndex, palmPosition, palmRotation,
true, -1, false, glm::vec3(0.0f, -1.0f, 0.0f), PALM_PRIORITY);
}
}

View file

@ -1316,8 +1316,8 @@ bool Model::setJointPosition(int jointIndex, const glm::vec3& position, const gl
if (useRotation) {
JointState& state = _jointStates[jointIndex];
state.setRotation(rotation, true, priority);
endRotation = state.getRotation();
state.setRotationFromBindFrame(rotation, priority);
endRotation = state.getRotationFromBindToModelFrame();
}
// then, we go from the joint upwards, rotating the end as close as possible to the target

View file

@ -297,6 +297,25 @@ void ApplicationOverlay::displayOverlayTexture(Camera& whichCamera) {
glDisable(GL_TEXTURE_2D);
}
const float textureFov = PI / 2.5f;
void ApplicationOverlay::computeOculusPickRay(float x, float y, glm::vec3& direction) const {
glm::quat rot = Application::getInstance()->getAvatar()->getOrientation();
//invert y direction
y = 1.0 - y;
//Get position on hemisphere UI
x = sin((x - 0.5f) * textureFov);
y = sin((y - 0.5f) * textureFov);
float dist = sqrt(x * x + y * y);
float z = -sqrt(1.0f - dist * dist);
//Rotate the UI pick ray by the avatar orientation
direction = glm::normalize(rot * glm::vec3(x, y, z));
}
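// Illustrative check of the mapping above: for any point inside the unit disc,
// (x, y, -sqrt(1 - x*x - y*y)) already lies on the unit sphere, so the
// glm::normalize above is a safety net rather than a correction. Hypothetical
// helper, assuming <cassert> and <cmath> are available:
static void checkHemisphereMapping(float u, float v) {
    float x = sinf((u - 0.5f) * textureFov);
    float y = sinf((v - 0.5f) * textureFov);
    float dist = sqrtf(x * x + y * y);
    float z = -sqrtf(1.0f - dist * dist);
    assert(fabsf(x * x + y * y + z * z - 1.0f) < 1e-5f); // unit length by construction
}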
// Fast helper functions
inline float max(float a, float b) {
return (a > b) ? a : b;
@ -306,8 +325,6 @@ inline float min(float a, float b) {
return (a < b) ? a : b;
}
const float textureFov = PI / 2.5f;
// Draws the FBO texture for Oculus rift. TODO: Draw a curved texture instead of plane.
void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {

View file

@ -15,7 +15,7 @@
class Overlays;
class QOpenGLFramebufferObject;
// Handles the drawing of the overlays to the scree
// Handles the drawing of the overlays to the screen
class ApplicationOverlay {
public:
@ -27,6 +27,7 @@ public:
void renderOverlay(bool renderToTexture = false);
void displayOverlayTexture(Camera& whichCamera);
void displayOverlayTextureOculus(Camera& whichCamera);
void computeOculusPickRay(float x, float y, glm::vec3& direction) const;
// Getters
QOpenGLFramebufferObject* getFramebufferObject();

View file

@ -20,14 +20,15 @@
#include "PositionalAudioRingBuffer.h"
PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type) :
AudioRingBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo) :
AudioRingBuffer(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
_type(type),
_position(0.0f, 0.0f, 0.0f),
_orientation(0.0f, 0.0f, 0.0f, 0.0f),
_willBeAddedToMix(false),
_shouldLoopbackForNode(false),
_shouldOutputStarveDebug(true)
_shouldOutputStarveDebug(true),
_isStereo(isStereo)
{
}
@ -40,6 +41,9 @@ int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
// skip the packet header (includes the source UUID)
int readBytes = numBytesForPacketHeader(packet);
// hop over the channel flag that has already been read in AudioMixerClientData
readBytes += sizeof(quint8);
// read the positional data
readBytes += parsePositionalData(packet.mid(readBytes));
if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) {
@ -51,7 +55,9 @@ int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
readBytes += sizeof(int16_t);
addSilentFrame(numSilentSamples);
if (numSilentSamples > 0) {
addSilentFrame(numSilentSamples);
}
} else {
// there is audio data to read
readBytes += writeData(packet.data() + readBytes, packet.size() - readBytes);
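// Wire format implied by the reads above (sketch):
//   [packet header][quint8 channel flag][positional data]
// followed either by an int16_t count of silent samples (for
// PacketTypeSilentAudioFrame) or by raw samples filling the rest of the packet.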

View file

@ -24,7 +24,7 @@ public:
Injector
};
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type);
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false);
~PositionalAudioRingBuffer();
int parseData(const QByteArray& packet);
@ -41,6 +41,8 @@ public:
bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
bool isStereo() const { return _isStereo; }
PositionalAudioRingBuffer::Type getType() const { return _type; }
const glm::vec3& getPosition() const { return _position; }
const glm::quat& getOrientation() const { return _orientation; }
@ -56,6 +58,7 @@ protected:
bool _willBeAddedToMix;
bool _shouldLoopbackForNode;
bool _shouldOutputStarveDebug;
bool _isStereo;
float _nextOutputTrailingLoudness;
};

View file

@ -894,14 +894,15 @@ FBXBlendshape extractBlendshape(const FBXNode& object) {
}
void setTangents(FBXMesh& mesh, int firstIndex, int secondIndex) {
glm::vec3 normal = glm::normalize(mesh.normals.at(firstIndex));
const glm::vec3& normal = mesh.normals.at(firstIndex);
glm::vec3 bitangent = glm::cross(normal, mesh.vertices.at(secondIndex) - mesh.vertices.at(firstIndex));
if (glm::length(bitangent) < EPSILON) {
return;
}
glm::vec2 texCoordDelta = mesh.texCoords.at(secondIndex) - mesh.texCoords.at(firstIndex);
mesh.tangents[firstIndex] += glm::cross(glm::angleAxis(
- atan2f(-texCoordDelta.t, texCoordDelta.s), normal) * glm::normalize(bitangent), normal);
glm::vec3 normalizedNormal = glm::normalize(normal);
mesh.tangents[firstIndex] += glm::cross(glm::angleAxis(-atan2f(-texCoordDelta.t, texCoordDelta.s), normalizedNormal) *
glm::normalize(bitangent), normalizedNormal);
}
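// Geometric reading of the code above (sketch): with dUV = texCoords(second) -
// texCoords(first), rotating the normalized bitangent about the normalized
// normal by -atan2f(-dUV.t, dUV.s) lines it up with the texture's s direction,
// and the final cross with the normal yields the per-edge tangent contribution.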
QVector<int> getIndices(const QVector<QString> ids, QVector<QString> modelIDs) {

View file

@ -79,10 +79,6 @@ IDStreamer& IDStreamer::operator>>(int& value) {
return *this;
}
static QByteArray getEnumName(const QMetaEnum& metaEnum) {
return QByteArray(metaEnum.scope()) + "::" + metaEnum.name();
}
int Bitstream::registerMetaObject(const char* className, const QMetaObject* metaObject) {
getMetaObjects().insert(className, metaObject);
@ -90,17 +86,6 @@ int Bitstream::registerMetaObject(const char* className, const QMetaObject* meta
for (const QMetaObject* superClass = metaObject; superClass; superClass = superClass->superClass()) {
getMetaObjectSubClasses().insert(superClass, metaObject);
}
// register the streamers for all enumerators
// temporarily disabled: crashes on Windows
//for (int i = 0; i < metaObject->enumeratorCount(); i++) {
// QMetaEnum metaEnum = metaObject->enumerator(i);
// const TypeStreamer*& streamer = getEnumStreamers()[QPair<QByteArray, QByteArray>(metaEnum.scope(), metaEnum.name())];
// if (!streamer) {
// getEnumStreamersByName().insert(getEnumName(metaEnum), streamer = new EnumTypeStreamer(metaEnum));
// }
//}
return 0;
}
@ -225,7 +210,8 @@ void Bitstream::persistWriteMappings(const WriteMappings& mappings) {
}
connect(it.key().data(), SIGNAL(destroyed(QObject*)), SLOT(clearSharedObject(QObject*)));
QPointer<SharedObject>& reference = _sharedObjectReferences[it.key()->getOriginID()];
if (reference) {
if (reference && reference != it.key()) {
// the object has been replaced by a successor, so we can forget about the original
_sharedObjectStreamer.removePersistentID(reference);
reference->disconnect(this);
}
@ -259,7 +245,8 @@ void Bitstream::persistReadMappings(const ReadMappings& mappings) {
continue;
}
QPointer<SharedObject>& reference = _sharedObjectReferences[it.value()->getRemoteOriginID()];
if (reference) {
if (reference && reference != it.value()) {
// the object has been replaced by a successor, so we can forget about the original
_sharedObjectStreamer.removePersistentValue(reference.data());
}
reference = it.value();
@ -311,7 +298,7 @@ void Bitstream::writeRawDelta(const QObject* value, const QObject* reference) {
}
const QMetaObject* metaObject = value->metaObject();
_metaObjectStreamer << metaObject;
foreach (const PropertyWriter& propertyWriter, getPropertyWriters(metaObject)) {
foreach (const PropertyWriter& propertyWriter, getPropertyWriters().value(metaObject)) {
propertyWriter.writeDelta(*this, value, reference);
}
}
@ -489,7 +476,7 @@ Bitstream& Bitstream::operator<<(const QObject* object) {
}
const QMetaObject* metaObject = object->metaObject();
_metaObjectStreamer << metaObject;
foreach (const PropertyWriter& propertyWriter, getPropertyWriters(metaObject)) {
foreach (const PropertyWriter& propertyWriter, getPropertyWriters().value(metaObject)) {
propertyWriter.write(*this, object);
}
return *this;
@ -574,7 +561,7 @@ Bitstream& Bitstream::operator<(const QMetaObject* metaObject) {
if (_metadataType == NO_METADATA) {
return *this;
}
const QVector<PropertyWriter>& propertyWriters = getPropertyWriters(metaObject);
const QVector<PropertyWriter>& propertyWriters = getPropertyWriters().value(metaObject);
*this << propertyWriters.size();
QCryptographicHash hash(QCryptographicHash::Md5);
foreach (const PropertyWriter& propertyWriter, propertyWriters) {
@ -608,7 +595,7 @@ Bitstream& Bitstream::operator>(ObjectReader& objectReader) {
qWarning() << "Unknown class name: " << className << "\n";
}
if (_metadataType == NO_METADATA) {
objectReader = ObjectReader(className, metaObject, getPropertyReaders(metaObject));
objectReader = ObjectReader(className, metaObject, getPropertyReaders().value(metaObject));
return *this;
}
int storedPropertyCount;
@ -632,7 +619,7 @@ Bitstream& Bitstream::operator>(ObjectReader& objectReader) {
QCryptographicHash hash(QCryptographicHash::Md5);
bool matches = true;
if (metaObject) {
const QVector<PropertyWriter>& propertyWriters = getPropertyWriters(metaObject);
const QVector<PropertyWriter>& propertyWriters = getPropertyWriters().value(metaObject);
if (propertyWriters.size() == properties.size()) {
for (int i = 0; i < propertyWriters.size(); i++) {
const PropertyWriter& propertyWriter = propertyWriters.at(i);
@ -651,7 +638,7 @@ Bitstream& Bitstream::operator>(ObjectReader& objectReader) {
QByteArray remoteHashResult(localHashResult.size(), 0);
read(remoteHashResult.data(), remoteHashResult.size() * BITS_IN_BYTE);
if (metaObject && matches && localHashResult == remoteHashResult) {
objectReader = ObjectReader(className, metaObject, getPropertyReaders(metaObject));
objectReader = ObjectReader(className, metaObject, getPropertyReaders().value(metaObject));
return *this;
}
}
@ -988,31 +975,6 @@ void Bitstream::clearSharedObject(QObject* object) {
}
}
const QVector<PropertyWriter>& Bitstream::getPropertyWriters(const QMetaObject* metaObject) {
QVector<PropertyWriter>& propertyWriters = _propertyWriters[metaObject];
if (propertyWriters.isEmpty()) {
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored()) {
continue;
}
const TypeStreamer* streamer;
if (property.isEnumType()) {
QMetaEnum metaEnum = property.enumerator();
streamer = getEnumStreamers().value(QPair<QByteArray, QByteArray>(
QByteArray::fromRawData(metaEnum.scope(), strlen(metaEnum.scope())),
QByteArray::fromRawData(metaEnum.name(), strlen(metaEnum.name()))));
} else {
streamer = getTypeStreamers().value(property.userType());
}
if (streamer) {
propertyWriters.append(PropertyWriter(property, streamer));
}
}
}
return propertyWriters;
}
QHash<QByteArray, const QMetaObject*>& Bitstream::getMetaObjects() {
static QHash<QByteArray, const QMetaObject*> metaObjects;
return metaObjects;
@ -1028,42 +990,100 @@ QHash<int, const TypeStreamer*>& Bitstream::getTypeStreamers() {
return typeStreamers;
}
QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*>& Bitstream::getEnumStreamers() {
static QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*> enumStreamers;
const QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*>& Bitstream::getEnumStreamers() {
static QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*> enumStreamers = createEnumStreamers();
return enumStreamers;
}
QHash<QByteArray, const TypeStreamer*>& Bitstream::getEnumStreamersByName() {
static QHash<QByteArray, const TypeStreamer*> enumStreamersByName;
QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*> Bitstream::createEnumStreamers() {
QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*> enumStreamers;
foreach (const QMetaObject* metaObject, getMetaObjects()) {
for (int i = 0; i < metaObject->enumeratorCount(); i++) {
QMetaEnum metaEnum = metaObject->enumerator(i);
const TypeStreamer*& streamer = enumStreamers[QPair<QByteArray, QByteArray>(metaEnum.scope(), metaEnum.name())];
if (!streamer) {
streamer = new EnumTypeStreamer(metaEnum);
}
}
}
return enumStreamers;
}
const QHash<QByteArray, const TypeStreamer*>& Bitstream::getEnumStreamersByName() {
static QHash<QByteArray, const TypeStreamer*> enumStreamersByName = createEnumStreamersByName();
return enumStreamersByName;
}
QVector<PropertyReader> Bitstream::getPropertyReaders(const QMetaObject* metaObject) {
QVector<PropertyReader> propertyReaders;
if (!metaObject) {
return propertyReaders;
QHash<QByteArray, const TypeStreamer*> Bitstream::createEnumStreamersByName() {
QHash<QByteArray, const TypeStreamer*> enumStreamersByName;
foreach (const TypeStreamer* streamer, getEnumStreamers()) {
enumStreamersByName.insert(streamer->getName(), streamer);
}
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored()) {
continue;
}
const TypeStreamer* streamer;
if (property.isEnumType()) {
QMetaEnum metaEnum = property.enumerator();
streamer = getEnumStreamers().value(QPair<QByteArray, QByteArray>(
QByteArray::fromRawData(metaEnum.scope(), strlen(metaEnum.scope())),
QByteArray::fromRawData(metaEnum.name(), strlen(metaEnum.name()))));
} else {
streamer = getTypeStreamers().value(property.userType());
}
if (streamer) {
propertyReaders.append(PropertyReader(TypeReader(QByteArray(), streamer), property));
return enumStreamersByName;
}
const QHash<const QMetaObject*, QVector<PropertyReader> >& Bitstream::getPropertyReaders() {
static QHash<const QMetaObject*, QVector<PropertyReader> > propertyReaders = createPropertyReaders();
return propertyReaders;
}
QHash<const QMetaObject*, QVector<PropertyReader> > Bitstream::createPropertyReaders() {
QHash<const QMetaObject*, QVector<PropertyReader> > propertyReaders;
foreach (const QMetaObject* metaObject, getMetaObjects()) {
QVector<PropertyReader>& readers = propertyReaders[metaObject];
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored()) {
continue;
}
const TypeStreamer* streamer;
if (property.isEnumType()) {
QMetaEnum metaEnum = property.enumerator();
streamer = getEnumStreamers().value(QPair<QByteArray, QByteArray>(
QByteArray::fromRawData(metaEnum.scope(), strlen(metaEnum.scope())),
QByteArray::fromRawData(metaEnum.name(), strlen(metaEnum.name()))));
} else {
streamer = getTypeStreamers().value(property.userType());
}
if (streamer) {
readers.append(PropertyReader(TypeReader(QByteArray(), streamer), property));
}
}
}
return propertyReaders;
}
const QHash<const QMetaObject*, QVector<PropertyWriter> >& Bitstream::getPropertyWriters() {
static QHash<const QMetaObject*, QVector<PropertyWriter> > propertyWriters = createPropertyWriters();
return propertyWriters;
}
QHash<const QMetaObject*, QVector<PropertyWriter> > Bitstream::createPropertyWriters() {
QHash<const QMetaObject*, QVector<PropertyWriter> > propertyWriters;
foreach (const QMetaObject* metaObject, getMetaObjects()) {
QVector<PropertyWriter>& writers = propertyWriters[metaObject];
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored()) {
continue;
}
const TypeStreamer* streamer;
if (property.isEnumType()) {
QMetaEnum metaEnum = property.enumerator();
streamer = getEnumStreamers().value(QPair<QByteArray, QByteArray>(
QByteArray::fromRawData(metaEnum.scope(), strlen(metaEnum.scope())),
QByteArray::fromRawData(metaEnum.name(), strlen(metaEnum.name()))));
} else {
streamer = getTypeStreamers().value(property.userType());
}
if (streamer) {
writers.append(PropertyWriter(property, streamer));
}
}
}
return propertyWriters;
}
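// Net effect of the refactor above (summary): enum streamers and the per-class
// property reader/writer tables are now built once, inside function-local
// statics, from every registered meta-object, replacing the lazy per-instance
// _propertyWriters cache that getPropertyWriters(metaObject) used to fill.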
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer) :
_typeName(typeName),
_streamer(streamer),
@ -1461,17 +1481,21 @@ QDebug& operator<<(QDebug& debug, const QMetaObject* metaObject) {
return debug << (metaObject ? metaObject->className() : "null");
}
EnumTypeStreamer::EnumTypeStreamer(const QMetaObject* metaObject, const char* name) :
_metaObject(metaObject),
_enumName(name),
_name(QByteArray(metaObject->className()) + "::" + name),
_bits(-1) {
setType(QMetaType::Int);
}
EnumTypeStreamer::EnumTypeStreamer(const QMetaEnum& metaEnum) :
_name(QByteArray(metaEnum.scope()) + "::" + metaEnum.name()),
_metaEnum(metaEnum),
_name(getEnumName(metaEnum)) {
setType(QMetaType::Int);
int highestValue = 0;
for (int j = 0; j < metaEnum.keyCount(); j++) {
highestValue = qMax(highestValue, metaEnum.value(j));
}
_bits = getBitsForHighestValue(highestValue);
_bits(-1) {
setType(QMetaType::Int);
}
const char* EnumTypeStreamer::getName() const {
@ -1483,10 +1507,21 @@ TypeReader::Type EnumTypeStreamer::getReaderType() const {
}
int EnumTypeStreamer::getBits() const {
if (_bits == -1) {
int highestValue = 0;
QMetaEnum metaEnum = getMetaEnum();
for (int j = 0; j < metaEnum.keyCount(); j++) {
highestValue = qMax(highestValue, metaEnum.value(j));
}
const_cast<EnumTypeStreamer*>(this)->_bits = getBitsForHighestValue(highestValue);
}
return _bits;
}
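// What getBitsForHighestValue (defined elsewhere in this file) is assumed to
// compute, sketched for reference: the minimum number of bits that can
// represent every value up to highestValue.
static int bitsForHighestValueSketch(int highestValue) {
    int bits = 0;
    while (highestValue > 0) { // one bit per significant binary digit
        bits++;
        highestValue >>= 1;
    }
    return bits;
}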
QMetaEnum EnumTypeStreamer::getMetaEnum() const {
if (!_metaEnum.isValid()) {
const_cast<EnumTypeStreamer*>(this)->_metaEnum = _metaObject->enumerator(_metaObject->indexOfEnumerator(_enumName));
}
return _metaEnum;
}
@ -1496,12 +1531,12 @@ bool EnumTypeStreamer::equal(const QVariant& first, const QVariant& second) cons
void EnumTypeStreamer::write(Bitstream& out, const QVariant& value) const {
int intValue = value.toInt();
out.write(&intValue, _bits);
out.write(&intValue, getBits());
}
QVariant EnumTypeStreamer::read(Bitstream& in) const {
int intValue = 0;
in.read(&intValue, _bits);
in.read(&intValue, getBits());
return intValue;
}
@ -1511,7 +1546,7 @@ void EnumTypeStreamer::writeDelta(Bitstream& out, const QVariant& value, const Q
out << false;
} else {
out << true;
out.write(&intValue, _bits);
out.write(&intValue, getBits());
}
}
@ -1520,7 +1555,7 @@ void EnumTypeStreamer::readDelta(Bitstream& in, QVariant& value, const QVariant&
in >> changed;
if (changed) {
int intValue = 0;
in.read(&intValue, _bits);
in.read(&intValue, getBits());
value = intValue;
} else {
value = reference;
@ -1529,17 +1564,17 @@ void EnumTypeStreamer::readDelta(Bitstream& in, QVariant& value, const QVariant&
void EnumTypeStreamer::writeRawDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const {
int intValue = value.toInt();
out.write(&intValue, _bits);
out.write(&intValue, getBits());
}
void EnumTypeStreamer::readRawDelta(Bitstream& in, QVariant& value, const QVariant& reference) const {
int intValue = 0;
in.read(&intValue, _bits);
in.read(&intValue, getBits());
value = intValue;
}
void EnumTypeStreamer::setEnumValue(QVariant& object, int value, const QHash<int, int>& mappings) const {
if (_metaEnum.isFlag()) {
if (getMetaEnum().isFlag()) {
int combined = 0;
for (QHash<int, int>::const_iterator it = mappings.constBegin(); it != mappings.constEnd(); it++) {
if (value & it.key()) {

View file

@ -400,8 +400,6 @@ private slots:
private:
const QVector<PropertyWriter>& getPropertyWriters(const QMetaObject* metaObject);
QDataStream& _underlying;
quint8 _byte;
int _position;
@ -421,14 +419,17 @@ private:
QHash<QByteArray, const QMetaObject*> _metaObjectSubstitutions;
QHash<QByteArray, const TypeStreamer*> _typeStreamerSubstitutions;
QHash<const QMetaObject*, QVector<PropertyWriter> > _propertyWriters;
static QHash<QByteArray, const QMetaObject*>& getMetaObjects();
static QMultiHash<const QMetaObject*, const QMetaObject*>& getMetaObjectSubClasses();
static QHash<int, const TypeStreamer*>& getTypeStreamers();
static QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*>& getEnumStreamers();
static QHash<QByteArray, const TypeStreamer*>& getEnumStreamersByName();
static QVector<PropertyReader> getPropertyReaders(const QMetaObject* metaObject);
static const QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*>& getEnumStreamers();
static QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*> createEnumStreamers();
static const QHash<QByteArray, const TypeStreamer*>& getEnumStreamersByName();
static QHash<QByteArray, const TypeStreamer*> createEnumStreamersByName();
static const QHash<const QMetaObject*, QVector<PropertyReader> >& getPropertyReaders();
static QHash<const QMetaObject*, QVector<PropertyReader> > createPropertyReaders();
static const QHash<const QMetaObject*, QVector<PropertyWriter> >& getPropertyWriters();
static QHash<const QMetaObject*, QVector<PropertyWriter> > createPropertyWriters();
};
template<class T> inline void Bitstream::writeDelta(const T& value, const T& reference) {
@ -938,8 +939,9 @@ public:
class EnumTypeStreamer : public TypeStreamer {
public:
EnumTypeStreamer(const QMetaObject* metaObject, const char* name);
EnumTypeStreamer(const QMetaEnum& metaEnum);
virtual const char* getName() const;
virtual TypeReader::Type getReaderType() const;
virtual int getBits() const;
@ -955,8 +957,10 @@ public:
private:
QMetaEnum _metaEnum;
const QMetaObject* _metaObject;
const char* _enumName;
QByteArray _name;
QMetaEnum _metaEnum;
int _bits;
};
@ -1037,12 +1041,12 @@ public:
};
/// Macro for registering simple type streamers.
#define REGISTER_SIMPLE_TYPE_STREAMER(x) static int x##Streamer = \
Bitstream::registerTypeStreamer(qMetaTypeId<x>(), new SimpleTypeStreamer<x>());
#define REGISTER_SIMPLE_TYPE_STREAMER(X) static int X##Streamer = \
Bitstream::registerTypeStreamer(qMetaTypeId<X>(), new SimpleTypeStreamer<X>());
/// Macro for registering collection type streamers.
#define REGISTER_COLLECTION_TYPE_STREAMER(x) static int x##Streamer = \
Bitstream::registerTypeStreamer(qMetaTypeId<x>(), new CollectionTypeStreamer<x>());
#define REGISTER_COLLECTION_TYPE_STREAMER(X) static int X##Streamer = \
Bitstream::registerTypeStreamer(qMetaTypeId<X>(), new CollectionTypeStreamer<X>());
/// Declares the metatype and the streaming operators. The last lines
/// ensure that the generated file will be included in the link phase.
@ -1077,14 +1081,42 @@ public:
_Pragma(STRINGIFY(unused(_TypePtr##X)))
#endif
#define DECLARE_ENUM_METATYPE(S, N) Q_DECLARE_METATYPE(S::N) \
Bitstream& operator<<(Bitstream& out, const S::N& obj); \
Bitstream& operator>>(Bitstream& in, S::N& obj); \
template<> inline void Bitstream::writeRawDelta(const S::N& value, const S::N& reference) { *this << value; } \
template<> inline void Bitstream::readRawDelta(S::N& value, const S::N& reference) { *this >> value; }
#define IMPLEMENT_ENUM_METATYPE(S, N) \
static int S##N##MetaTypeId = registerEnumMetaType<S::N>(&S::staticMetaObject, #N); \
Bitstream& operator<<(Bitstream& out, const S::N& obj) { \
static int bits = Bitstream::getTypeStreamer(qMetaTypeId<S::N>())->getBits(); \
return out.write(&obj, bits); \
} \
Bitstream& operator>>(Bitstream& in, S::N& obj) { \
static int bits = Bitstream::getTypeStreamer(qMetaTypeId<S::N>())->getBits(); \
obj = (S::N)0; \
return in.read(&obj, bits); \
}
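// Usage, as seen in the MetavoxelTests hunks later in this commit: declare
// next to the enum in the header, implement once in the matching source file.
//   DECLARE_ENUM_METATYPE(TestSharedObjectA, TestEnum)    // header
//   IMPLEMENT_ENUM_METATYPE(TestSharedObjectA, TestEnum)  // .cpp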
/// Registers a simple type and its streamer.
/// \return the metatype id
template<class T> int registerSimpleMetaType() {
int type = qRegisterMetaType<T>();
Bitstream::registerTypeStreamer(type, new SimpleTypeStreamer<T>());
return type;
}
/// Registers an enum type and its streamer.
/// \return the metatype id
template<class T> int registerEnumMetaType(const QMetaObject* metaObject, const char* name) {
int type = qRegisterMetaType<T>();
Bitstream::registerTypeStreamer(type, new EnumTypeStreamer(metaObject, name));
return type;
}
/// Registers a streamable type and its streamer.
/// \return the metatype id
template<class T> int registerStreamableMetaType() {
int type = qRegisterMetaType<T>();
Bitstream::registerTypeStreamer(type, new StreamableTypeStreamer<T>());
@ -1092,6 +1124,7 @@ template<class T> int registerStreamableMetaType() {
}
/// Registers a collection type and its streamer.
/// \return the metatype id
template<class T> int registerCollectionMetaType() {
int type = qRegisterMetaType<T>();
Bitstream::registerTypeStreamer(type, new CollectionTypeStreamer<T>());

View file

@ -185,7 +185,7 @@ void ModelTree::addModel(const ModelItemID& modelID, const ModelItemProperties&
glm::vec3 position = model.getPosition();
float size = std::max(MINIMUM_MODEL_ELEMENT_SIZE, model.getRadius());
ModelTreeElement* element = (ModelTreeElement*)getOrCreateChildElementAt(position.x, position.y, position.z, size);
ModelTreeElement* element = static_cast<ModelTreeElement*>(getOrCreateChildElementAt(position.x, position.y, position.z, size));
element->storeModel(model);
_isDirty = true;

View file

@ -313,7 +313,7 @@ void AccountManager::requestAccessToken(const QString& login, const QString& pas
QByteArray postData;
postData.append("grant_type=password&");
postData.append("username=" + login + "&");
postData.append("password=" + password + "&");
postData.append("password=" + QUrl::toPercentEncoding(password) + "&");
postData.append("scope=" + ACCOUNT_MANAGER_REQUESTED_SCOPE);
request.setUrl(grantURL);
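// Illustrative effect of the change above, with a made-up password: '&' and
// '+' would otherwise read as form-field separators or spaces on the server.
//   QUrl::toPercentEncoding(QString("p&ss+word")) == "p%26ss%2Bword"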

View file

@ -47,6 +47,10 @@ int packArithmeticallyCodedValue(int value, char* destination) {
PacketVersion versionForPacketType(PacketType type) {
switch (type) {
case PacketTypeMicrophoneAudioNoEcho:
case PacketTypeMicrophoneAudioWithEcho:
case PacketTypeSilentAudioFrame:
return 1;
case PacketTypeAvatarData:
return 3;
case PacketTypeAvatarIdentity:
@ -104,7 +108,7 @@ int populatePacketHeader(char* packet, PacketType type, const QUuid& connectionU
position += NUM_BYTES_RFC4122_UUID;
if (!NON_VERIFIED_PACKETS.contains(type)) {
// pack 16 bytes of zeros where the md5 hash will be placed one data is packed
// pack 16 bytes of zeros where the md5 hash will be placed once data is packed
memset(position, 0, NUM_BYTES_MD5_HASH);
position += NUM_BYTES_MD5_HASH;
}

View file

@ -66,6 +66,7 @@ enum PacketType {
PacketTypeModelAddOrEdit,
PacketTypeModelErase,
PacketTypeModelAddResponse,
PacketTypeOctreeDataNack
};
typedef char PacketVersion;
@ -74,7 +75,8 @@ const QSet<PacketType> NON_VERIFIED_PACKETS = QSet<PacketType>()
<< PacketTypeDomainServerRequireDTLS << PacketTypeDomainConnectRequest
<< PacketTypeDomainList << PacketTypeDomainListRequest << PacketTypeDomainOAuthRequest
<< PacketTypeCreateAssignment << PacketTypeRequestAssignment << PacketTypeStunResponse
<< PacketTypeNodeJsonStats << PacketTypeVoxelQuery << PacketTypeParticleQuery << PacketTypeModelQuery;
<< PacketTypeNodeJsonStats << PacketTypeVoxelQuery << PacketTypeParticleQuery << PacketTypeModelQuery
<< PacketTypeOctreeDataNack;
const int NUM_BYTES_MD5_HASH = 16;
const int NUM_STATIC_HEADER_BYTES = sizeof(PacketVersion) + NUM_BYTES_RFC4122_UUID;

View file

@ -46,6 +46,7 @@ OctreeSceneStats::OctreeSceneStats() :
_incomingReallyLate(0),
_incomingPossibleDuplicate(0),
_missingSequenceNumbers(),
_sequenceNumbersToNack(),
_incomingFlightTimeAverage(samples),
_jurisdictionRoot(NULL)
{
@ -158,6 +159,7 @@ void OctreeSceneStats::copyFromOther(const OctreeSceneStats& other) {
_incomingPossibleDuplicate = other._incomingPossibleDuplicate;
_missingSequenceNumbers = other._missingSequenceNumbers;
_sequenceNumbersToNack = other._sequenceNumbersToNack;
}
@ -926,6 +928,7 @@ void OctreeSceneStats::trackIncomingOctreePacket(const QByteArray& packet,
qDebug() << "found it in _missingSequenceNumbers";
}
_missingSequenceNumbers.remove(sequence);
_sequenceNumbersToNack.remove(sequence);
_incomingLikelyLost--;
_incomingRecovered++;
} else {
@ -955,6 +958,7 @@ void OctreeSceneStats::trackIncomingOctreePacket(const QByteArray& packet,
_incomingLikelyLost += missing;
for(unsigned int missingSequence = expected; missingSequence < sequence; missingSequence++) {
_missingSequenceNumbers << missingSequence;
_sequenceNumbersToNack << missingSequence;
}
}
}
@ -982,9 +986,20 @@ void OctreeSceneStats::trackIncomingOctreePacket(const QByteArray& packet,
qDebug() << "pruning really old missing sequence:" << missingItem;
}
_missingSequenceNumbers.remove(missingItem);
_sequenceNumbersToNack.remove(missingItem);
}
}
}
}
int OctreeSceneStats::getNumSequenceNumbersToNack() const {
return _sequenceNumbersToNack.size();
}
uint16_t OctreeSceneStats::getNextSequenceNumberToNack() {
QSet<uint16_t>::Iterator it = _sequenceNumbersToNack.begin();
uint16_t sequenceNumber = *it;
_sequenceNumbersToNack.remove(sequenceNumber);
return sequenceNumber;
}
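// Hypothetical caller sketch for the two accessors above: drain the pending
// set one sequence number at a time while filling a NACK packet.
while (stats.getNumSequenceNumbersToNack() > 0) {
    OCTREE_PACKET_SEQUENCE sequence = stats.getNextSequenceNumberToNack();
    // ... append sequence to a PacketTypeOctreeDataNack payload ...
}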

View file

@ -16,6 +16,7 @@
#include <NodeList.h>
#include <SharedUtil.h>
#include "JurisdictionMap.h"
#include "OctreePacketData.h"
#define GREENISH 0x40ff40d0
#define YELLOWISH 0xffef40c0
@ -172,6 +173,9 @@ public:
quint32 getIncomingPossibleDuplicate() const { return _incomingPossibleDuplicate; }
float getIncomingFlightTimeAverage() { return _incomingFlightTimeAverage.getAverage(); }
int getNumSequenceNumbersToNack() const;
OCTREE_PACKET_SEQUENCE getNextSequenceNumberToNack();
private:
void copyFromOther(const OctreeSceneStats& other);
@ -272,7 +276,8 @@ private:
quint32 _incomingLate; /// out of order later than expected
quint32 _incomingReallyLate; /// out of order and later than MAX_MISSING_SEQUENCE_OLD_AGE late
quint32 _incomingPossibleDuplicate; /// out of order possibly a duplicate
QSet<uint16_t> _missingSequenceNumbers;
QSet<OCTREE_PACKET_SEQUENCE> _missingSequenceNumbers;
QSet<OCTREE_PACKET_SEQUENCE> _sequenceNumbersToNack;
SimpleMovingAverage _incomingFlightTimeAverage;
// features related items

View file

@ -20,12 +20,16 @@
REGISTER_META_OBJECT(TestSharedObjectA)
REGISTER_META_OBJECT(TestSharedObjectB)
IMPLEMENT_ENUM_METATYPE(TestSharedObjectA, TestEnum)
MetavoxelTests::MetavoxelTests(int& argc, char** argv) :
QCoreApplication(argc, argv) {
}
static int datagramsSent = 0;
static int datagramsReceived = 0;
static int bytesSent = 0;
static int bytesReceived = 0;
static int highPriorityMessagesSent = 0;
static int highPriorityMessagesReceived = 0;
static int unreliableMessagesSent = 0;
@ -36,6 +40,7 @@ static int streamedBytesSent = 0;
static int streamedBytesReceived = 0;
static int sharedObjectsCreated = 0;
static int sharedObjectsDestroyed = 0;
static int objectMutationsPerformed = 0;
static QByteArray createRandomBytes(int minimumSize, int maximumSize) {
QByteArray bytes(randIntInRange(minimumSize, maximumSize), 0);
@ -80,6 +85,7 @@ static TestMessageC createRandomMessageC() {
message.bar = rand();
message.baz = randFloat();
message.bong.foo = createRandomBytes();
message.bong.baz = getRandomTestEnum();
return message;
}
@ -180,7 +186,7 @@ bool MetavoxelTests::run() {
bob.setOther(&alice);
// perform a large number of simulation iterations
const int SIMULATION_ITERATIONS = 100000;
const int SIMULATION_ITERATIONS = 10000;
for (int i = 0; i < SIMULATION_ITERATIONS; i++) {
if (alice.simulate(i) || bob.simulate(i)) {
return true;
@ -191,8 +197,10 @@ bool MetavoxelTests::run() {
qDebug() << "Sent" << unreliableMessagesSent << "unreliable messages, received" << unreliableMessagesReceived;
qDebug() << "Sent" << reliableMessagesSent << "reliable messages, received" << reliableMessagesReceived;
qDebug() << "Sent" << streamedBytesSent << "streamed bytes, received" << streamedBytesReceived;
qDebug() << "Sent" << datagramsSent << "datagrams, received" << datagramsReceived;
qDebug() << "Sent" << datagramsSent << "datagrams with" << bytesSent << "bytes, received" <<
datagramsReceived << "with" << bytesReceived << "bytes";
qDebug() << "Created" << sharedObjectsCreated << "shared objects, destroyed" << sharedObjectsDestroyed;
qDebug() << "Performed" << objectMutationsPerformed << "object mutations";
qDebug();
qDebug() << "Running serialization tests...";
@ -226,6 +234,20 @@ Endpoint::Endpoint(const QByteArray& datagramHeader) :
connect(_sequencer, SIGNAL(receivedHighPriorityMessage(const QVariant&)),
SLOT(handleHighPriorityMessage(const QVariant&)));
connect(_sequencer, SIGNAL(sendAcknowledged(int)), SLOT(clearSendRecordsBefore(int)));
connect(_sequencer, SIGNAL(receiveAcknowledged(int)), SLOT(clearReceiveRecordsBefore(int)));
// insert the baseline send record
SendRecord sendRecord = { 0 };
_sendRecords.append(sendRecord);
// insert the baseline receive record
ReceiveRecord receiveRecord = { 0 };
_receiveRecords.append(receiveRecord);
// create the object that represents our delta-encoded state
_localState = new TestSharedObjectA();
connect(_sequencer->getReliableInputChannel(), SIGNAL(receivedMessage(const QVariant&)),
SLOT(handleReliableMessage(const QVariant&)));
@ -252,16 +274,40 @@ static QVariant createRandomMessage() {
return QVariant::fromValue(message);
}
case 1: {
TestMessageB message = { createRandomBytes(), createRandomSharedObject() };
TestMessageB message = { createRandomBytes(), createRandomSharedObject(), getRandomTestEnum() };
return QVariant::fromValue(message);
}
case 2:
default: {
return QVariant::fromValue(createRandomMessageC());
}
}
}
static SharedObjectPointer mutate(const SharedObjectPointer& state) {
switch(randIntInRange(0, 3)) {
case 0: {
SharedObjectPointer newState = state->clone(true);
static_cast<TestSharedObjectA*>(newState.data())->setFoo(randFloat());
objectMutationsPerformed++;
return newState;
}
case 1: {
SharedObjectPointer newState = state->clone(true);
static_cast<TestSharedObjectA*>(newState.data())->setBaz(getRandomTestEnum());
objectMutationsPerformed++;
return newState;
}
case 2: {
SharedObjectPointer newState = state->clone(true);
static_cast<TestSharedObjectA*>(newState.data())->setBong(getRandomTestFlags());
objectMutationsPerformed++;
return newState;
}
default:
return state;
}
}
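// Why every mutating branch clones first (sketch of the reasoning): the send
// and receive records keep SharedObjectPointer references to earlier states
// as delta baselines, so mutating in place would silently rewrite those
// baselines; clone(true) is assumed to deep-copy the object graph.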
static bool messagesEqual(const QVariant& firstMessage, const QVariant& secondMessage) {
int type = firstMessage.userType();
if (secondMessage.userType() != type) {
@ -273,7 +319,7 @@ static bool messagesEqual(const QVariant& firstMessage, const QVariant& secondMe
} else if (type == TestMessageB::Type) {
TestMessageB first = firstMessage.value<TestMessageB>();
TestMessageB second = secondMessage.value<TestMessageB>();
return first.foo == second.foo && equals(first.bar, second.bar);
return first.foo == second.foo && equals(first.bar, second.bar) && first.baz == second.baz;
} else if (type == TestMessageC::Type) {
return firstMessage.value<TestMessageC>() == secondMessage.value<TestMessageC>();
@ -320,10 +366,13 @@ bool Endpoint::simulate(int iterationNumber) {
_reliableMessagesToSend -= 1.0f;
}
// tweak the local state
_localState = mutate(_localState);
// send a packet
try {
Bitstream& out = _sequencer->startPacket();
SequencedTestMessage message = { iterationNumber, createRandomMessage() };
SequencedTestMessage message = { iterationNumber, createRandomMessage(), _localState };
_unreliableMessagesSent.append(message);
unreliableMessagesSent++;
out << message;
@ -334,11 +383,16 @@ bool Endpoint::simulate(int iterationNumber) {
return true;
}
// record the send
SendRecord record = { _sequencer->getOutgoingPacketNumber(), _localState };
_sendRecords.append(record);
return false;
}
void Endpoint::sendDatagram(const QByteArray& datagram) {
datagramsSent++;
bytesSent += datagram.size();
// some datagrams are dropped
const float DROP_PROBABILITY = 0.1f;
@ -364,6 +418,7 @@ void Endpoint::sendDatagram(const QByteArray& datagram) {
_other->_sequencer->receivedDatagram(datagram);
datagramsReceived++;
bytesReceived += datagram.size();
}
void Endpoint::handleHighPriorityMessage(const QVariant& message) {
@ -384,12 +439,21 @@ void Endpoint::readMessage(Bitstream& in) {
SequencedTestMessage message;
in >> message;
_remoteState = message.state;
// record the receipt
ReceiveRecord record = { _sequencer->getIncomingPacketNumber(), message.state };
_receiveRecords.append(record);
for (QList<SequencedTestMessage>::iterator it = _other->_unreliableMessagesSent.begin();
it != _other->_unreliableMessagesSent.end(); it++) {
if (it->sequenceNumber == message.sequenceNumber) {
if (!messagesEqual(it->submessage, message.submessage)) {
throw QString("Sent/received unreliable message mismatch.");
}
if (!it->state->equals(message.state)) {
throw QString("Delta-encoded object mismatch.");
}
_other->_unreliableMessagesSent.erase(_other->_unreliableMessagesSent.begin(), it + 1);
unreliableMessagesReceived++;
return;
@ -427,6 +491,14 @@ void Endpoint::readReliableChannel() {
streamedBytesReceived += bytes.size();
}
void Endpoint::clearSendRecordsBefore(int index) {
_sendRecords.erase(_sendRecords.begin(), _sendRecords.begin() + index + 1);
}
void Endpoint::clearReceiveRecordsBefore(int index) {
_receiveRecords.erase(_receiveRecords.begin(), _receiveRecords.begin() + index + 1);
}
TestSharedObjectA::TestSharedObjectA(float foo, TestEnum baz, TestFlags bong) :
_foo(foo),
_baz(baz),

View file

@ -54,9 +54,30 @@ private slots:
void handleReliableMessage(const QVariant& message);
void readReliableChannel();
void clearSendRecordsBefore(int index);
void clearReceiveRecordsBefore(int index);
private:
class SendRecord {
public:
int packetNumber;
SharedObjectPointer localState;
};
class ReceiveRecord {
public:
int packetNumber;
SharedObjectPointer remoteState;
};
DatagramSequencer* _sequencer;
QList<SendRecord> _sendRecords;
QList<ReceiveRecord> _receiveRecords;
SharedObjectPointer _localState;
SharedObjectPointer _remoteState;
Endpoint* _other;
QList<QPair<QByteArray, int> > _delayedDatagrams;
float _highPriorityMessagesToSend;
@ -106,6 +127,8 @@ private:
TestFlags _bong;
};
DECLARE_ENUM_METATYPE(TestSharedObjectA, TestEnum)
/// Another simple shared object.
class TestSharedObjectB : public SharedObject {
Q_OBJECT
@ -169,6 +192,7 @@ public:
STREAM QByteArray foo;
STREAM SharedObjectPointer bar;
STREAM TestSharedObjectA::TestEnum baz;
};
DECLARE_STREAMABLE_METATYPE(TestMessageB)
@ -192,6 +216,7 @@ public:
STREAM int sequenceNumber;
STREAM QVariant submessage;
STREAM SharedObjectPointer state;
};
DECLARE_STREAMABLE_METATYPE(SequencedTestMessage)