Merge branch 'master' of https://github.com/worklist/hifi into octreeWireformatImprovements

ZappoMan committed on 2014-06-09 14:10:47 -07:00 (commit c5647addf4)
72 changed files with 3172 additions and 912 deletions


@ -173,134 +173,151 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
}
}
// if the bearing relative angle to source is > 0 then the delayed channel is the right one
int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
const int16_t* nextOutputStart = bufferToAdd->getNextOutput();
const int16_t* bufferStart = bufferToAdd->getBuffer();
int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
int16_t correctBufferSample[2], delayBufferSample[2];
int delayedChannelIndex = 0;
const int SINGLE_STEREO_OFFSET = 2;
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
if (!bufferToAdd->isStereo()) {
// this is a mono buffer, which means it gets full attenuation and spatialization
// setup the int16_t variables for the two sample sets
correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient;
correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient;
// if the bearing relative angle to source is > 0 then the delayed channel is the right one
int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
const int16_t* bufferStart = bufferToAdd->getBuffer();
int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
int16_t correctBufferSample[2], delayBufferSample[2];
int delayedChannelIndex = 0;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[s + goodChannelOffset],
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET],
_clientSamples[delayedChannelIndex],
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET]);
__m64 addedSamples = _mm_set_pi16(correctBufferSample[0], correctBufferSample[1],
delayBufferSample[0], delayBufferSample[1]);
const int SINGLE_STEREO_OFFSET = 2;
// perform the MMX add (with saturation) of two correct and delayed samples
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addedSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
// assign the results from the result of the mmx arithmetic
_clientSamples[s + goodChannelOffset] = shortResults[3];
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] = shortResults[2];
_clientSamples[delayedChannelIndex] = shortResults[1];
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] = shortResults[0];
}
// The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
// too many conditionals in handling the delay samples at the beginning of _clientSamples.
// Basically we try to take the samples in batches of four, and then handle the remainder
// conditionally to get rid of the rest.
const int DOUBLE_STEREO_OFFSET = 4;
const int TRIPLE_STEREO_OFFSET = 6;
if (numSamplesDelay > 0) {
// if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
// to stick at the beginning
float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay;
if (delayNextOutputStart < bufferStart) {
delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay;
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
// setup the int16_t variables for the two sample sets
correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient;
correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient;
delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[s + goodChannelOffset],
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET],
_clientSamples[delayedChannelIndex],
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET]);
__m64 addedSamples = _mm_set_pi16(correctBufferSample[0], correctBufferSample[1],
delayBufferSample[0], delayBufferSample[1]);
// perform the MMX add (with saturation) of two correct and delayed samples
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addedSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
// assign the results from the result of the mmx arithmetic
_clientSamples[s + goodChannelOffset] = shortResults[3];
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] = shortResults[2];
_clientSamples[delayedChannelIndex] = shortResults[1];
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] = shortResults[0];
}
int i = 0;
// The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
// too many conditionals in handling the delay samples at the beginning of _clientSamples.
// Basically we try to take the samples in batches of four, and then handle the remainder
// conditionally to get rid of the rest.
while (i + 3 < numSamplesDelay) {
// handle the first cases where we can MMX add four samples at once
const int DOUBLE_STEREO_OFFSET = 4;
const int TRIPLE_STEREO_OFFSET = 6;
if (numSamplesDelay > 0) {
// if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
// to stick at the beginning
float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay;
if (delayNextOutputStart < bufferStart) {
delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay;
}
int i = 0;
while (i + 3 < numSamplesDelay) {
// handle the first cases where we can MMX add four samples at once
int parentIndex = i * 2;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset]);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 3] * attenuationAndWeakChannelRatio);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[0];
// push the index
i += 4;
}
int parentIndex = i * 2;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset]);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 3] * attenuationAndWeakChannelRatio);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[0];
// push the index
i += 4;
if (i + 2 < numSamplesDelay) {
// MMX add only three delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
} else if (i + 1 < numSamplesDelay) {
// MMX add two delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset], 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
} else if (i < numSamplesDelay) {
// MMX add a single delayed sample
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset], 0, 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio, 0, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
}
}
int parentIndex = i * 2;
if (i + 2 < numSamplesDelay) {
// MMX add only three delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
} else if (i + 1 < numSamplesDelay) {
// MMX add two delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset], 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
} else if (i < numSamplesDelay) {
// MMX add a single delayed sample
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset], 0, 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio, 0, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
} else {
// stereo buffer - do attenuation but no sample delay for spatialization
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
// clamp four additions at a time (using glm::clamp here rather than MMX)
_clientSamples[s] = glm::clamp(_clientSamples[s] + (int) (nextOutputStart[s] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
_clientSamples[s + 1] = glm::clamp(_clientSamples[s + 1] + (int) (nextOutputStart[s + 1] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
_clientSamples[s + 2] = glm::clamp(_clientSamples[s + 2] + (int) (nextOutputStart[s + 2] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
_clientSamples[s + 3] = glm::clamp(_clientSamples[s + 3] + (int) (nextOutputStart[s + 3] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
}
}
}
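
A minimal standalone sketch (not the project's code) of the saturating-add pattern used in the hunk above: four int16_t samples are packed into an __m64, added with _mm_adds_pi16 so the sum clamps instead of wrapping, and unpacked in reverse lane order because _mm_set_pi16 places its first argument in the highest lane.

#include <mmintrin.h>
#include <cstdint>

// Mix four samples into an accumulation buffer with MMX saturating adds.
void mixFourSamplesSaturated(int16_t* mix, const int16_t* add) {
    // _mm_set_pi16 packs arguments high-to-low, so argument 0 lands in lane 3
    __m64 bufferSamples = _mm_set_pi16(mix[0], mix[1], mix[2], mix[3]);
    __m64 addedSamples  = _mm_set_pi16(add[0], add[1], add[2], add[3]);
    __m64 result = _mm_adds_pi16(bufferSamples, addedSamples); // clamps to [-32768, 32767]
    const int16_t* lanes = reinterpret_cast<const int16_t*>(&result);
    mix[0] = lanes[3];
    mix[1] = lanes[2];
    mix[2] = lanes[1];
    mix[3] = lanes[0];
    _mm_empty(); // clear MMX state before any later x87 floating-point use
}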


@ -50,10 +50,22 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
// grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
// read the first byte after the header to see if this is a stereo or mono buffer
quint8 channelFlag = packet.at(numBytesForPacketHeader(packet));
bool isStereo = channelFlag == 1;
if (avatarRingBuffer && avatarRingBuffer->isStereo() != isStereo) {
// there's a mismatch in the buffer channels for the incoming and current buffer
// so delete our current buffer and create a new one
_ringBuffers.removeOne(avatarRingBuffer);
avatarRingBuffer->deleteLater();
avatarRingBuffer = NULL;
}
if (!avatarRingBuffer) {
// we don't have an AvatarAudioRingBuffer yet, so add it
avatarRingBuffer = new AvatarAudioRingBuffer();
avatarRingBuffer = new AvatarAudioRingBuffer(isStereo);
_ringBuffers.push_back(avatarRingBuffer);
}
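
For reference, a minimal sketch of what the sending side is assumed to do for this flag: write a single quint8 immediately after the packet header, 1 for stereo and 0 for mono. The helper name below is illustrative, not the project's API.

#include <QByteArray>
#include <QtGlobal>

// Append the mono/stereo channel flag byte that parseData() reads back.
void appendChannelFlag(QByteArray& audioPacket, bool isStereo) {
    quint8 channelFlag = isStereo ? 1 : 0; // assumed convention: 1 = stereo, 0 = mono
    audioPacket.append(reinterpret_cast<const char*>(&channelFlag), sizeof(channelFlag));
}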
@ -106,7 +118,8 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
PositionalAudioRingBuffer* audioBuffer = _ringBuffers[i];
if (audioBuffer->willBeAddedToMix()) {
audioBuffer->shiftReadPosition(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
audioBuffer->shiftReadPosition(audioBuffer->isStereo()
? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
audioBuffer->setWillBeAddedToMix(false);
} else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector


@ -24,14 +24,14 @@ public:
AudioMixerClientData();
~AudioMixerClientData();
const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
const QList<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
int parseData(const QByteArray& packet);
void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples);
void pushBuffersAfterFrameSend();
private:
std::vector<PositionalAudioRingBuffer*> _ringBuffers;
QList<PositionalAudioRingBuffer*> _ringBuffers;
};
#endif // hifi_AudioMixerClientData_h


@ -13,8 +13,8 @@
#include "AvatarAudioRingBuffer.h"
AvatarAudioRingBuffer::AvatarAudioRingBuffer() :
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone) {
AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo) :
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo) {
}


@ -18,7 +18,7 @@
class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
public:
AvatarAudioRingBuffer();
AvatarAudioRingBuffer(bool isStereo = false);
int parseData(const QByteArray& packet);
private:


@ -106,7 +106,7 @@ int ModelServer::sendSpecialPacket(OctreeQueryNode* queryNode, const SharedNodeP
//qDebug() << "sending PacketType_MODEL_ERASE packetLength:" << packetLength;
NodeList::getInstance()->writeDatagram((char*) outputBuffer, packetLength, SharedNodePointer(node));
queryNode->incrementSequenceNumber();
queryNode->packetSent(outputBuffer, packetLength);
}
nodeData->setLastDeletedModelsSentAt(deletePacketSentAt);


@ -41,7 +41,8 @@ OctreeQueryNode::OctreeQueryNode() :
_sequenceNumber(0),
_lastRootTimestamp(0),
_myPacketType(PacketTypeUnknown),
_isShuttingDown(false)
_isShuttingDown(false),
_sentPacketHistory(1000)
{
}
@ -362,3 +363,45 @@ void OctreeQueryNode::dumpOutOfView() {
}
}
}
void OctreeQueryNode::octreePacketSent() {
packetSent(_octreePacket, getPacketLength());
}
void OctreeQueryNode::packetSent(unsigned char* packet, int packetLength) {
packetSent(QByteArray((char*)packet, packetLength));
}
void OctreeQueryNode::packetSent(const QByteArray& packet) {
_sentPacketHistory.packetSent(_sequenceNumber, packet);
_sequenceNumber++;
}
bool OctreeQueryNode::hasNextNackedPacket() const {
return !_nackedSequenceNumbers.isEmpty();
}
const QByteArray* OctreeQueryNode::getNextNackedPacket() {
if (!_nackedSequenceNumbers.isEmpty()) {
// could return null if packet is not in the history
return _sentPacketHistory.getPacket(_nackedSequenceNumbers.takeFirst());
}
return NULL;
}
void OctreeQueryNode::parseNackPacket(QByteArray& packet) {
int numBytesPacketHeader = numBytesForPacketHeader(packet);
const unsigned char* dataAt = reinterpret_cast<const unsigned char*>(packet.data()) + numBytesPacketHeader;
// read number of sequence numbers
uint16_t numSequenceNumbers = (*(uint16_t*)dataAt);
dataAt += sizeof(uint16_t);
// read sequence numbers
for (int i = 0; i < numSequenceNumbers; i++) {
OCTREE_PACKET_SEQUENCE sequenceNumber = (*(OCTREE_PACKET_SEQUENCE*)dataAt);
_nackedSequenceNumbers.enqueue(sequenceNumber);
dataAt += sizeof(OCTREE_PACKET_SEQUENCE);
}
}
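
A small sketch of the payload layout parseNackPacket() expects after the header: a uint16_t count followed by that many sequence numbers. It mirrors the packing done in Application::sendNack() later in this commit; the function below is illustrative and assumes the sequence-number type is a plain 16-bit integer.

#include <cstdint>
#include <cstring>
#include <vector>

// Pack a NACK payload: [uint16_t count][count sequence numbers].
// Returns the number of payload bytes written.
int packNackPayload(unsigned char* dataAt, const std::vector<uint16_t>& sequenceNumbers) {
    unsigned char* start = dataAt;
    uint16_t count = static_cast<uint16_t>(sequenceNumbers.size());
    memcpy(dataAt, &count, sizeof(count));
    dataAt += sizeof(count);
    for (uint16_t sequenceNumber : sequenceNumbers) {
        memcpy(dataAt, &sequenceNumber, sizeof(sequenceNumber));
        dataAt += sizeof(sequenceNumber);
    }
    return dataAt - start;
}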


@ -23,6 +23,8 @@
#include <OctreeQuery.h>
#include <OctreeSceneStats.h>
#include <ThreadedAssignment.h> // for SharedAssignmentPointer
#include "SentPacketHistory.h"
#include <qqueue.h>
class OctreeSendThread;
@ -100,10 +102,16 @@ public:
void forceNodeShutdown();
bool isShuttingDown() const { return _isShuttingDown; }
void incrementSequenceNumber() { _sequenceNumber++; }
void octreePacketSent();
void packetSent(unsigned char* packet, int packetLength);
void packetSent(const QByteArray& packet);
OCTREE_PACKET_SEQUENCE getSequenceNumber() const { return _sequenceNumber; }
void parseNackPacket(QByteArray& packet);
bool hasNextNackedPacket() const;
const QByteArray* getNextNackedPacket();
private slots:
void sendThreadFinished();
@ -146,6 +154,9 @@ private:
PacketType _myPacketType;
bool _isShuttingDown;
SentPacketHistory _sentPacketHistory;
QQueue<OCTREE_PACKET_SEQUENCE> _nackedSequenceNumbers;
};
#endif // hifi_OctreeQueryNode_h


@ -85,6 +85,7 @@ bool OctreeSendThread::process() {
if (nodeData && !nodeData->isShuttingDown()) {
bool viewFrustumChanged = nodeData->updateCurrentViewFrustum();
packetDistributor(nodeData, viewFrustumChanged);
resendNackedPackets(nodeData);
}
}
}
@ -214,8 +215,7 @@ int OctreeSendThread::handlePacketSend(OctreeQueryNode* nodeData, int& trueBytes
packetsSent++;
OctreeServer::didCallWriteDatagram(this);
NodeList::getInstance()->writeDatagram((char*) nodeData->getPacket(), nodeData->getPacketLength(), _node);
NodeList::getInstance()->writeDatagram((char*)nodeData->getPacket(), nodeData->getPacketLength(), _node);
packetSent = true;
thisWastedBytes = MAX_PACKET_SIZE - nodeData->getPacketLength();
@ -244,7 +244,7 @@ int OctreeSendThread::handlePacketSend(OctreeQueryNode* nodeData, int& trueBytes
if (nodeData->isPacketWaiting() && !nodeData->isShuttingDown()) {
// just send the voxel packet
OctreeServer::didCallWriteDatagram(this);
NodeList::getInstance()->writeDatagram((char*) nodeData->getPacket(), nodeData->getPacketLength(), _node);
NodeList::getInstance()->writeDatagram((char*)nodeData->getPacket(), nodeData->getPacketLength(), _node);
packetSent = true;
int thisWastedBytes = MAX_PACKET_SIZE - nodeData->getPacketLength();
@ -274,13 +274,33 @@ int OctreeSendThread::handlePacketSend(OctreeQueryNode* nodeData, int& trueBytes
trueBytesSent += nodeData->getPacketLength();
truePacketsSent++;
packetsSent++;
nodeData->incrementSequenceNumber();
nodeData->octreePacketSent();
nodeData->resetOctreePacket();
}
return packetsSent;
}
int OctreeSendThread::resendNackedPackets(OctreeQueryNode* nodeData) {
const int MAX_PACKETS_RESEND = 10;
int packetsSent = 0;
const QByteArray* packet;
while (nodeData->hasNextNackedPacket() && packetsSent < MAX_PACKETS_RESEND) {
packet = nodeData->getNextNackedPacket();
if (packet) {
NodeList::getInstance()->writeDatagram(*packet, _node);
packetsSent++;
_totalBytes += packet->size();
_totalPackets++;
_totalWastedBytes += MAX_PACKET_SIZE - packet->size();
}
}
return packetsSent;
}
/// Version of voxel distributor that sends the deepest LOD level at once
int OctreeSendThread::packetDistributor(OctreeQueryNode* nodeData, bool viewFrustumChanged) {


@ -55,6 +55,9 @@ private:
int _nodeMissingCount;
bool _isShuttingDown;
int resendNackedPackets(OctreeQueryNode* nodeData);
};
#endif // hifi_OctreeSendThread_h


@ -832,14 +832,13 @@ void OctreeServer::readPendingDatagrams() {
PacketType packetType = packetTypeForPacket(receivedPacket);
SharedNodePointer matchingNode = nodeList->sendingNodeForPacket(receivedPacket);
if (packetType == getMyQueryMessageType()) {
// If we got a query packet, then we're talking to an agent, and we
// need to make sure we have it in our nodeList.
if (matchingNode) {
nodeList->updateNodeWithDataFromPacket(matchingNode, receivedPacket);
OctreeQueryNode* nodeData = (OctreeQueryNode*) matchingNode->getLinkedData();
OctreeQueryNode* nodeData = (OctreeQueryNode*)matchingNode->getLinkedData();
if (nodeData && !nodeData->isOctreeSendThreadInitalized()) {
// NOTE: this is an important aspect of the proper ref counting. The send threads/node data need to
// know that the OctreeServer/Assignment will not get deleted on it while it's still active. The
// solution is to get the shared pointer for the current assignment. We need to make sure this is the
@ -848,6 +847,16 @@ void OctreeServer::readPendingDatagrams() {
nodeData->initializeOctreeSendThread(sharedAssignment, matchingNode);
}
}
} else if (packetType == PacketTypeOctreeDataNack) {
// If we got a nack packet, then we're talking to an agent, and we
// need to make sure we have it in our nodeList.
if (matchingNode) {
nodeList->updateNodeWithDataFromPacket(matchingNode, receivedPacket);
OctreeQueryNode* nodeData = (OctreeQueryNode*)matchingNode->getLinkedData();
if (nodeData) {
nodeData->parseNackPacket(receivedPacket);
}
}
} else if (packetType == PacketTypeJurisdictionRequest) {
_jurisdictionSender->queueReceivedPacket(matchingNode, receivedPacket);
} else if (_octreeInboundPacketProcessor && getOctree()->handlesEditPacketType(packetType)) {


@ -0,0 +1,44 @@
//
// SentPacketHistory.cpp
// assignment-client/src/octree
//
// Created by Yixin Wang on 6/5/2014
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "SentPacketHistory.h"
SentPacketHistory::SentPacketHistory(int size)
: _sentPackets(size),
_newestPacketAt(0),
_numExistingPackets(0),
_newestSequenceNumber(0)
{
}
void SentPacketHistory::packetSent(OCTREE_PACKET_SEQUENCE sequenceNumber, const QByteArray& packet) {
_newestSequenceNumber = sequenceNumber;
// increment _newestPacketAt cyclically, insert new packet there.
// this will overwrite the oldest packet in the buffer
_newestPacketAt = (_newestPacketAt == _sentPackets.size() - 1) ? 0 : _newestPacketAt + 1;
_sentPackets[_newestPacketAt] = packet;
if (_numExistingPackets < _sentPackets.size()) {
_numExistingPackets++;
}
}
const QByteArray* SentPacketHistory::getPacket(OCTREE_PACKET_SEQUENCE sequenceNumber) const {
OCTREE_PACKET_SEQUENCE seqDiff = _newestSequenceNumber - sequenceNumber;
if (!(seqDiff >= 0 && seqDiff < _numExistingPackets)) {
return NULL;
}
int packetAt = _newestPacketAt - seqDiff;
if (packetAt < 0) {
packetAt += _sentPackets.size();
}
return &_sentPackets.at(packetAt);
}
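
A hypothetical usage sketch for the class above: record every outgoing packet against its sequence number, then fetch it again when that sequence number is NACKed. getPacket() returns NULL once the entry has been overwritten by newer packets.

#include <QByteArray>
#include "SentPacketHistory.h"

void exampleSentPacketHistoryUsage() {
    SentPacketHistory history(1000);             // remembers the last 1000 packets
    QByteArray packet("example packet contents");
    history.packetSent(42, packet);              // record packet with sequence number 42
    const QByteArray* resend = history.getPacket(42);
    if (resend) {
        // still in the circular buffer, so it can be retransmitted
    }
}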


@ -0,0 +1,35 @@
//
// SentPacketHistory.h
// assignment-client/src/octree
//
// Created by Yixin Wang on 6/5/2014
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_SentPacketHistory_h
#define hifi_SentPacketHistory_h
#include <qbytearray.h>
#include <qvector.h>
#include "OctreePacketData.h"
class SentPacketHistory {
public:
SentPacketHistory(int size);
void packetSent(OCTREE_PACKET_SEQUENCE sequenceNumber, const QByteArray& packet);
const QByteArray* getPacket(OCTREE_PACKET_SEQUENCE sequenceNumber) const;
private:
QVector<QByteArray> _sentPackets; // circular buffer
int _newestPacketAt;
int _numExistingPackets;
OCTREE_PACKET_SEQUENCE _newestSequenceNumber;
};
#endif


@ -106,7 +106,7 @@ int ParticleServer::sendSpecialPacket(OctreeQueryNode* queryNode, const SharedNo
//qDebug() << "sending PacketType_PARTICLE_ERASE packetLength:" << packetLength;
NodeList::getInstance()->writeDatagram((char*) outputBuffer, packetLength, SharedNodePointer(node));
queryNode->incrementSequenceNumber();
queryNode->packetSent(outputBuffer, packetLength);
}
nodeData->setLastDeletedParticlesSentAt(deletePacketSentAt);


@ -75,7 +75,7 @@ int VoxelServer::sendSpecialPacket(OctreeQueryNode* queryNode, const SharedNodeP
}
NodeList::getInstance()->writeDatagram((char*) _tempOutputBuffer, envPacketLength, SharedNodePointer(node));
queryNode->incrementSequenceNumber();
queryNode->packetSent(_tempOutputBuffer, envPacketLength);
return envPacketLength;
}


@ -20,21 +20,21 @@ var jointMappings = "\n# Joint list start";
for (var i = 0; i < jointList.length; i++) {
jointMappings = jointMappings + "\njointIndex = " + jointList[i] + " = " + i;
}
print(jointMappings + "\n# Joint list end");
print(jointMappings + "\n# Joint list end");
Script.update.connect(function(deltaTime) {
cumulativeTime += deltaTime;
MyAvatar.setJointData("joint_R_hip", Quat.fromPitchYawRollDegrees(0.0, 0.0, AMPLITUDE * Math.sin(cumulativeTime * FREQUENCY)));
MyAvatar.setJointData("joint_L_hip", Quat.fromPitchYawRollDegrees(0.0, 0.0, -AMPLITUDE * Math.sin(cumulativeTime * FREQUENCY)));
MyAvatar.setJointData("joint_R_knee", Quat.fromPitchYawRollDegrees(0.0, 0.0,
AMPLITUDE * (1.0 + Math.sin(cumulativeTime * FREQUENCY))));
MyAvatar.setJointData("joint_L_knee", Quat.fromPitchYawRollDegrees(0.0, 0.0,
AMPLITUDE * (1.0 - Math.sin(cumulativeTime * FREQUENCY))));
MyAvatar.setJointData("RightUpLeg", Quat.fromPitchYawRollDegrees(AMPLITUDE * Math.sin(cumulativeTime * FREQUENCY), 0.0, 0.0));
MyAvatar.setJointData("LeftUpLeg", Quat.fromPitchYawRollDegrees(-AMPLITUDE * Math.sin(cumulativeTime * FREQUENCY), 0.0, 0.0));
MyAvatar.setJointData("RightLeg", Quat.fromPitchYawRollDegrees(
AMPLITUDE * (1.0 + Math.sin(cumulativeTime * FREQUENCY)),0.0, 0.0));
MyAvatar.setJointData("LeftLeg", Quat.fromPitchYawRollDegrees(
AMPLITUDE * (1.0 - Math.sin(cumulativeTime * FREQUENCY)),0.0, 0.0));
});
Script.scriptEnding.connect(function() {
MyAvatar.clearJointData("joint_R_hip");
MyAvatar.clearJointData("joint_L_hip");
MyAvatar.clearJointData("joint_R_knee");
MyAvatar.clearJointData("joint_L_knee");
MyAvatar.clearJointData("RightUpLeg");
MyAvatar.clearJointData("LeftUpLeg");
MyAvatar.clearJointData("RightLeg");
MyAvatar.clearJointData("LeftLeg");
});


@ -18,7 +18,8 @@ var toolWidth = 50;
var LASER_WIDTH = 4;
var LASER_COLOR = { red: 255, green: 0, blue: 0 };
var LASER_LENGTH_FACTOR = 5;
var LASER_LENGTH_FACTOR = 500;
var LEFT = 0;
var RIGHT = 1;
@ -42,6 +43,8 @@ var toolBar;
var jointList = MyAvatar.getJointNames();
var mode = 0;
function isLocked(properties) {
// special case to lock the ground plane model in hq.
if (location.hostname == "hq.highfidelity.io" &&
@ -57,6 +60,7 @@ function controller(wichSide) {
this.palm = 2 * wichSide;
this.tip = 2 * wichSide + 1;
this.trigger = wichSide;
this.bumper = 6 * wichSide + 5;
this.oldPalmPosition = Controller.getSpatialControlPosition(this.palm);
this.palmPosition = Controller.getSpatialControlPosition(this.palm);
@ -77,6 +81,7 @@ function controller(wichSide) {
this.rotation = this.oldRotation;
this.triggerValue = Controller.getTriggerValue(this.trigger);
this.bumperValue = Controller.isButtonPressed(this.bumper);
this.pressed = false; // is trigger pressed
this.pressing = false; // is trigger being pressed (is pressed now but wasn't previously)
@ -88,6 +93,11 @@ function controller(wichSide) {
this.oldModelPosition;
this.oldModelRadius;
this.positionAtGrab;
this.rotationAtGrab;
this.modelPositionAtGrab;
this.modelRotationAtGrab;
this.jointsIntersectingFromStart = [];
this.laser = Overlays.addOverlay("line3d", {
@ -145,6 +155,11 @@ function controller(wichSide) {
this.oldModelRotation = properties.modelRotation;
this.oldModelRadius = properties.radius;
this.positionAtGrab = this.palmPosition;
this.rotationAtGrab = this.rotation;
this.modelPositionAtGrab = properties.position;
this.modelRotationAtGrab = properties.modelRotation;
this.jointsIntersectingFromStart = [];
for (var i = 0; i < jointList.length; i++) {
var distance = Vec3.distance(MyAvatar.getJointPosition(jointList[i]), this.oldModelPosition);
@ -152,6 +167,7 @@ function controller(wichSide) {
this.jointsIntersectingFromStart.push(i);
}
}
this.showLaser(false);
}
}
@ -169,12 +185,16 @@ function controller(wichSide) {
}
}
print("closestJoint: " + jointList[closestJointIndex]);
print("closestJointDistance (attach max distance): " + closestJointDistance + " (" + this.oldModelRadius + ")");
if (closestJointIndex != -1) {
print("closestJoint: " + jointList[closestJointIndex]);
print("closestJointDistance (attach max distance): " + closestJointDistance + " (" + this.oldModelRadius + ")");
}
if (closestJointDistance < this.oldModelRadius) {
if (this.jointsIntersectingFromStart.indexOf(closestJointIndex) != -1) {
if (this.jointsIntersectingFromStart.indexOf(closestJointIndex) != -1 ||
(leftController.grabbing && rightController.grabbing &&
leftController.modelID.id == rightController.modelID.id)) {
// Do nothing
} else {
print("Attaching to " + jointList[closestJointIndex]);
@ -188,6 +208,7 @@ function controller(wichSide) {
MyAvatar.attach(this.modelURL, jointList[closestJointIndex],
attachmentOffset, attachmentRotation, 2.0 * this.oldModelRadius,
true, false);
Models.deleteModel(this.modelID);
}
}
@ -196,6 +217,7 @@ function controller(wichSide) {
this.grabbing = false;
this.modelID.isKnownID = false;
this.jointsIntersectingFromStart = [];
this.showLaser(true);
}
this.checkTrigger = function () {
@ -246,6 +268,7 @@ function controller(wichSide) {
return { valid: false };
}
this.glowedIntersectingModel = { isKnownID: false };
this.moveLaser = function () {
// the overlays here are anchored to the avatar, which means they are specified in the avatar's local frame
@ -258,46 +281,99 @@ function controller(wichSide) {
Overlays.editOverlay(this.laser, {
position: startPosition,
end: endPosition,
visible: true
end: endPosition
});
Overlays.editOverlay(this.ball, {
position: endPosition,
visible: true
position: endPosition
});
Overlays.editOverlay(this.leftRight, {
position: Vec3.sum(endPosition, Vec3.multiply(this.right, 2 * this.guideScale)),
end: Vec3.sum(endPosition, Vec3.multiply(this.right, -2 * this.guideScale)),
visible: true
end: Vec3.sum(endPosition, Vec3.multiply(this.right, -2 * this.guideScale))
});
Overlays.editOverlay(this.topDown, {position: Vec3.sum(endPosition, Vec3.multiply(this.up, 2 * this.guideScale)),
end: Vec3.sum(endPosition, Vec3.multiply(this.up, -2 * this.guideScale)),
visible: true
end: Vec3.sum(endPosition, Vec3.multiply(this.up, -2 * this.guideScale))
});
this.showLaser(!this.grabbing || mode == 0);
if (this.glowedIntersectingModel.isKnownID) {
Models.editModel(this.glowedIntersectingModel, { glowLevel: 0.0 });
this.glowedIntersectingModel.isKnownID = false;
}
if (!this.grabbing) {
var intersection = Models.findRayIntersection({
origin: this.palmPosition,
direction: this.front
});
if (intersection.accurate && intersection.modelID.isKnownID) {
this.glowedIntersectingModel = intersection.modelID;
Models.editModel(this.glowedIntersectingModel, { glowLevel: 0.25 });
}
}
}
this.hideLaser = function() {
Overlays.editOverlay(this.laser, { visible: false });
Overlays.editOverlay(this.ball, { visible: false });
Overlays.editOverlay(this.leftRight, { visible: false });
Overlays.editOverlay(this.topDown, { visible: false });
this.showLaser = function(show) {
Overlays.editOverlay(this.laser, { visible: show });
Overlays.editOverlay(this.ball, { visible: show });
Overlays.editOverlay(this.leftRight, { visible: show });
Overlays.editOverlay(this.topDown, { visible: show });
}
this.moveModel = function () {
if (this.grabbing) {
var newPosition = Vec3.sum(this.palmPosition,
Vec3.multiply(this.front, this.x));
newPosition = Vec3.sum(newPosition,
Vec3.multiply(this.up, this.y));
newPosition = Vec3.sum(newPosition,
Vec3.multiply(this.right, this.z));
if (!this.modelID.isKnownID) {
print("Unknown grabbed ID " + this.modelID.id + ", isKnown: " + this.modelID.isKnownID);
this.modelID = Models.findRayIntersection({
origin: this.palmPosition,
direction: this.front
}).modelID;
print("Identified ID " + this.modelID.id + ", isKnown: " + this.modelID.isKnownID);
}
var newPosition;
var newRotation;
var newRotation = Quat.multiply(this.rotation,
Quat.inverse(this.oldRotation));
newRotation = Quat.multiply(newRotation,
this.oldModelRotation);
switch (mode) {
case 0:
newPosition = Vec3.sum(this.palmPosition,
Vec3.multiply(this.front, this.x));
newPosition = Vec3.sum(newPosition,
Vec3.multiply(this.up, this.y));
newPosition = Vec3.sum(newPosition,
Vec3.multiply(this.right, this.z));
newRotation = Quat.multiply(this.rotation,
Quat.inverse(this.oldRotation));
newRotation = Quat.multiply(newRotation,
this.oldModelRotation);
break;
case 1:
var forward = Vec3.multiplyQbyV(MyAvatar.orientation, { x: 0, y: 0, z: -1 });
var d = Vec3.dot(forward, MyAvatar.position);
var factor1 = Vec3.dot(forward, this.positionAtGrab) - d;
var factor2 = Vec3.dot(forward, this.modelPositionAtGrab) - d;
var vector = Vec3.subtract(this.palmPosition, this.positionAtGrab);
if (factor2 < 0) {
factor2 = 0;
}
if (factor1 <= 0) {
factor1 = 1;
factor2 = 1;
}
newPosition = Vec3.sum(this.modelPositionAtGrab,
Vec3.multiply(vector,
factor2 / factor1));
newRotation = Quat.multiply(this.rotation,
Quat.inverse(this.rotationAtGrab));
newRotation = Quat.multiply(newRotation,
this.modelRotationAtGrab);
break;
}
Models.editModel(this.modelID, {
position: newPosition,
@ -341,6 +417,21 @@ function controller(wichSide) {
this.triggerValue = Controller.getTriggerValue(this.trigger);
var bumperValue = Controller.isButtonPressed(this.bumper);
if (bumperValue && !this.bumperValue) {
if (mode == 0) {
mode = 1;
Overlays.editOverlay(leftController.laser, { color: { red: 0, green: 0, blue: 255 } });
Overlays.editOverlay(rightController.laser, { color: { red: 0, green: 0, blue: 255 } });
} else {
mode = 0;
Overlays.editOverlay(leftController.laser, { color: { red: 255, green: 0, blue: 0 } });
Overlays.editOverlay(rightController.laser, { color: { red: 255, green: 0, blue: 0 } });
}
}
this.bumperValue = bumperValue;
this.checkTrigger();
this.moveLaser();
@ -356,8 +447,12 @@ function controller(wichSide) {
var attachmentIndex = -1;
var attachmentX = LASER_LENGTH_FACTOR;
var newModel;
var newProperties;
for (var i = 0; i < attachments.length; ++i) {
var position = Vec3.sum(MyAvatar.getJointPosition(attachments[i].jointName), attachments[i].translation);
var position = Vec3.sum(MyAvatar.getJointPosition(attachments[i].jointName),
Vec3.multiplyQbyV(MyAvatar.getJointCombinedRotation(attachments[i].jointName), attachments[i].translation));
var scale = attachments[i].scale;
var A = this.palmPosition;
@ -375,53 +470,56 @@ function controller(wichSide) {
}
if (attachmentIndex != -1) {
print("Detaching: " + attachments[attachmentIndex].modelURL);
MyAvatar.detachOne(attachments[attachmentIndex].modelURL, attachments[attachmentIndex].jointName);
Models.addModel({
position: Vec3.sum(MyAvatar.getJointPosition(attachments[attachmentIndex].jointName),
attachments[attachmentIndex].translation),
modelRotation: Quat.multiply(MyAvatar.getJointCombinedRotation(attachments[attachmentIndex].jointName),
attachments[attachmentIndex].rotation),
radius: attachments[attachmentIndex].scale / 2.0,
modelURL: attachments[attachmentIndex].modelURL
});
}
// There is none so ...
// Checking model tree
Vec3.print("Looking at: ", this.palmPosition);
var pickRay = { origin: this.palmPosition,
direction: Vec3.normalize(Vec3.subtract(this.tipPosition, this.palmPosition)) };
var foundIntersection = Models.findRayIntersection(pickRay);
if(!foundIntersection.accurate) {
return;
}
var foundModel = foundIntersection.modelID;
if (!foundModel.isKnownID) {
var identify = Models.identifyModel(foundModel);
if (!identify.isKnownID) {
print("Unknown ID " + identify.id + " (update loop " + foundModel.id + ")");
return;
}
foundModel = identify;
}
var properties = Models.getModelProperties(foundModel);
print("foundModel.modelURL=" + properties.modelURL);
if (isLocked(properties)) {
print("Model locked " + properties.id);
newProperties = {
position: Vec3.sum(MyAvatar.getJointPosition(attachments[attachmentIndex].jointName),
Vec3.multiplyQbyV(MyAvatar.getJointCombinedRotation(attachments[attachmentIndex].jointName), attachments[attachmentIndex].translation)),
modelRotation: Quat.multiply(MyAvatar.getJointCombinedRotation(attachments[attachmentIndex].jointName),
attachments[attachmentIndex].rotation),
radius: attachments[attachmentIndex].scale / 2.0,
modelURL: attachments[attachmentIndex].modelURL
};
newModel = Models.addModel(newProperties);
} else {
print("Checking properties: " + properties.id + " " + properties.isKnownID);
var check = this.checkModel(properties);
if (check.valid) {
this.grab(foundModel, properties);
this.x = check.x;
this.y = check.y;
this.z = check.z;
// There is none so ...
// Checking model tree
Vec3.print("Looking at: ", this.palmPosition);
var pickRay = { origin: this.palmPosition,
direction: Vec3.normalize(Vec3.subtract(this.tipPosition, this.palmPosition)) };
var foundIntersection = Models.findRayIntersection(pickRay);
if(!foundIntersection.accurate) {
print("No accurate intersection");
return;
}
newModel = foundIntersection.modelID;
if (!newModel.isKnownID) {
var identify = Models.identifyModel(newModel);
if (!identify.isKnownID) {
print("Unknown ID " + identify.id + " (update loop " + newModel.id + ")");
return;
}
newModel = identify;
}
newProperties = Models.getModelProperties(newModel);
}
print("foundModel.modelURL=" + newProperties.modelURL);
if (isLocked(newProperties)) {
print("Model locked " + newProperties.id);
} else {
this.grab(newModel, newProperties);
var check = this.checkModel(newProperties);
this.x = check.x;
this.y = check.y;
this.z = check.z;
return;
}
}
}
@ -439,38 +537,74 @@ var rightController = new controller(RIGHT);
function moveModels() {
if (leftController.grabbing && rightController.grabbing && rightController.modelID.id == leftController.modelID.id) {
//print("Both controllers");
var oldLeftPoint = Vec3.sum(leftController.oldPalmPosition, Vec3.multiply(leftController.oldFront, leftController.x));
var oldRightPoint = Vec3.sum(rightController.oldPalmPosition, Vec3.multiply(rightController.oldFront, rightController.x));
var oldMiddle = Vec3.multiply(Vec3.sum(oldLeftPoint, oldRightPoint), 0.5);
var oldLength = Vec3.length(Vec3.subtract(oldLeftPoint, oldRightPoint));
var newPosition = leftController.oldModelPosition;
var rotation = leftController.oldModelRotation;
var ratio = 1;
var leftPoint = Vec3.sum(leftController.palmPosition, Vec3.multiply(leftController.front, leftController.x));
var rightPoint = Vec3.sum(rightController.palmPosition, Vec3.multiply(rightController.front, rightController.x));
var middle = Vec3.multiply(Vec3.sum(leftPoint, rightPoint), 0.5);
var length = Vec3.length(Vec3.subtract(leftPoint, rightPoint));
var ratio = length / oldLength;
var newPosition = Vec3.sum(middle,
Vec3.multiply(Vec3.subtract(leftController.oldModelPosition, oldMiddle), ratio));
//Vec3.print("Ratio : " + ratio + " New position: ", newPosition);
var rotation = Quat.multiply(leftController.rotation,
Quat.inverse(leftController.oldRotation));
rotation = Quat.multiply(rotation, leftController.oldModelRotation);
switch (mode) {
case 0:
var oldLeftPoint = Vec3.sum(leftController.oldPalmPosition, Vec3.multiply(leftController.oldFront, leftController.x));
var oldRightPoint = Vec3.sum(rightController.oldPalmPosition, Vec3.multiply(rightController.oldFront, rightController.x));
var oldMiddle = Vec3.multiply(Vec3.sum(oldLeftPoint, oldRightPoint), 0.5);
var oldLength = Vec3.length(Vec3.subtract(oldLeftPoint, oldRightPoint));
var leftPoint = Vec3.sum(leftController.palmPosition, Vec3.multiply(leftController.front, leftController.x));
var rightPoint = Vec3.sum(rightController.palmPosition, Vec3.multiply(rightController.front, rightController.x));
var middle = Vec3.multiply(Vec3.sum(leftPoint, rightPoint), 0.5);
var length = Vec3.length(Vec3.subtract(leftPoint, rightPoint));
ratio = length / oldLength;
newPosition = Vec3.sum(middle,
Vec3.multiply(Vec3.subtract(leftController.oldModelPosition, oldMiddle), ratio));
break;
case 1:
var u = Vec3.normalize(Vec3.subtract(rightController.oldPalmPosition, leftController.oldPalmPosition));
var v = Vec3.normalize(Vec3.subtract(rightController.palmPosition, leftController.palmPosition));
var cos_theta = Vec3.dot(u, v);
if (cos_theta > 1) {
cos_theta = 1;
}
var angle = Math.acos(cos_theta) / Math.PI * 180;
if (angle < 0.1) {
return;
}
var w = Vec3.normalize(Vec3.cross(u, v));
rotation = Quat.multiply(Quat.angleAxis(angle, w), leftController.oldModelRotation);
leftController.positionAtGrab = leftController.palmPosition;
leftController.rotationAtGrab = leftController.rotation;
leftController.modelPositionAtGrab = leftController.oldModelPosition;
leftController.modelRotationAtGrab = rotation;
rightController.positionAtGrab = rightController.palmPosition;
rightController.rotationAtGrab = rightController.rotation;
rightController.modelPositionAtGrab = rightController.oldModelPosition;
rightController.modelRotationAtGrab = rotation;
break;
}
Models.editModel(leftController.modelID, {
position: newPosition,
//modelRotation: rotation,
modelRotation: rotation,
radius: leftController.oldModelRadius * ratio
});
leftController.oldModelPosition = newPosition;
leftController.oldModelRotation = rotation;
leftController.oldModelRadius *= ratio;
rightController.oldModelPosition = newPosition;
rightController.oldModelRotation = rotation;
rightController.oldModelRadius *= ratio;
return;
}
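
The two-controller rotation above can be read as a plain angle-axis construction: take the old and new controller-to-controller directions u and v, and rotate about their cross product by the angle between them. Below is a GLM sketch of the same math (GLM works in radians, while the script converts its angle to degrees before calling Quat.angleAxis); it is illustrative, not part of the commit.

#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

// Rotation that carries direction oldDir onto direction newDir.
glm::quat rotationBetween(const glm::vec3& oldDir, const glm::vec3& newDir) {
    glm::vec3 u = glm::normalize(oldDir);
    glm::vec3 v = glm::normalize(newDir);
    float cosTheta = glm::clamp(glm::dot(u, v), -1.0f, 1.0f); // guard acos against rounding
    float angle = acosf(cosTheta);
    if (angle < 1e-4f) {
        return glm::quat(1.0f, 0.0f, 0.0f, 0.0f); // directions already aligned: identity
    }
    glm::vec3 axis = glm::normalize(glm::cross(u, v));
    return glm::angleAxis(angle, axis);
}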
@ -498,8 +632,8 @@ function checkController(deltaTime) {
if (hydraConnected) {
hydraConnected = false;
leftController.hideLaser();
rightController.hideLaser();
leftController.showLaser(false);
rightController.showLaser(false);
}
}
@ -510,7 +644,7 @@ function initToolBar() {
toolBar = new ToolBar(0, 0, ToolBar.VERTICAL);
// New Model
newModel = toolBar.addTool({
imageURL: toolIconUrl + "voxel-tool.svg",
imageURL: toolIconUrl + "add-model-tool.svg",
subImage: { x: 0, y: Tool.IMAGE_WIDTH, width: Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT },
width: toolWidth, height: toolHeight,
visible: true,

examples/squeezeHands.js (new file, 52 lines)

@ -0,0 +1,52 @@
//
// squeezeHands.js
// examples
//
// Created by Philip Rosedale on June 4, 2014
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
var rightHandAnimation = "https://s3-us-west-1.amazonaws.com/highfidelity-public/animations/RightHandAnim.fbx";
var leftHandAnimation = "https://s3-us-west-1.amazonaws.com/highfidelity-public/animations/LeftHandAnim.fbx";
var LEFT = 0;
var RIGHT = 1;
var lastLeftFrame = 0;
var lastRightFrame = 0;
var LAST_FRAME = 11.0; // the number of the last frame we want to use in the animation
var SMOOTH_FACTOR = 0.80;
Script.update.connect(function(deltaTime) {
var leftTriggerValue = Math.sqrt(Controller.getTriggerValue(LEFT));
var rightTriggerValue = Math.sqrt(Controller.getTriggerValue(RIGHT));
var leftFrame, rightFrame;
// Average last few trigger frames together for a bit of smoothing
leftFrame = (leftTriggerValue * LAST_FRAME) * (1.0 - SMOOTH_FACTOR) + lastLeftFrame * SMOOTH_FACTOR;
rightFrame = (rightTriggerValue * LAST_FRAME) * (1.0 - SMOOTH_FACTOR) + lastRightFrame * SMOOTH_FACTOR;
if ((leftFrame != lastLeftFrame) && leftHandAnimation.length){
MyAvatar.stopAnimation(leftHandAnimation);
MyAvatar.startAnimation(leftHandAnimation, 30.0, 1.0, false, true, leftFrame, leftFrame);
}
if ((rightFrame != lastRightFrame) && rightHandAnimation.length) {
MyAvatar.stopAnimation(rightHandAnimation);
MyAvatar.startAnimation(rightHandAnimation, 30.0, 1.0, false, true, rightFrame, rightFrame);
}
lastLeftFrame = leftFrame;
lastRightFrame = rightFrame;
});
Script.scriptEnding.connect(function() {
MyAvatar.stopAnimation(leftHandAnimation);
MyAvatar.stopAnimation(rightHandAnimation);
});
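
The per-frame smoothing in the script above is a simple exponential moving average of the trigger-driven target frame. A one-function sketch of the same formula (the constants mirror the script's LAST_FRAME and SMOOTH_FACTOR; the function name is illustrative):

// Blend the trigger-driven target frame with the previous output frame.
float smoothHandFrame(float triggerValue, float lastFrame) {
    const float LAST_FRAME = 11.0f;    // last animation frame used
    const float SMOOTH_FACTOR = 0.80f; // weight given to the previous frame
    float targetFrame = triggerValue * LAST_FRAME;
    return targetFrame * (1.0f - SMOOTH_FACTOR) + lastFrame * SMOOTH_FACTOR;
}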


@ -167,7 +167,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
_applicationOverlay(),
_runningScriptsWidget(new RunningScriptsWidget(_window)),
_runningScriptsWidgetWasVisible(false),
_trayIcon(new QSystemTrayIcon(_window))
_trayIcon(new QSystemTrayIcon(_window)),
_lastNackTime(usecTimestampNow())
{
// read the ApplicationInfo.ini file for Name/Version/Domain information
QSettings applicationInfo(Application::resourcesPath() + "info/ApplicationInfo.ini", QSettings::IniFormat);
@ -1007,12 +1008,6 @@ void Application::keyPressEvent(QKeyEvent* event) {
case Qt::Key_At:
Menu::getInstance()->goTo();
break;
case Qt::Key_B:
_applicationOverlay.setOculusAngle(_applicationOverlay.getOculusAngle() - RADIANS_PER_DEGREE);
break;
case Qt::Key_N:
_applicationOverlay.setOculusAngle(_applicationOverlay.getOculusAngle() + RADIANS_PER_DEGREE);
break;
default:
event->ignore();
break;
@ -1848,7 +1843,8 @@ void Application::updateMyAvatarLookAtPosition() {
}
} else {
// I am not looking at anyone else, so just look forward
lookAtSpot = _myAvatar->getHead()->calculateAverageEyePosition() + (_myAvatar->getHead()->getFinalOrientation() * glm::vec3(0.f, 0.f, -TREE_SCALE));
lookAtSpot = _myAvatar->getHead()->calculateAverageEyePosition() +
(_myAvatar->getHead()->getFinalOrientationInWorldFrame() * glm::vec3(0.f, 0.f, -TREE_SCALE));
}
// TODO: Add saccade to mouse pointer when stable, IF not looking at someone (since we know we are looking at it)
/*
@ -2098,6 +2094,78 @@ void Application::updateMyAvatar(float deltaTime) {
_lastQueriedViewFrustum = _viewFrustum;
}
}
// send a NACK packet containing the sequence numbers of octree packets that were not received
{
quint64 now = usecTimestampNow();
quint64 sinceLastNack = now - _lastNackTime;
const quint64 TOO_LONG_SINCE_LAST_NACK = 250 * MSECS_PER_SECOND;
if (sinceLastNack > TOO_LONG_SINCE_LAST_NACK) {
_lastNackTime = now;
sendNack();
}
}
}
void Application::sendNack() {
char packet[MAX_PACKET_SIZE];
NodeList* nodeList = NodeList::getInstance();
// iterate through all nodes in the NodeList
foreach(const SharedNodePointer& node, NodeList::getInstance()->getNodeHash()) {
if (node->getActiveSocket() &&
( node->getType() == NodeType::VoxelServer
|| node->getType() == NodeType::ParticleServer
|| node->getType() == NodeType::ModelServer)
) {
QUuid nodeUUID = node->getUUID();
_octreeSceneStatsLock.lockForRead();
// retrieve the octree scene stats for this node
if (_octreeServerSceneStats.find(nodeUUID) == _octreeServerSceneStats.end()) {
_octreeSceneStatsLock.unlock();
continue;
}
OctreeSceneStats& stats = _octreeServerSceneStats[nodeUUID];
// check if there are any sequence numbers that need to be nacked
int numSequenceNumbersAvailable = stats.getNumSequenceNumbersToNack();
if (numSequenceNumbersAvailable == 0) {
_octreeSceneStatsLock.unlock();
continue;
}
char* dataAt = packet;
int bytesRemaining = MAX_PACKET_SIZE;
// pack header
int numBytesPacketHeader = populatePacketHeader(packet, PacketTypeOctreeDataNack);
dataAt += numBytesPacketHeader;
bytesRemaining -= numBytesPacketHeader;
int numSequenceNumbersRoomFor = (bytesRemaining - sizeof(uint16_t)) / sizeof(OCTREE_PACKET_SEQUENCE);
// calculate and pack the number of sequence numbers
uint16_t numSequenceNumbers = min(numSequenceNumbersAvailable, numSequenceNumbersRoomFor);
uint16_t* numSequenceNumbersAt = (uint16_t*)dataAt;
*numSequenceNumbersAt = numSequenceNumbers;
dataAt += sizeof(uint16_t);
// pack sequence numbers
for (int i = 0; i < numSequenceNumbers; i++) {
OCTREE_PACKET_SEQUENCE* sequenceNumberAt = (OCTREE_PACKET_SEQUENCE*)dataAt;
*sequenceNumberAt = stats.getNextSequenceNumberToNack();
dataAt += sizeof(OCTREE_PACKET_SEQUENCE);
}
_octreeSceneStatsLock.unlock();
nodeList->writeUnverifiedDatagram(packet, dataAt - packet, node);
}
}
}
void Application::queryOctree(NodeType_t serverType, PacketType packetType, NodeToJurisdictionMap& jurisdictions) {


@ -411,6 +411,8 @@ private:
static void attachNewHeadToNode(Node *newNode);
static void* networkReceive(void* args); // network receive thread
void sendNack();
MainWindow* _window;
GLCanvas* _glWidget; // our GLCanvas has a couple extra features
@ -580,6 +582,8 @@ private:
bool _runningScriptsWidgetWasVisible;
QSystemTrayIcon* _trayIcon;
quint64 _lastNackTime;
};
#endif // hifi_Application_h


@ -68,6 +68,7 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
_proceduralOutputDevice(NULL),
_inputRingBuffer(0),
_ringBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL),
_isStereoInput(false),
_averagedLatency(0.0),
_measuredJitter(0),
_jitterBufferSamples(initialJitterBufferSamples),
@ -289,20 +290,27 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
if (sourceToDestinationFactor >= 2) {
// we need to downsample from 48 to 24
// for now this only supports a mono output - this would be the case for audio input
for (unsigned int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
if (destinationAudioFormat.channelCount() == 1) {
for (unsigned int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
(sourceSamples[i - sourceAudioFormat.channelCount()] / 2)
+ (sourceSamples[i] / 2);
} else {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
} else {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
(sourceSamples[i - sourceAudioFormat.channelCount()] / 4)
+ (sourceSamples[i] / 2)
+ (sourceSamples[i + sourceAudioFormat.channelCount()] / 4);
}
}
} else {
// this is a 48 to 24 resampling but both source and destination are two channels
// squish two samples into one in each channel
for (int i = 0; i < numSourceSamples; i += 4) {
destinationSamples[i / 2] = (sourceSamples[i] / 2) + (sourceSamples[i + 2] / 2);
destinationSamples[(i / 2) + 1] = (sourceSamples[i + 1] / 2) + (sourceSamples[i + 3] / 2);
}
}
} else {
if (sourceAudioFormat.sampleRate() == destinationAudioFormat.sampleRate()) {
// mono to stereo, same sample rate
@ -405,12 +413,12 @@ bool Audio::switchOutputToAudioDevice(const QString& outputDeviceName) {
}
void Audio::handleAudioInput() {
static char monoAudioDataPacket[MAX_PACKET_SIZE];
static char audioDataPacket[MAX_PACKET_SIZE];
static int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMicrophoneAudioNoEcho);
static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat);
static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
static int16_t* monoAudioSamples = (int16_t*) (monoAudioDataPacket + leadingBytes);
static int16_t* networkAudioSamples = (int16_t*) (audioDataPacket + leadingBytes);
float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
@ -452,126 +460,139 @@ void Audio::handleAudioInput() {
int16_t* inputAudioSamples = new int16_t[inputSamplesRequired];
_inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);
int numNetworkBytes = _isStereoInput ? NETWORK_BUFFER_LENGTH_BYTES_STEREO : NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
int numNetworkSamples = _isStereoInput ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
// zero out the monoAudioSamples array and the locally injected audio
memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
memset(networkAudioSamples, 0, numNetworkBytes);
if (!_muted) {
// we aren't muted, downsample the input audio
linearResampling((int16_t*) inputAudioSamples,
monoAudioSamples,
inputSamplesRequired,
NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
linearResampling((int16_t*) inputAudioSamples, networkAudioSamples,
inputSamplesRequired, numNetworkSamples,
_inputFormat, _desiredInputFormat);
//
// Impose Noise Gate
//
// The Noise Gate is used to reject constant background noise by measuring the noise
// floor observed at the microphone and then opening the 'gate' to allow microphone
// signals to be transmitted when the microphone samples average level exceeds a multiple
// of the noise floor.
//
// NOISE_GATE_HEIGHT: How loud you have to speak relative to noise background to open the gate.
// Make this value lower for more sensitivity and less rejection of noise.
// NOISE_GATE_WIDTH: The number of samples in an audio frame for which the height must be exceeded
// to open the gate.
// NOISE_GATE_CLOSE_FRAME_DELAY: Once the noise is below the gate height for the frame, how many frames
// will we wait before closing the gate.
// NOISE_GATE_FRAMES_TO_AVERAGE: How many audio frames should we average together to compute noise floor.
// More means better rejection but also can reject continuous things like singing.
// NUMBER_OF_NOISE_SAMPLE_FRAMES: How often should we re-evaluate the noise floor?
float loudness = 0;
float thisSample = 0;
int samplesOverNoiseGate = 0;
const float NOISE_GATE_HEIGHT = 7.0f;
const int NOISE_GATE_WIDTH = 5;
const int NOISE_GATE_CLOSE_FRAME_DELAY = 5;
const int NOISE_GATE_FRAMES_TO_AVERAGE = 5;
const float DC_OFFSET_AVERAGING = 0.99f;
const float CLIPPING_THRESHOLD = 0.90f;
//
// Check clipping, adjust DC offset, and check if should open noise gate
//
float measuredDcOffset = 0.0f;
// Increment the time since the last clip
if (_timeSinceLastClip >= 0.0f) {
_timeSinceLastClip += (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE;
}
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
measuredDcOffset += monoAudioSamples[i];
monoAudioSamples[i] -= (int16_t) _dcOffset;
thisSample = fabsf(monoAudioSamples[i]);
if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
_timeSinceLastClip = 0.0f;
// only impose the noise gate and perform tone injection if we are sending mono audio
if (!_isStereoInput) {
//
// Impose Noise Gate
//
// The Noise Gate is used to reject constant background noise by measuring the noise
// floor observed at the microphone and then opening the 'gate' to allow microphone
// signals to be transmitted when the microphone samples average level exceeds a multiple
// of the noise floor.
//
// NOISE_GATE_HEIGHT: How loud you have to speak relative to noise background to open the gate.
// Make this value lower for more sensitivity and less rejection of noise.
// NOISE_GATE_WIDTH: The number of samples in an audio frame for which the height must be exceeded
// to open the gate.
// NOISE_GATE_CLOSE_FRAME_DELAY: Once the noise is below the gate height for the frame, how many frames
// will we wait before closing the gate.
// NOISE_GATE_FRAMES_TO_AVERAGE: How many audio frames should we average together to compute noise floor.
// More means better rejection but also can reject continuous things like singing.
// NUMBER_OF_NOISE_SAMPLE_FRAMES: How often should we re-evaluate the noise floor?
float loudness = 0;
float thisSample = 0;
int samplesOverNoiseGate = 0;
const float NOISE_GATE_HEIGHT = 7.0f;
const int NOISE_GATE_WIDTH = 5;
const int NOISE_GATE_CLOSE_FRAME_DELAY = 5;
const int NOISE_GATE_FRAMES_TO_AVERAGE = 5;
const float DC_OFFSET_AVERAGING = 0.99f;
const float CLIPPING_THRESHOLD = 0.90f;
//
// Check clipping, adjust DC offset, and check if should open noise gate
//
float measuredDcOffset = 0.0f;
// Increment the time since the last clip
if (_timeSinceLastClip >= 0.0f) {
_timeSinceLastClip += (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE;
}
loudness += thisSample;
// Noise Reduction: Count peaks above the average loudness
if (_noiseGateEnabled && (thisSample > (_noiseGateMeasuredFloor * NOISE_GATE_HEIGHT))) {
samplesOverNoiseGate++;
}
}
measuredDcOffset /= NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
if (_dcOffset == 0.0f) {
// On first frame, copy over measured offset
_dcOffset = measuredDcOffset;
} else {
_dcOffset = DC_OFFSET_AVERAGING * _dcOffset + (1.0f - DC_OFFSET_AVERAGING) * measuredDcOffset;
}
// Add tone injection if enabled
const float TONE_FREQ = 220.0f / SAMPLE_RATE * TWO_PI;
const float QUARTER_VOLUME = 8192.0f;
if (_toneInjectionEnabled) {
loudness = 0.0f;
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
monoAudioSamples[i] = QUARTER_VOLUME * sinf(TONE_FREQ * (float)(i + _proceduralEffectSample));
loudness += fabsf(monoAudioSamples[i]);
}
}
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
// If Noise Gate is enabled, check and turn the gate on and off
if (!_toneInjectionEnabled && _noiseGateEnabled) {
float averageOfAllSampleFrames = 0.0f;
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
float smallestSample = FLT_MAX;
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i += NOISE_GATE_FRAMES_TO_AVERAGE) {
float thisAverage = 0.0f;
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
thisAverage += _noiseSampleFrames[j];
averageOfAllSampleFrames += _noiseSampleFrames[j];
}
thisAverage /= NOISE_GATE_FRAMES_TO_AVERAGE;
if (thisAverage < smallestSample) {
smallestSample = thisAverage;
}
measuredDcOffset += networkAudioSamples[i];
networkAudioSamples[i] -= (int16_t) _dcOffset;
thisSample = fabsf(networkAudioSamples[i]);
if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
_timeSinceLastClip = 0.0f;
}
loudness += thisSample;
// Noise Reduction: Count peaks above the average loudness
if (_noiseGateEnabled && (thisSample > (_noiseGateMeasuredFloor * NOISE_GATE_HEIGHT))) {
samplesOverNoiseGate++;
}
averageOfAllSampleFrames /= NUMBER_OF_NOISE_SAMPLE_FRAMES;
_noiseGateMeasuredFloor = smallestSample;
_noiseGateSampleCounter = 0;
}
if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
_noiseGateOpen = true;
_noiseGateFramesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
measuredDcOffset /= NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
if (_dcOffset == 0.0f) {
// On first frame, copy over measured offset
_dcOffset = measuredDcOffset;
} else {
if (--_noiseGateFramesToClose == 0) {
_noiseGateOpen = false;
_dcOffset = DC_OFFSET_AVERAGING * _dcOffset + (1.0f - DC_OFFSET_AVERAGING) * measuredDcOffset;
}
// Add tone injection if enabled
const float TONE_FREQ = 220.0f / SAMPLE_RATE * TWO_PI;
const float QUARTER_VOLUME = 8192.0f;
if (_toneInjectionEnabled) {
loudness = 0.0f;
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
networkAudioSamples[i] = QUARTER_VOLUME * sinf(TONE_FREQ * (float)(i + _proceduralEffectSample));
loudness += fabsf(networkAudioSamples[i]);
}
}
if (!_noiseGateOpen) {
memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
_lastInputLoudness = 0;
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
// If Noise Gate is enabled, check and turn the gate on and off
if (!_toneInjectionEnabled && _noiseGateEnabled) {
float averageOfAllSampleFrames = 0.0f;
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
float smallestSample = FLT_MAX;
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i += NOISE_GATE_FRAMES_TO_AVERAGE) {
float thisAverage = 0.0f;
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
thisAverage += _noiseSampleFrames[j];
averageOfAllSampleFrames += _noiseSampleFrames[j];
}
thisAverage /= NOISE_GATE_FRAMES_TO_AVERAGE;
if (thisAverage < smallestSample) {
smallestSample = thisAverage;
}
}
averageOfAllSampleFrames /= NUMBER_OF_NOISE_SAMPLE_FRAMES;
_noiseGateMeasuredFloor = smallestSample;
_noiseGateSampleCounter = 0;
}
if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
_noiseGateOpen = true;
_noiseGateFramesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
} else {
if (--_noiseGateFramesToClose == 0) {
_noiseGateOpen = false;
}
}
if (!_noiseGateOpen) {
memset(networkAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
_lastInputLoudness = 0;
}
}
} else {
float loudness = 0.0f;
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; i++) {
loudness += fabsf(networkAudioSamples[i]);
}
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
}
} else {
// our input loudness is 0, since we're muted
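The comment block and gate logic in the hunk above describe the noise gate end to end: measure a noise floor, open the gate when enough samples in a frame exceed a multiple of that floor, and close it again after a few quiet frames. As a quick reference, here is a minimal, self-contained sketch of the per-frame decision; GateState and applyNoiseGate are illustrative names rather than codebase symbols, and the floor measurement is assumed to happen elsewhere (as in the windowed-average code above).

#include <cmath>
#include <cstdint>
#include <cstring>

struct GateState {
    bool open = false;
    int framesToClose = 0;
    float measuredFloor = 0.0f;   // assumed to be updated elsewhere from the windowed loudness averages
};

static void applyNoiseGate(int16_t* samples, int numSamples, GateState& gate) {
    const float NOISE_GATE_HEIGHT = 7.0f;
    const int NOISE_GATE_WIDTH = 5;
    const int NOISE_GATE_CLOSE_FRAME_DELAY = 5;

    int samplesOverGate = 0;
    for (int i = 0; i < numSamples; i++) {
        if (fabsf((float) samples[i]) > gate.measuredFloor * NOISE_GATE_HEIGHT) {
            samplesOverGate++;
        }
    }
    if (samplesOverGate > NOISE_GATE_WIDTH) {
        gate.open = true;                                   // enough loud samples: open the gate
        gate.framesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;  // and re-arm the close delay
    } else if (gate.open && --gate.framesToClose == 0) {
        gate.open = false;                                  // quiet for several frames: close it
    }
    if (!gate.open) {
        memset(samples, 0, numSamples * sizeof(int16_t));   // a closed gate transmits silence
    }
}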
@ -580,19 +601,19 @@ void Audio::handleAudioInput() {
// at this point we have clean monoAudioSamples, which match our target output...
// this is what we should send to our interested listeners
if (_processSpatialAudio && !_muted && _audioOutput) {
QByteArray monoInputData((char*)monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
if (_processSpatialAudio && !_muted && !_isStereoInput && _audioOutput) {
QByteArray monoInputData((char*)networkAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
emit processLocalAudio(_spatialAudioStart, monoInputData, _desiredInputFormat);
}
if (_proceduralAudioOutput) {
processProceduralAudio(monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
if (!_isStereoInput && _proceduralAudioOutput) {
processProceduralAudio(networkAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
}
if (_scopeEnabled && !_scopeEnabledPause) {
if (!_isStereoInput && _scopeEnabled && !_scopeEnabledPause) {
unsigned int numMonoAudioChannels = 1;
unsigned int monoAudioChannel = 0;
addBufferToScope(_scopeInput, _scopeInputOffset, monoAudioSamples, monoAudioChannel, numMonoAudioChannels);
addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, monoAudioChannel, numMonoAudioChannels);
_scopeInputOffset += NETWORK_SAMPLES_PER_FRAME;
_scopeInputOffset %= _samplesPerScope;
}
@ -603,10 +624,8 @@ void Audio::handleAudioInput() {
if (audioMixer && audioMixer->getActiveSocket()) {
MyAvatar* interfaceAvatar = Application::getInstance()->getAvatar();
glm::vec3 headPosition = interfaceAvatar->getHead()->getPosition();
glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientation();
// we need the amount of bytes in the buffer + 1 for type
// + 12 for 3 floats for position + float for bearing + 1 attenuation byte
glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientationInWorldFrame();
quint8 isStereo = _isStereoInput ? 1 : 0;
int numAudioBytes = 0;
@ -615,11 +634,12 @@ void Audio::handleAudioInput() {
packetType = PacketTypeSilentAudioFrame;
// we need to indicate how many silent samples this is to the audio mixer
monoAudioSamples[0] = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
audioDataPacket[0] = _isStereoInput
? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO
: NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
numAudioBytes = sizeof(int16_t);
} else {
numAudioBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
numAudioBytes = _isStereoInput ? NETWORK_BUFFER_LENGTH_BYTES_STEREO : NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
packetType = PacketTypeMicrophoneAudioWithEcho;
@ -628,7 +648,10 @@ void Audio::handleAudioInput() {
}
}
char* currentPacketPtr = monoAudioDataPacket + populatePacketHeader(monoAudioDataPacket, packetType);
char* currentPacketPtr = audioDataPacket + populatePacketHeader(audioDataPacket, packetType);
// set the mono/stereo byte
*currentPacketPtr++ = isStereo;
// memcpy the three float positions
memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
@ -638,7 +661,7 @@ void Audio::handleAudioInput() {
memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
currentPacketPtr += sizeof(headOrientation);
nodeList->writeDatagram(monoAudioDataPacket, numAudioBytes + leadingBytes, audioMixer);
nodeList->writeDatagram(audioDataPacket, numAudioBytes + leadingBytes, audioMixer);
Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
.updateValue(numAudioBytes + leadingBytes);
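Taken together, the hunks above switch the outgoing microphone packet from mono-only to mono-or-stereo: after the packet header come a one-byte stereo flag, the head position, the head orientation, and then the audio bytes (or, for a silent frame, a single int16_t holding the silent-sample count). A minimal sketch of that payload assembly; writeAudioPayload is a hypothetical helper, not a codebase function.

#include <cstdint>
#include <cstring>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

static int writeAudioPayload(char* dest, uint8_t isStereo,
                             const glm::vec3& headPosition, const glm::quat& headOrientation,
                             const char* audioData, int numAudioBytes) {
    char* cursor = dest;
    *cursor++ = (char) isStereo;                                // 1 byte: mono/stereo flag
    memcpy(cursor, &headPosition, sizeof(headPosition));        // 12 bytes: listener position
    cursor += sizeof(headPosition);
    memcpy(cursor, &headOrientation, sizeof(headOrientation));  // 16 bytes: listener orientation
    cursor += sizeof(headOrientation);
    memcpy(cursor, audioData, numAudioBytes);                   // raw samples, or the silent-sample count
    cursor += numAudioBytes;
    return (int) (cursor - dest);                               // leadingBytes + numAudioBytes
}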
@ -761,6 +784,24 @@ void Audio::toggleAudioNoiseReduction() {
_noiseGateEnabled = !_noiseGateEnabled;
}
void Audio::toggleStereoInput() {
int oldChannelCount = _desiredInputFormat.channelCount();
QAction* stereoAudioOption = Menu::getInstance()->getActionForOption(MenuOption::StereoAudio);
if (stereoAudioOption->isChecked()) {
_desiredInputFormat.setChannelCount(2);
_isStereoInput = true;
} else {
_desiredInputFormat.setChannelCount(1);
_isStereoInput = false;
}
if (oldChannelCount != _desiredInputFormat.channelCount()) {
// change in channel count for desired input format, restart the input device
switchInputToAudioDevice(_inputAudioDeviceName);
}
}
void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
_ringBuffer.parseData(audioByteArray);
@ -1300,18 +1341,21 @@ bool Audio::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {
if (adjustedFormatForAudioDevice(inputDeviceInfo, _desiredInputFormat, _inputFormat)) {
qDebug() << "The format to be used for audio input is" << _inputFormat;
_audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
_numInputCallbackBytes = calculateNumberOfInputCallbackBytes(_inputFormat);
_audioInput->setBufferSize(_numInputCallbackBytes);
// how do we want to handle input working, but output not working?
int numFrameSamples = calculateNumberOfFrameSamples(_numInputCallbackBytes);
_inputRingBuffer.resizeForFrameSize(numFrameSamples);
_inputDevice = _audioInput->start();
connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));
supportedFormat = true;
// if the user wants stereo but this device can't provide it, then bail
if (!_isStereoInput || _inputFormat.channelCount() == 2) {
_audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
_numInputCallbackBytes = calculateNumberOfInputCallbackBytes(_inputFormat);
_audioInput->setBufferSize(_numInputCallbackBytes);
// how do we want to handle input working, but output not working?
int numFrameSamples = calculateNumberOfFrameSamples(_numInputCallbackBytes);
_inputRingBuffer.resizeForFrameSize(numFrameSamples);
_inputDevice = _audioInput->start();
connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));
supportedFormat = true;
}
}
}
return supportedFormat;


@ -85,6 +85,7 @@ public slots:
void toggleScope();
void toggleScopePause();
void toggleAudioSpatialProcessing();
void toggleStereoInput();
void selectAudioScopeFiveFrames();
void selectAudioScopeTwentyFrames();
void selectAudioScopeFiftyFrames();
@ -127,6 +128,7 @@ private:
QIODevice* _proceduralOutputDevice;
AudioRingBuffer _inputRingBuffer;
AudioRingBuffer _ringBuffer;
bool _isStereoInput;
QString _inputAudioDeviceName;
QString _outputAudioDeviceName;


@ -459,7 +459,7 @@ void AudioReflector::calculateAllReflections() {
// only recalculate when we've moved, or if the attributes have changed
// TODO: what about case where new voxels are added in front of us???
bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented);
glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation();
glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientationInWorldFrame() : _myAvatar->getOrientation();
glm::vec3 origin = _myAvatar->getHead()->getPosition();
glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition();


@ -18,6 +18,7 @@
#include "Camera.h"
#include "Menu.h"
#include "Util.h"
#include "devices/OculusManager.h"
const float CAMERA_FIRST_PERSON_MODE_UP_SHIFT = 0.0f;
const float CAMERA_FIRST_PERSON_MODE_DISTANCE = 0.0f;
@ -264,7 +265,12 @@ PickRay CameraScriptableObject::computePickRay(float x, float y) {
float screenWidth = Application::getInstance()->getGLWidget()->width();
float screenHeight = Application::getInstance()->getGLWidget()->height();
PickRay result;
_viewFrustum->computePickRay(x / screenWidth, y / screenHeight, result.origin, result.direction);
if (OculusManager::isConnected()) {
result.origin = _camera->getPosition();
Application::getInstance()->getApplicationOverlay().computeOculusPickRay(x / screenWidth, y / screenHeight, result.direction);
} else {
_viewFrustum->computePickRay(x / screenWidth, y / screenHeight, result.origin, result.direction);
}
return result;
}


@ -353,6 +353,7 @@ Menu::Menu() :
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::RenderSkeletonCollisionShapes);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::RenderHeadCollisionShapes);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::RenderBoundingCollisionShapes);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::CollideAsRagDoll);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::LookAtVectors, 0, false);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu,
@ -432,6 +433,8 @@ Menu::Menu() :
SLOT(toggleAudioNoiseReduction()));
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoServerAudio);
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoLocalAudio);
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::StereoAudio, 0, false,
appInstance->getAudio(), SLOT(toggleStereoInput()));
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::MuteAudio,
Qt::CTRL | Qt::Key_M,
false,


@ -311,6 +311,7 @@ namespace MenuOption {
const QString CascadedShadows = "Cascaded";
const QString Chat = "Chat...";
const QString ChatCircling = "Chat Circling";
const QString CollideAsRagDoll = "Collide As RagDoll";
const QString CollideWithAvatars = "Collide With Avatars";
const QString CollideWithEnvironment = "Collide With World Boundaries";
const QString CollideWithParticles = "Collide With Particles";
@ -402,6 +403,7 @@ namespace MenuOption {
const QString StandOnNearbyFloors = "Stand on nearby floors";
const QString Stars = "Stars";
const QString Stats = "Stats";
const QString StereoAudio = "Stereo Audio";
const QString StopAllScripts = "Stop All Scripts";
const QString SuppressShortTimings = "Suppress Timings Less than 10ms";
const QString TestPing = "Test Ping";


@ -377,7 +377,7 @@ void Avatar::simulateAttachments(float deltaTime) {
if (!isMyAvatar()) {
model->setLODDistance(getLODDistance());
}
if (_skeletonModel.getJointPosition(jointIndex, jointPosition) &&
if (_skeletonModel.getJointPositionInWorldFrame(jointIndex, jointPosition) &&
_skeletonModel.getJointCombinedRotation(jointIndex, jointRotation)) {
model->setTranslation(jointPosition + jointRotation * attachment.translation * _scale);
model->setRotation(jointRotation * attachment.rotation);
@ -713,7 +713,7 @@ glm::vec3 Avatar::getJointPosition(int index) const {
return position;
}
glm::vec3 position;
_skeletonModel.getJointPosition(index, position);
_skeletonModel.getJointPositionInWorldFrame(index, position);
return position;
}
@ -725,7 +725,7 @@ glm::vec3 Avatar::getJointPosition(const QString& name) const {
return position;
}
glm::vec3 position;
_skeletonModel.getJointPosition(getJointIndex(name), position);
_skeletonModel.getJointPositionInWorldFrame(getJointIndex(name), position);
return position;
}


@ -48,10 +48,10 @@ void FaceModel::simulate(float deltaTime, bool fullUpdate) {
void FaceModel::maybeUpdateNeckRotation(const JointState& parentState, const FBXJoint& joint, JointState& state) {
// get the rotation axes in joint space and use them to adjust the rotation
glm::mat3 axes = glm::mat3_cast(_rotation);
glm::mat3 inverse = glm::mat3(glm::inverse(parentState._transform * glm::translate(state.getDefaultTranslationInParentFrame()) *
glm::mat3 axes = glm::mat3_cast(glm::quat());
glm::mat3 inverse = glm::mat3(glm::inverse(parentState.getTransform() * glm::translate(state.getDefaultTranslationInParentFrame()) *
joint.preTransform * glm::mat4_cast(joint.preRotation)));
state._rotation = glm::angleAxis(- RADIANS_PER_DEGREE * _owningHead->getFinalRoll(), glm::normalize(inverse * axes[2]))
state._rotationInParentFrame = glm::angleAxis(- RADIANS_PER_DEGREE * _owningHead->getFinalRoll(), glm::normalize(inverse * axes[2]))
* glm::angleAxis(RADIANS_PER_DEGREE * _owningHead->getFinalYaw(), glm::normalize(inverse * axes[1]))
* glm::angleAxis(- RADIANS_PER_DEGREE * _owningHead->getFinalPitch(), glm::normalize(inverse * axes[0]))
* joint.rotation;
@ -59,14 +59,16 @@ void FaceModel::maybeUpdateNeckRotation(const JointState& parentState, const FBX
void FaceModel::maybeUpdateEyeRotation(const JointState& parentState, const FBXJoint& joint, JointState& state) {
// likewise with the eye joints
glm::mat4 inverse = glm::inverse(parentState._transform * glm::translate(state.getDefaultTranslationInParentFrame()) *
joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation));
glm::vec3 front = glm::vec3(inverse * glm::vec4(_owningHead->getFinalOrientation() * IDENTITY_FRONT, 0.0f));
// NOTE: at the moment we do the math in the world-frame, hence the inverse transform is more complex than usual.
glm::mat4 inverse = glm::inverse(glm::mat4_cast(_rotation) * parentState.getTransform() *
glm::translate(state.getDefaultTranslationInParentFrame()) *
joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation));
glm::vec3 front = glm::vec3(inverse * glm::vec4(_owningHead->getFinalOrientationInWorldFrame() * IDENTITY_FRONT, 0.0f));
glm::vec3 lookAt = glm::vec3(inverse * glm::vec4(_owningHead->getLookAtPosition() +
_owningHead->getSaccade() - _translation, 1.0f));
glm::quat between = rotationBetween(front, lookAt);
const float MAX_ANGLE = 30.0f * RADIANS_PER_DEGREE;
state._rotation = glm::angleAxis(glm::clamp(glm::angle(between), -MAX_ANGLE, MAX_ANGLE), glm::axis(between)) *
state._rotationInParentFrame = glm::angleAxis(glm::clamp(glm::angle(between), -MAX_ANGLE, MAX_ANGLE), glm::axis(between)) *
joint.rotation;
}
@ -92,6 +94,6 @@ bool FaceModel::getEyePositions(glm::vec3& firstEyePosition, glm::vec3& secondEy
return false;
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
return getJointPosition(geometry.leftEyeJointIndex, firstEyePosition) &&
getJointPosition(geometry.rightEyeJointIndex, secondEyePosition);
return getJointPositionInWorldFrame(geometry.leftEyeJointIndex, firstEyePosition) &&
getJointPositionInWorldFrame(geometry.rightEyeJointIndex, secondEyePosition);
}
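maybeUpdateEyeRotation() above limits how far the eyes may rotate toward the look-at point. A standalone sketch of that clamp; glm::rotation from <glm/gtx/quaternion.hpp> stands in for the codebase's rotationBetween helper, and angles are radians assuming GLM_FORCE_RADIANS.

#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>

// Rotate "front" toward "target", but never by more than maxAngle radians.
static glm::quat limitedLookAt(const glm::vec3& front, const glm::vec3& target, float maxAngle) {
    glm::quat between = glm::rotation(glm::normalize(front), glm::normalize(target));
    // glm::angle() is non-negative, so the clamp effectively caps the rotation at maxAngle
    float angle = glm::clamp(glm::angle(between), -maxAngle, maxAngle);
    return glm::angleAxis(angle, glm::axis(between));
}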


@ -188,9 +188,12 @@ void Head::setScale (float scale) {
_scale = scale;
}
glm::quat Head::getFinalOrientation() const {
return _owningAvatar->getOrientation() * glm::quat(glm::radians(
glm::vec3(getFinalPitch(), getFinalYaw(), getFinalRoll() )));
glm::quat Head::getFinalOrientationInWorldFrame() const {
return _owningAvatar->getOrientation() * getFinalOrientationInLocalFrame();
}
glm::quat Head::getFinalOrientationInLocalFrame() const {
return glm::quat(glm::radians(glm::vec3(getFinalPitch(), getFinalYaw(), getFinalRoll() )));
}
glm::quat Head::getCameraOrientation () const {


@ -50,9 +50,13 @@ public:
void setRenderLookatVectors(bool onOff) { _renderLookatVectors = onOff; }
void setLeanSideways(float leanSideways) { _leanSideways = leanSideways; }
void setLeanForward(float leanForward) { _leanForward = leanForward; }
/// \return orientationBase+Delta
glm::quat getFinalOrientationInLocalFrame() const;
/// \return orientationBody * orientationBase+Delta
glm::quat getFinalOrientation() const;
/// \return orientationBody * (orientationBase+Delta)
glm::quat getFinalOrientationInWorldFrame() const;
/// \return orientationBody * orientationBasePitch
glm::quat getCameraOrientation () const;
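The doc comments above define the local frame as the base-plus-delta pitch/yaw/roll and the world frame as that composed with the body orientation. A minimal sketch of the relationship; bodyOrientation stands in for _owningAvatar->getOrientation(), and pitch/yaw/roll are in degrees as Head stores them.

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

static glm::quat headOrientationInWorldFrame(const glm::quat& bodyOrientation,
                                             float pitch, float yaw, float roll) {
    glm::quat localFrame = glm::quat(glm::radians(glm::vec3(pitch, yaw, roll))); // getFinalOrientationInLocalFrame()
    return bodyOrientation * localFrame;                                         // getFinalOrientationInWorldFrame()
}
// Going the other way: localFrame == glm::inverse(bodyOrientation) * worldFrameOrientation.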


@ -236,7 +236,7 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
estimatedRotation = glm::degrees(safeEulerAngles(tracker->getHeadRotation()));
}
}
// Rotate the body if the head is turned beyond the screen
if (Menu::getInstance()->isOptionChecked(MenuOption::TurnWithHead)) {
const float TRACKER_YAW_TURN_SENSITIVITY = 0.5f;
@ -433,11 +433,11 @@ void MyAvatar::removeAnimationHandle(const AnimationHandlePointer& handle) {
}
void MyAvatar::startAnimation(const QString& url, float fps, float priority,
bool loop, bool hold, int firstFrame, int lastFrame, const QStringList& maskedJoints) {
bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startAnimation", Q_ARG(const QString&, url), Q_ARG(float, fps),
Q_ARG(float, priority), Q_ARG(bool, loop), Q_ARG(bool, hold), Q_ARG(int, firstFrame),
Q_ARG(int, lastFrame), Q_ARG(const QStringList&, maskedJoints));
Q_ARG(float, priority), Q_ARG(bool, loop), Q_ARG(bool, hold), Q_ARG(float, firstFrame),
Q_ARG(float, lastFrame), Q_ARG(const QStringList&, maskedJoints));
return;
}
AnimationHandlePointer handle = _skeletonModel.createAnimationHandle();
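The guard at the top of startAnimation() (and of startAnimationByRole() below) is the standard Qt pattern for marshalling a call onto the object's own thread, and it is why the Q_ARG types change from int to float along with the parameter types: this string-based dispatch requires the argument types to match the method signature exactly. A minimal sketch of the pattern; Worker and doWork are hypothetical names.

#include <QObject>
#include <QThread>
#include <QMetaObject>

class Worker : public QObject {
    Q_OBJECT
public:
    Q_INVOKABLE void doWork(float firstFrame, float lastFrame) {
        if (QThread::currentThread() != thread()) {
            // queue the call on the thread that owns this object; Q_ARG types must match the signature
            QMetaObject::invokeMethod(this, "doWork", Q_ARG(float, firstFrame), Q_ARG(float, lastFrame));
            return;
        }
        // ... from here on we are guaranteed to run on the Worker's own thread ...
    }
};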
@ -453,11 +453,11 @@ void MyAvatar::startAnimation(const QString& url, float fps, float priority,
}
void MyAvatar::startAnimationByRole(const QString& role, const QString& url, float fps, float priority,
bool loop, bool hold, int firstFrame, int lastFrame, const QStringList& maskedJoints) {
bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startAnimationByRole", Q_ARG(const QString&, role), Q_ARG(const QString&, url),
Q_ARG(float, fps), Q_ARG(float, priority), Q_ARG(bool, loop), Q_ARG(bool, hold), Q_ARG(int, firstFrame),
Q_ARG(int, lastFrame), Q_ARG(const QStringList&, maskedJoints));
Q_ARG(float, fps), Q_ARG(float, priority), Q_ARG(bool, loop), Q_ARG(bool, hold), Q_ARG(float, firstFrame),
Q_ARG(float, lastFrame), Q_ARG(const QStringList&, maskedJoints));
return;
}
// check for a configured animation for the role
@ -627,8 +627,8 @@ void MyAvatar::loadData(QSettings* settings) {
handle->setLoop(settings->value("loop", true).toBool());
handle->setHold(settings->value("hold", false).toBool());
handle->setStartAutomatically(settings->value("startAutomatically", true).toBool());
handle->setFirstFrame(settings->value("firstFrame", 0).toInt());
handle->setLastFrame(settings->value("lastFrame", INT_MAX).toInt());
handle->setFirstFrame(settings->value("firstFrame", 0.0f).toFloat());
handle->setLastFrame(settings->value("lastFrame", INT_MAX).toFloat());
handle->setMaskedJoints(settings->value("maskedJoints").toStringList());
}
settings->endArray();
@ -724,7 +724,8 @@ void MyAvatar::updateLookAtTargetAvatar() {
Avatar* avatar = static_cast<Avatar*>(avatarPointer.data());
avatar->setIsLookAtTarget(false);
if (!avatar->isMyAvatar()) {
float angleTo = glm::angle(getHead()->getFinalOrientation() * glm::vec3(0.0f, 0.0f, -1.0f),
glm::vec3 DEFAULT_GAZE_IN_HEAD_FRAME = glm::vec3(0.0f, 0.0f, -1.0f);
float angleTo = glm::angle(getHead()->getFinalOrientationInWorldFrame() * DEFAULT_GAZE_IN_HEAD_FRAME,
glm::normalize(avatar->getHead()->getEyePosition() - getHead()->getEyePosition()));
if (angleTo < smallestAngleTo) {
_lookAtTargetAvatar = avatarPointer;


@ -68,7 +68,7 @@ public:
/// Allows scripts to run animations.
Q_INVOKABLE void startAnimation(const QString& url, float fps = 30.0f, float priority = 1.0f, bool loop = false,
bool hold = false, int firstFrame = 0, int lastFrame = INT_MAX, const QStringList& maskedJoints = QStringList());
bool hold = false, float firstFrame = 0.0f, float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList());
/// Stops an animation as identified by a URL.
Q_INVOKABLE void stopAnimation(const QString& url);
@ -76,8 +76,8 @@ public:
/// Starts an animation by its role, using the provided URL and parameters if the avatar doesn't have a custom
/// animation for the role.
Q_INVOKABLE void startAnimationByRole(const QString& role, const QString& url = QString(), float fps = 30.0f,
float priority = 1.0f, bool loop = false, bool hold = false, int firstFrame = 0,
int lastFrame = INT_MAX, const QStringList& maskedJoints = QStringList());
float priority = 1.0f, bool loop = false, bool hold = false, float firstFrame = 0.0f,
float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList());
/// Stops an animation identified by its role.
Q_INVOKABLE void stopAnimationByRole(const QString& role);


@ -21,6 +21,14 @@ SkeletonModel::SkeletonModel(Avatar* owningAvatar) :
_owningAvatar(owningAvatar) {
}
void SkeletonModel::setJointStates(QVector<JointState> states) {
Model::setJointStates(states);
if (isActive() && _owningAvatar->isMyAvatar()) {
_ragDoll.init(_jointStates);
}
}
const float PALM_PRIORITY = 3.0f;
void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
@ -46,7 +54,7 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
int jointIndex = geometry.humanIKJointIndices.at(humanIKJointIndex);
if (jointIndex != -1) {
JointState& state = _jointStates[jointIndex];
state.setRotation(_rotation * prioVR->getJointRotations().at(i), PALM_PRIORITY);
state.setRotationFromBindFrame(prioVR->getJointRotations().at(i), PALM_PRIORITY);
}
}
return;
@ -63,7 +71,9 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
if (_owningAvatar->getHandState() == HAND_STATE_NULL) {
restoreRightHandPosition(HAND_RESTORATION_RATE, PALM_PRIORITY);
} else {
applyHandPosition(geometry.rightHandJointIndex, _owningAvatar->getHandPosition());
// transform into model-frame
glm::vec3 handPosition = glm::inverse(_rotation) * (_owningAvatar->getHandPosition() - _translation);
applyHandPosition(geometry.rightHandJointIndex, handPosition);
}
restoreLeftHandPosition(HAND_RESTORATION_RATE, PALM_PRIORITY);
@ -76,6 +86,21 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
applyPalmData(geometry.leftHandJointIndex, hand->getPalms()[leftPalmIndex]);
applyPalmData(geometry.rightHandJointIndex, hand->getPalms()[rightPalmIndex]);
}
simulateRagDoll(deltaTime);
}
void SkeletonModel::simulateRagDoll(float deltaTime) {
_ragDoll.slaveToSkeleton(_jointStates, 0.1f); // fraction = 0.1f left intentionally low for demo purposes
float MIN_CONSTRAINT_ERROR = 0.005f; // 5mm
int MAX_ITERATIONS = 4;
int iterations = 0;
float delta = 0.0f;
do {
delta = _ragDoll.enforceConstraints();
++iterations;
} while (delta > MIN_CONSTRAINT_ERROR && iterations < MAX_ITERATIONS);
}
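simulateRagDoll() above calls enforceConstraints() repeatedly until the largest correction drops below MIN_CONSTRAINT_ERROR or MAX_ITERATIONS is reached. The RagDoll class itself is not part of this diff, so the following is only an illustrative stand-in for what one such iteration typically does: relax distance constraints between points and report the largest error so the caller knows when to stop.

#include <cmath>
#include <vector>
#include <glm/glm.hpp>

struct DistanceConstraint { int a; int b; float restLength; };

static float relaxConstraints(std::vector<glm::vec3>& points,
                              const std::vector<DistanceConstraint>& constraints) {
    float maxError = 0.0f;
    for (size_t i = 0; i < constraints.size(); ++i) {
        const DistanceConstraint& c = constraints[i];
        glm::vec3 span = points[c.b] - points[c.a];
        float length = glm::length(span);
        if (length < 1.0e-6f) {
            continue;                           // degenerate pair, nothing sensible to correct
        }
        float error = length - c.restLength;
        glm::vec3 correction = 0.5f * error * (span / length);
        points[c.a] += correction;              // split the correction between both endpoints
        points[c.b] -= correction;
        maxError = glm::max(maxError, fabsf(error));
    }
    return maxError;                            // caller iterates while this exceeds its tolerance
}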
void SkeletonModel::getHandShapes(int jointIndex, QVector<const Shape*>& shapes) const {
@ -119,6 +144,9 @@ void SkeletonModel::getBodyShapes(QVector<const Shape*>& shapes) const {
void SkeletonModel::renderIKConstraints() {
renderJointConstraints(getRightHandJointIndex());
renderJointConstraints(getLeftHandJointIndex());
if (isActive() && _owningAvatar->isMyAvatar()) {
renderRagDoll();
}
}
class IndexValue {
@ -135,6 +163,7 @@ void SkeletonModel::applyHandPosition(int jointIndex, const glm::vec3& position)
if (jointIndex == -1 || jointIndex >= _jointStates.size()) {
return;
}
// NOTE: 'position' is in model-frame
setJointPosition(jointIndex, position, glm::quat(), false, -1, false, glm::vec3(0.0f, -1.0f, 0.0f), PALM_PRIORITY);
const FBXGeometry& geometry = _geometry->getFBXGeometry();
@ -147,7 +176,7 @@ void SkeletonModel::applyHandPosition(int jointIndex, const glm::vec3& position)
return;
}
JointState& state = _jointStates[jointIndex];
glm::quat handRotation = state.getJointRotation(true);
glm::quat handRotation = state.getRotation();
// align hand with forearm
float sign = (jointIndex == geometry.rightHandJointIndex) ? 1.0f : -1.0f;
@ -167,36 +196,41 @@ void SkeletonModel::applyPalmData(int jointIndex, PalmData& palm) {
// rotate palm to align with its normal (normal points out of hand's palm)
glm::quat palmRotation;
glm::quat r0, r1;
if (!Menu::getInstance()->isOptionChecked(MenuOption::AlternateIK) &&
Menu::getInstance()->isOptionChecked(MenuOption::AlignForearmsWithWrists)) {
JointState parentState = _jointStates[parentJointIndex];
palmRotation = parentState.getJointRotation(true);
palmRotation = parentState.getRotationFromBindToModelFrame();
r0 = palmRotation;
} else {
JointState state = _jointStates[jointIndex];
palmRotation = state.getJointRotation(true);
palmRotation = state.getRotationFromBindToModelFrame();
}
palmRotation = rotationBetween(palmRotation * geometry.palmDirection, palm.getNormal()) * palmRotation;
glm::quat inverseRotation = glm::inverse(_rotation);
glm::vec3 palmNormal = inverseRotation * palm.getNormal();
palmRotation = rotationBetween(palmRotation * geometry.palmDirection, palmNormal) * palmRotation;
r1 = palmRotation;
// rotate palm to align with finger direction
glm::vec3 direction = palm.getFingerDirection();
glm::vec3 direction = inverseRotation * palm.getFingerDirection();
palmRotation = rotationBetween(palmRotation * glm::vec3(-sign, 0.0f, 0.0f), direction) * palmRotation;
// set hand position, rotation
glm::vec3 palmPosition = inverseRotation * (palm.getPosition() - _translation);
if (Menu::getInstance()->isOptionChecked(MenuOption::AlternateIK)) {
setHandPosition(jointIndex, palm.getPosition(), palmRotation);
setHandPosition(jointIndex, palmPosition, palmRotation);
} else if (Menu::getInstance()->isOptionChecked(MenuOption::AlignForearmsWithWrists)) {
glm::vec3 forearmVector = palmRotation * glm::vec3(sign, 0.0f, 0.0f);
setJointPosition(parentJointIndex, palm.getPosition() + forearmVector *
setJointPosition(parentJointIndex, palmPosition + forearmVector *
geometry.joints.at(jointIndex).distanceToParent * extractUniformScale(_scale),
glm::quat(), false, -1, false, glm::vec3(0.0f, -1.0f, 0.0f), PALM_PRIORITY);
JointState& parentState = _jointStates[parentJointIndex];
parentState.setRotation(palmRotation, PALM_PRIORITY);
// slam parent-relative rotation to identity
_jointStates[jointIndex]._rotation = glm::quat();
parentState.setRotationFromBindFrame(palmRotation, PALM_PRIORITY);
// lock hand to forearm by slamming its rotation (in parent-frame) to identity
_jointStates[jointIndex]._rotationInParentFrame = glm::quat();
} else {
setJointPosition(jointIndex, palm.getPosition(), palmRotation,
setJointPosition(jointIndex, palmPosition, palmRotation,
true, -1, false, glm::vec3(0.0f, -1.0f, 0.0f), PALM_PRIORITY);
}
}
@ -221,9 +255,16 @@ void SkeletonModel::updateJointState(int index) {
Model::updateJointState(index);
if (index == _geometry->getFBXGeometry().rootJointIndex) {
state._transform[3][0] = 0.0f;
state._transform[3][1] = 0.0f;
state._transform[3][2] = 0.0f;
state.clearTransformTranslation();
}
}
void SkeletonModel::updateShapePositions() {
if (isActive() && _owningAvatar->isMyAvatar() &&
Menu::getInstance()->isOptionChecked(MenuOption::CollideAsRagDoll)) {
_ragDoll.updateShapes(_jointShapes, _rotation, _translation);
} else {
Model::updateShapePositions();
}
}
@ -232,10 +273,10 @@ void SkeletonModel::maybeUpdateLeanRotation(const JointState& parentState, const
return;
}
// get the rotation axes in joint space and use them to adjust the rotation
glm::mat3 axes = glm::mat3_cast(_rotation);
glm::mat3 inverse = glm::mat3(glm::inverse(parentState._transform * glm::translate(state.getDefaultTranslationInParentFrame()) *
glm::mat3 axes = glm::mat3_cast(glm::quat());
glm::mat3 inverse = glm::mat3(glm::inverse(parentState.getTransform() * glm::translate(state.getDefaultTranslationInParentFrame()) *
joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation)));
state._rotation = glm::angleAxis(- RADIANS_PER_DEGREE * _owningAvatar->getHead()->getFinalLeanSideways(),
state._rotationInParentFrame = glm::angleAxis(- RADIANS_PER_DEGREE * _owningAvatar->getHead()->getFinalLeanSideways(),
glm::normalize(inverse * axes[2])) * glm::angleAxis(- RADIANS_PER_DEGREE * _owningAvatar->getHead()->getFinalLeanForward(),
glm::normalize(inverse * axes[0])) * joint.rotation;
}
@ -259,11 +300,11 @@ void SkeletonModel::renderJointConstraints(int jointIndex) {
do {
const FBXJoint& joint = geometry.joints.at(jointIndex);
const JointState& jointState = _jointStates.at(jointIndex);
glm::vec3 position = extractTranslation(jointState._transform) + _translation;
glm::vec3 position = _rotation * jointState.getPosition() + _translation;
glPushMatrix();
glTranslatef(position.x, position.y, position.z);
glm::quat parentRotation = (joint.parentIndex == -1) ? _rotation : _jointStates.at(joint.parentIndex)._combinedRotation;
glm::quat parentRotation = (joint.parentIndex == -1) ? _rotation : _rotation * _jointStates.at(joint.parentIndex).getRotation();
glm::vec3 rotationAxis = glm::axis(parentRotation);
glRotatef(glm::degrees(glm::angle(parentRotation)), rotationAxis.x, rotationAxis.y, rotationAxis.z);
float fanScale = directionSize * 0.75f;
@ -296,7 +337,7 @@ void SkeletonModel::renderJointConstraints(int jointIndex) {
}
glPopMatrix();
renderOrientationDirections(position, jointState._combinedRotation, directionSize);
renderOrientationDirections(position, _rotation * jointState.getRotation(), directionSize);
jointIndex = joint.parentIndex;
} while (jointIndex != -1 && geometry.joints.at(jointIndex).isFree);
@ -359,21 +400,21 @@ void SkeletonModel::setHandPosition(int jointIndex, const glm::vec3& position, c
glm::quat shoulderRotation = rotationBetween(forwardVector, elbowPosition - shoulderPosition);
JointState& shoulderState = _jointStates[shoulderJointIndex];
shoulderState.setRotation(shoulderRotation, PALM_PRIORITY);
shoulderState.setRotationFromBindFrame(shoulderRotation, PALM_PRIORITY);
JointState& elbowState = _jointStates[elbowJointIndex];
elbowState.setRotation(rotationBetween(shoulderRotation * forwardVector, wristPosition - elbowPosition) * shoulderRotation, PALM_PRIORITY);
elbowState.setRotationFromBindFrame(rotationBetween(shoulderRotation * forwardVector, wristPosition - elbowPosition) * shoulderRotation, PALM_PRIORITY);
JointState& handState = _jointStates[jointIndex];
handState.setRotation(rotation, PALM_PRIORITY);
handState.setRotationFromBindFrame(rotation, PALM_PRIORITY);
}
bool SkeletonModel::getLeftHandPosition(glm::vec3& position) const {
return getJointPosition(getLeftHandJointIndex(), position);
return getJointPositionInWorldFrame(getLeftHandJointIndex(), position);
}
bool SkeletonModel::getRightHandPosition(glm::vec3& position) const {
return getJointPosition(getRightHandJointIndex(), position);
return getJointPositionInWorldFrame(getRightHandJointIndex(), position);
}
bool SkeletonModel::restoreLeftHandPosition(float fraction, float priority) {
@ -381,7 +422,7 @@ bool SkeletonModel::restoreLeftHandPosition(float fraction, float priority) {
}
bool SkeletonModel::getLeftShoulderPosition(glm::vec3& position) const {
return getJointPosition(getLastFreeJointIndex(getLeftHandJointIndex()), position);
return getJointPositionInWorldFrame(getLastFreeJointIndex(getLeftHandJointIndex()), position);
}
float SkeletonModel::getLeftArmLength() const {
@ -393,7 +434,7 @@ bool SkeletonModel::restoreRightHandPosition(float fraction, float priority) {
}
bool SkeletonModel::getRightShoulderPosition(glm::vec3& position) const {
return getJointPosition(getLastFreeJointIndex(getRightHandJointIndex()), position);
return getJointPositionInWorldFrame(getLastFreeJointIndex(getRightHandJointIndex()), position);
}
float SkeletonModel::getRightArmLength() const {
@ -401,11 +442,11 @@ float SkeletonModel::getRightArmLength() const {
}
bool SkeletonModel::getHeadPosition(glm::vec3& headPosition) const {
return isActive() && getJointPosition(_geometry->getFBXGeometry().headJointIndex, headPosition);
return isActive() && getJointPositionInWorldFrame(_geometry->getFBXGeometry().headJointIndex, headPosition);
}
bool SkeletonModel::getNeckPosition(glm::vec3& neckPosition) const {
return isActive() && getJointPosition(_geometry->getFBXGeometry().neckJointIndex, neckPosition);
return isActive() && getJointPositionInWorldFrame(_geometry->getFBXGeometry().neckJointIndex, neckPosition);
}
bool SkeletonModel::getNeckParentRotation(glm::quat& neckParentRotation) const {
@ -416,7 +457,7 @@ bool SkeletonModel::getNeckParentRotation(glm::quat& neckParentRotation) const {
if (geometry.neckJointIndex == -1) {
return false;
}
return getJointRotation(geometry.joints.at(geometry.neckJointIndex).parentIndex, neckParentRotation);
return getJointRotationInWorldFrame(geometry.joints.at(geometry.neckJointIndex).parentIndex, neckParentRotation);
}
bool SkeletonModel::getEyePositions(glm::vec3& firstEyePosition, glm::vec3& secondEyePosition) const {
@ -424,18 +465,18 @@ bool SkeletonModel::getEyePositions(glm::vec3& firstEyePosition, glm::vec3& seco
return false;
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
if (getJointPosition(geometry.leftEyeJointIndex, firstEyePosition) &&
getJointPosition(geometry.rightEyeJointIndex, secondEyePosition)) {
if (getJointPositionInWorldFrame(geometry.leftEyeJointIndex, firstEyePosition) &&
getJointPositionInWorldFrame(geometry.rightEyeJointIndex, secondEyePosition)) {
return true;
}
// no eye joints; try to estimate based on head/neck joints
glm::vec3 neckPosition, headPosition;
if (getJointPosition(geometry.neckJointIndex, neckPosition) &&
getJointPosition(geometry.headJointIndex, headPosition)) {
if (getJointPositionInWorldFrame(geometry.neckJointIndex, neckPosition) &&
getJointPositionInWorldFrame(geometry.headJointIndex, headPosition)) {
const float EYE_PROPORTION = 0.6f;
glm::vec3 baseEyePosition = glm::mix(neckPosition, headPosition, EYE_PROPORTION);
glm::quat headRotation;
getJointRotation(geometry.headJointIndex, headRotation);
getJointRotationInWorldFrame(geometry.headJointIndex, headRotation);
const float EYES_FORWARD = 0.25f;
const float EYE_SEPARATION = 0.1f;
float headHeight = glm::distance(neckPosition, headPosition);
@ -446,3 +487,30 @@ bool SkeletonModel::getEyePositions(glm::vec3& firstEyePosition, glm::vec3& seco
return false;
}
void SkeletonModel::renderRagDoll() {
const int BALL_SUBDIVISIONS = 6;
glDisable(GL_DEPTH_TEST);
glDisable(GL_LIGHTING);
glPushMatrix();
Application::getInstance()->loadTranslatedViewMatrix(_translation);
QVector<glm::vec3> points = _ragDoll.getPoints();
int numPoints = points.size();
float alpha = 0.3f;
float radius1 = 0.008f;
float radius2 = 0.01f;
for (int i = 0; i < numPoints; ++i) {
glPushMatrix();
// draw each point as a yellow hexagon with black border
glm::vec3 position = _rotation * points[i];
glTranslatef(position.x, position.y, position.z);
glColor4f(0.0f, 0.0f, 0.0f, alpha);
glutSolidSphere(radius2, BALL_SUBDIVISIONS, BALL_SUBDIVISIONS);
glColor4f(1.0f, 1.0f, 0.0f, alpha);
glutSolidSphere(radius1, BALL_SUBDIVISIONS, BALL_SUBDIVISIONS);
glPopMatrix();
}
glPopMatrix();
glEnable(GL_DEPTH_TEST);
glEnable(GL_LIGHTING);
}


@ -13,6 +13,7 @@
#define hifi_SkeletonModel_h
#include "renderer/Model.h"
#include "renderer/RagDoll.h"
class Avatar;
@ -23,8 +24,12 @@ class SkeletonModel : public Model {
public:
SkeletonModel(Avatar* owningAvatar);
void setJointStates(QVector<JointState> states);
void simulate(float deltaTime, bool fullUpdate = true);
void simulateRagDoll(float deltaTime);
void updateShapePositions();
/// \param jointIndex index of hand joint
/// \param shapes[out] list in which is stored pointers to hand shapes
@ -89,8 +94,11 @@ public:
/// \return whether or not both eye meshes were found
bool getEyePositions(glm::vec3& firstEyePosition, glm::vec3& secondEyePosition) const;
void renderRagDoll();
protected:
/// \param jointIndex index of joint in model
/// \param position position of joint in model-frame
void applyHandPosition(int jointIndex, const glm::vec3& position);
void applyPalmData(int jointIndex, PalmData& palm);
@ -105,9 +113,14 @@ protected:
private:
void renderJointConstraints(int jointIndex);
/// \param jointIndex index of joint in model
/// \param position position of joint in model-frame
/// \param rotation rotation of joint in model-frame
void setHandPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation);
Avatar* _owningAvatar;
RagDoll _ragDoll;
};
#endif // hifi_SkeletonModel_h


@ -76,23 +76,24 @@ static void setPalm(float deltaTime, int index) {
}
}
// NOTE: this math is done in the world-frame with unnecessary complexity.
// TODO: transform this to stay in the model-frame.
glm::vec3 position;
glm::quat rotation;
SkeletonModel* skeletonModel = &Application::getInstance()->getAvatar()->getSkeletonModel();
int jointIndex;
glm::quat inverseRotation = glm::inverse(Application::getInstance()->getAvatar()->getOrientation());
if (index == LEFT_HAND_INDEX) {
jointIndex = skeletonModel->getLeftHandJointIndex();
skeletonModel->getJointRotation(jointIndex, rotation, true);
skeletonModel->getJointRotationInWorldFrame(jointIndex, rotation);
rotation = inverseRotation * rotation * glm::quat(glm::vec3(0.0f, PI_OVER_TWO, 0.0f));
} else {
jointIndex = skeletonModel->getRightHandJointIndex();
skeletonModel->getJointRotation(jointIndex, rotation, true);
skeletonModel->getJointRotationInWorldFrame(jointIndex, rotation);
rotation = inverseRotation * rotation * glm::quat(glm::vec3(0.0f, -PI_OVER_TWO, 0.0f));
}
skeletonModel->getJointPosition(jointIndex, position);
skeletonModel->getJointPositionInWorldFrame(jointIndex, position);
position = inverseRotation * (position - skeletonModel->getTranslation());
palm->setRawRotation(rotation);


@ -0,0 +1,101 @@
//
// JointState.cpp
// interface/src/renderer
//
// Created by Andrzej Kapolka on 10/18/13.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <glm/gtx/norm.hpp>
//#include <GeometryUtil.h>
#include <SharedUtil.h>
#include "JointState.h"
JointState::JointState() :
_animationPriority(0.0f),
_fbxJoint(NULL) {
}
void JointState::setFBXJoint(const FBXJoint* joint) {
assert(joint != NULL);
_rotationInParentFrame = joint->rotation;
// NOTE: JointState does not own the FBXJoint to which it points.
_fbxJoint = joint;
}
void JointState::copyState(const JointState& state) {
_rotationInParentFrame = state._rotationInParentFrame;
_transform = state._transform;
_rotation = extractRotation(_transform);
_animationPriority = state._animationPriority;
// DO NOT copy _fbxJoint
}
void JointState::computeTransform(const glm::mat4& parentTransform) {
glm::quat modifiedRotation = _fbxJoint->preRotation * _rotationInParentFrame * _fbxJoint->postRotation;
glm::mat4 modifiedTransform = _fbxJoint->preTransform * glm::mat4_cast(modifiedRotation) * _fbxJoint->postTransform;
_transform = parentTransform * glm::translate(_fbxJoint->translation) * modifiedTransform;
_rotation = extractRotation(_transform);
}
glm::quat JointState::getRotationFromBindToModelFrame() const {
return _rotation * _fbxJoint->inverseBindRotation;
}
void JointState::restoreRotation(float fraction, float priority) {
assert(_fbxJoint != NULL);
if (priority == _animationPriority || _animationPriority == 0.0f) {
_rotationInParentFrame = safeMix(_rotationInParentFrame, _fbxJoint->rotation, fraction);
_animationPriority = 0.0f;
}
}
void JointState::setRotationFromBindFrame(const glm::quat& rotation, float priority) {
assert(_fbxJoint != NULL);
if (priority >= _animationPriority) {
// rotation is from bind- to model-frame
_rotationInParentFrame = _rotationInParentFrame * glm::inverse(_rotation) * rotation * glm::inverse(_fbxJoint->inverseBindRotation);
_animationPriority = priority;
}
}
void JointState::clearTransformTranslation() {
_transform[3][0] = 0.0f;
_transform[3][1] = 0.0f;
_transform[3][2] = 0.0f;
}
void JointState::setRotation(const glm::quat& rotation, bool constrain, float priority) {
applyRotationDelta(rotation * glm::inverse(_rotation), true, priority);
}
void JointState::applyRotationDelta(const glm::quat& delta, bool constrain, float priority) {
// NOTE: delta is in jointParent-frame
assert(_fbxJoint != NULL);
if (priority < _animationPriority) {
return;
}
_animationPriority = priority;
if (!constrain || (_fbxJoint->rotationMin == glm::vec3(-PI, -PI, -PI) &&
_fbxJoint->rotationMax == glm::vec3(PI, PI, PI))) {
// no constraints
_rotationInParentFrame = _rotationInParentFrame * glm::inverse(_rotation) * delta * _rotation;
_rotation = delta * _rotation;
return;
}
glm::quat targetRotation = delta * _rotation;
glm::vec3 eulers = safeEulerAngles(_rotationInParentFrame * glm::inverse(_rotation) * targetRotation);
glm::quat newRotation = glm::quat(glm::clamp(eulers, _fbxJoint->rotationMin, _fbxJoint->rotationMax));
_rotation = _rotation * glm::inverse(_rotationInParentFrame) * newRotation;
_rotationInParentFrame = newRotation;
}
const glm::vec3& JointState::getDefaultTranslationInParentFrame() const {
assert(_fbxJoint != NULL);
return _fbxJoint->translation;
}
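The constrained branch of applyRotationDelta() above clamps the joint's parent-frame rotation to the Euler limits stored on the FBXJoint. A standalone sketch of that clamp; glm::eulerAngles stands in for the codebase's safeEulerAngles, with angles in radians assuming GLM_FORCE_RADIANS.

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

static glm::quat clampToJointLimits(const glm::quat& targetInParentFrame,
                                    const glm::vec3& rotationMin, const glm::vec3& rotationMax) {
    glm::vec3 eulers = glm::eulerAngles(targetInParentFrame);        // pitch/yaw/roll in radians
    return glm::quat(glm::clamp(eulers, rotationMin, rotationMax));  // rebuild from the clamped angles
}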


@ -0,0 +1,66 @@
//
// JointState.h
// interface/src/renderer
//
// Created by Andrzej Kapolka on 10/18/13.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_JointState_h
#define hifi_JointState_h
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/transform.hpp>
#include <FBXReader.h>
class JointState {
public:
JointState();
void setFBXJoint(const FBXJoint* joint);
const FBXJoint& getFBXJoint() const { return *_fbxJoint; }
void copyState(const JointState& state);
void computeTransform(const glm::mat4& parentTransform);
const glm::mat4& getTransform() const { return _transform; }
glm::quat getRotation() const { return _rotation; }
glm::vec3 getPosition() const { return extractTranslation(_transform); }
/// \return rotation from bind to model frame
glm::quat getRotationFromBindToModelFrame() const;
/// \param rotation rotation of joint in model-frame
void setRotation(const glm::quat& rotation, bool constrain, float priority);
/// \param delta is in the jointParent-frame
void applyRotationDelta(const glm::quat& delta, bool constrain = true, float priority = 1.0f);
const glm::vec3& getDefaultTranslationInParentFrame() const;
void restoreRotation(float fraction, float priority);
/// \param rotation is from bind- to model-frame
/// computes and sets new _rotationInParentFrame
/// NOTE: the JointState's model-frame transform/rotation are NOT updated!
void setRotationFromBindFrame(const glm::quat& rotation, float priority);
void clearTransformTranslation();
glm::quat _rotationInParentFrame; // joint- to parentJoint-frame
float _animationPriority; // the priority of the animation affecting this joint
private:
glm::mat4 _transform; // joint- to model-frame
glm::quat _rotation; // joint- to model-frame
const FBXJoint* _fbxJoint; // JointState does NOT own its FBXJoint
};
#endif // hifi_JointState_h


@ -166,38 +166,21 @@ QVector<JointState> Model::createJointStates(const FBXGeometry& geometry) {
jointStates.append(state);
}
// compute transforms
// Unfortunately, the joints are not necessarily in order from parents to children,
// so we must iterate over the list multiple times until all are set correctly.
QVector<bool> jointIsSet;
// compute model transforms
int numJoints = jointStates.size();
jointIsSet.fill(false, numJoints);
int numJointsSet = 0;
int lastNumJointsSet = -1;
while (numJointsSet < numJoints && numJointsSet != lastNumJointsSet) {
lastNumJointsSet = numJointsSet;
for (int i = 0; i < numJoints; ++i) {
if (jointIsSet[i]) {
continue;
}
JointState& state = jointStates[i];
const FBXJoint& joint = state.getFBXJoint();
int parentIndex = joint.parentIndex;
if (parentIndex == -1) {
_rootIndex = i;
glm::mat4 baseTransform = glm::mat4_cast(_rotation) * glm::scale(_scale) * glm::translate(_offset) * geometry.offset;
state.computeTransforms(baseTransform, _rotation);
++numJointsSet;
jointIsSet[i] = true;
} else if (jointIsSet[parentIndex]) {
const JointState& parentState = jointStates.at(parentIndex);
state.computeTransforms(parentState._transform, parentState._combinedRotation);
++numJointsSet;
jointIsSet[i] = true;
}
for (int i = 0; i < numJoints; ++i) {
JointState& state = jointStates[i];
const FBXJoint& joint = state.getFBXJoint();
int parentIndex = joint.parentIndex;
if (parentIndex == -1) {
_rootIndex = i;
glm::mat4 parentTransform = glm::scale(_scale) * glm::translate(_offset) * geometry.offset;
state.computeTransform(parentTransform);
} else {
const JointState& parentState = jointStates.at(parentIndex);
state.computeTransform(parentState.getTransform());
}
}
return jointStates;
}
@ -476,7 +459,7 @@ void Model::reset() {
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
for (int i = 0; i < _jointStates.size(); i++) {
_jointStates[i]._rotation = geometry.joints.at(i).rotation;
_jointStates[i]._rotationInParentFrame = geometry.joints.at(i).rotation;
}
}
@ -527,12 +510,12 @@ bool Model::updateGeometry() {
deleteGeometry();
_dilatedTextures.clear();
_geometry = geometry;
_jointStates = newJointStates;
setJointStates(newJointStates);
needToRebuild = true;
} else if (_jointStates.isEmpty()) {
const FBXGeometry& fbxGeometry = geometry->getFBXGeometry();
if (fbxGeometry.joints.size() > 0) {
_jointStates = createJointStates(fbxGeometry);
setJointStates(createJointStates(fbxGeometry));
needToRebuild = true;
}
} else if (!geometry->isLoaded()) {
@ -574,6 +557,11 @@ bool Model::updateGeometry() {
return needFullUpdate;
}
// virtual
void Model::setJointStates(QVector<JointState> states) {
_jointStates = states;
}
bool Model::render(float alpha, RenderMode mode, bool receiveShadows) {
// render the attachments
foreach (Model* attachment, _attachments) {
@ -686,7 +674,7 @@ bool Model::getJointState(int index, glm::quat& rotation) const {
if (index == -1 || index >= _jointStates.size()) {
return false;
}
rotation = _jointStates.at(index)._rotation;
rotation = _jointStates.at(index)._rotationInParentFrame;
const glm::quat& defaultRotation = _geometry->getFBXGeometry().joints.at(index).rotation;
return glm::abs(rotation.x - defaultRotation.x) >= EPSILON ||
glm::abs(rotation.y - defaultRotation.y) >= EPSILON ||
@ -699,7 +687,7 @@ void Model::setJointState(int index, bool valid, const glm::quat& rotation, floa
JointState& state = _jointStates[index];
if (priority >= state._animationPriority) {
if (valid) {
state._rotation = rotation;
state._rotationInParentFrame = rotation;
state._animationPriority = priority;
} else {
state.restoreRotation(1.0f, priority);
@ -731,19 +719,29 @@ void Model::setURL(const QUrl& url, const QUrl& fallback, bool retainCurrent, bo
}
}
bool Model::getJointPositionInWorldFrame(int jointIndex, glm::vec3& position) const {
if (jointIndex == -1 || jointIndex >= _jointStates.size()) {
return false;
}
// position is in world-frame
position = _translation + _rotation * _jointStates[jointIndex].getPosition();
return true;
}
bool Model::getJointPosition(int jointIndex, glm::vec3& position) const {
if (jointIndex == -1 || jointIndex >= _jointStates.size()) {
return false;
}
position = _translation + extractTranslation(_jointStates[jointIndex]._transform);
// position is in model-frame
position = extractTranslation(_jointStates[jointIndex].getTransform());
return true;
}
bool Model::getJointRotation(int jointIndex, glm::quat& rotation, bool fromBind) const {
bool Model::getJointRotationInWorldFrame(int jointIndex, glm::quat& rotation) const {
if (jointIndex == -1 || jointIndex >= _jointStates.size()) {
return false;
}
rotation = _jointStates[jointIndex].getJointRotation(fromBind);
rotation = _rotation * _jointStates[jointIndex].getRotation();
return true;
}
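The ...InWorldFrame accessors above differ from their model-frame counterparts only by the model-to-world transform. A minimal sketch of that conversion for a point and for a rotation; modelTranslation and modelRotation stand in for the Model's _translation and _rotation.

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

static glm::vec3 pointToWorldFrame(const glm::vec3& positionInModelFrame,
                                   const glm::vec3& modelTranslation, const glm::quat& modelRotation) {
    return modelTranslation + modelRotation * positionInModelFrame;   // rotate into world axes, then offset
}

static glm::quat rotationToWorldFrame(const glm::quat& rotationInModelFrame, const glm::quat& modelRotation) {
    return modelRotation * rotationInModelFrame;                      // compose with the model's world rotation
}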
@ -751,7 +749,7 @@ bool Model::getJointCombinedRotation(int jointIndex, glm::quat& rotation) const
if (jointIndex == -1 || jointIndex >= _jointStates.size()) {
return false;
}
rotation = _jointStates[jointIndex]._combinedRotation;
rotation = _rotation * _jointStates[jointIndex].getRotation();
return true;
}
@ -963,15 +961,16 @@ void Model::updateShapePositions() {
glm::vec3 rootPosition(0.0f);
_boundingRadius = 0.0f;
float uniformScale = extractUniformScale(_scale);
const FBXGeometry& geometry = _geometry->getFBXGeometry();
for (int i = 0; i < _jointStates.size(); i++) {
const FBXJoint& joint = geometry.joints[i];
const JointState& state = _jointStates[i];
const FBXJoint& joint = state.getFBXJoint();
// shape position and rotation need to be in world-frame
glm::vec3 jointToShapeOffset = uniformScale * (_jointStates[i]._combinedRotation * joint.shapePosition);
glm::vec3 worldPosition = extractTranslation(_jointStates[i]._transform) + jointToShapeOffset + _translation;
glm::quat stateRotation = state.getRotation();
glm::vec3 shapeOffset = uniformScale * (stateRotation * joint.shapePosition);
glm::vec3 worldPosition = _translation + _rotation * (state.getPosition() + shapeOffset);
Shape* shape = _jointShapes[i];
shape->setPosition(worldPosition);
shape->setRotation(_jointStates[i]._combinedRotation * joint.shapeRotation);
shape->setRotation(_rotation * stateRotation * joint.shapeRotation);
float distance = glm::distance(worldPosition, _translation) + shape->getBoundingRadius();
if (distance > _boundingRadius) {
_boundingRadius = distance;
@ -993,12 +992,12 @@ bool Model::findRayIntersection(const glm::vec3& origin, const glm::vec3& direct
float radiusScale = extractUniformScale(_scale);
for (int i = 0; i < _jointStates.size(); i++) {
const FBXJoint& joint = geometry.joints[i];
glm::vec3 end = extractTranslation(_jointStates[i]._transform);
glm::vec3 end = _translation + _rotation * _jointStates[i].getPosition();
float endRadius = joint.boneRadius * radiusScale;
glm::vec3 start = end;
float startRadius = joint.boneRadius * radiusScale;
if (joint.parentIndex != -1) {
start = extractTranslation(_jointStates[joint.parentIndex]._transform);
start = _translation + _rotation * _jointStates[joint.parentIndex].getPosition();
startRadius = geometry.joints[joint.parentIndex].boneRadius * radiusScale;
}
// for now, use average of start and end radii
@ -1208,8 +1207,8 @@ void Model::simulateInternal(float deltaTime) {
glm::vec3 jointTranslation = _translation;
glm::quat jointRotation = _rotation;
getJointPosition(attachment.jointIndex, jointTranslation);
getJointRotation(attachment.jointIndex, jointRotation);
getJointPositionInWorldFrame(attachment.jointIndex, jointTranslation);
getJointRotationInWorldFrame(attachment.jointIndex, jointRotation);
model->setTranslation(jointTranslation + jointRotation * attachment.translation * _scale);
model->setRotation(jointRotation * attachment.rotation);
@ -1220,12 +1219,13 @@ void Model::simulateInternal(float deltaTime) {
}
}
glm::mat4 modelToWorld = glm::mat4_cast(_rotation);
for (int i = 0; i < _meshStates.size(); i++) {
MeshState& state = _meshStates[i];
const FBXMesh& mesh = geometry.meshes.at(i);
for (int j = 0; j < mesh.clusters.size(); j++) {
const FBXCluster& cluster = mesh.clusters.at(j);
state.clusterMatrices[j] = _jointStates[cluster.jointIndex]._transform * cluster.inverseBindMatrix;
state.clusterMatrices[j] = modelToWorld * _jointStates[cluster.jointIndex].getTransform() * cluster.inverseBindMatrix;
}
}
@ -1239,22 +1239,23 @@ void Model::updateJointState(int index) {
JointState& state = _jointStates[index];
const FBXJoint& joint = state.getFBXJoint();
if (joint.parentIndex == -1) {
// compute model transforms
int parentIndex = joint.parentIndex;
if (parentIndex == -1) {
const FBXGeometry& geometry = _geometry->getFBXGeometry();
glm::mat4 baseTransform = glm::mat4_cast(_rotation) * glm::scale(_scale) * glm::translate(_offset) * geometry.offset;
state.computeTransforms(baseTransform, _rotation);
glm::mat4 parentTransform = glm::scale(_scale) * glm::translate(_offset) * geometry.offset;
state.computeTransform(parentTransform);
} else {
const JointState& parentState = _jointStates.at(joint.parentIndex);
state.computeTransforms(parentState._transform, parentState._combinedRotation);
const JointState& parentState = _jointStates.at(parentIndex);
state.computeTransform(parentState.getTransform());
}
}
bool Model::setJointPosition(int jointIndex, const glm::vec3& translation, const glm::quat& rotation, bool useRotation,
bool Model::setJointPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation, bool useRotation,
int lastFreeIndex, bool allIntermediatesFree, const glm::vec3& alignment, float priority) {
if (jointIndex == -1 || _jointStates.isEmpty()) {
return false;
}
glm::vec3 relativePosition = translation - _translation;
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const QVector<int>& freeLineage = geometry.joints.at(jointIndex).freeLineage;
if (freeLineage.isEmpty()) {
@ -1267,21 +1268,19 @@ bool Model::setJointPosition(int jointIndex, const glm::vec3& translation, const
// this is a cyclic coordinate descent algorithm: see
// http://www.ryanjuckett.com/programming/animation/21-cyclic-coordinate-descent-in-2d
const int ITERATION_COUNT = 1;
glm::vec3 worldAlignment = _rotation * alignment;
glm::vec3 worldAlignment = alignment;
for (int i = 0; i < ITERATION_COUNT; i++) {
// first, try to rotate the end effector as close as possible to the target rotation, if any
glm::quat endRotation;
if (useRotation) {
JointState& state = _jointStates[jointIndex];
// TODO: figure out what this is trying to do and combine it into one JointState method
endRotation = state.getJointRotation(true);
state.applyRotationDelta(rotation * glm::inverse(endRotation), true, priority);
endRotation = state.getJointRotation(true);
state.setRotation(rotation, true, priority);
endRotation = state.getRotation();
}
// then, we go from the joint upwards, rotating the end as close as possible to the target
glm::vec3 endPosition = extractTranslation(_jointStates[jointIndex]._transform);
glm::vec3 endPosition = extractTranslation(_jointStates[jointIndex].getTransform());
for (int j = 1; freeLineage.at(j - 1) != lastFreeIndex; j++) {
int index = freeLineage.at(j);
JointState& state = _jointStates[index];
@ -1289,18 +1288,18 @@ bool Model::setJointPosition(int jointIndex, const glm::vec3& translation, const
if (!(joint.isFree || allIntermediatesFree)) {
continue;
}
glm::vec3 jointPosition = extractTranslation(state._transform);
glm::vec3 jointPosition = extractTranslation(state.getTransform());
glm::vec3 jointVector = endPosition - jointPosition;
glm::quat oldCombinedRotation = state._combinedRotation;
glm::quat oldCombinedRotation = state.getRotation();
glm::quat combinedDelta;
float combinedWeight;
if (useRotation) {
combinedDelta = safeMix(rotation * glm::inverse(endRotation),
rotationBetween(jointVector, relativePosition - jointPosition), 0.5f);
rotationBetween(jointVector, position - jointPosition), 0.5f);
combinedWeight = 2.0f;
} else {
combinedDelta = rotationBetween(jointVector, relativePosition - jointPosition);
combinedDelta = rotationBetween(jointVector, position - jointPosition);
combinedWeight = 1.0f;
}
if (alignment != glm::vec3() && j > 1) {
@ -1309,7 +1308,7 @@ bool Model::setJointPosition(int jointIndex, const glm::vec3& translation, const
for (int k = j - 1; k > 0; k--) {
int index = freeLineage.at(k);
updateJointState(index);
positionSum += extractTranslation(_jointStates.at(index)._transform);
positionSum += extractTranslation(_jointStates.at(index).getTransform());
}
glm::vec3 projectedCenterOfMass = glm::cross(jointVector,
glm::cross(positionSum / (j - 1.0f) - jointPosition, jointVector));
@ -1321,7 +1320,7 @@ bool Model::setJointPosition(int jointIndex, const glm::vec3& translation, const
}
}
state.applyRotationDelta(combinedDelta, true, priority);
glm::quat actualDelta = state._combinedRotation * glm::inverse(oldCombinedRotation);
glm::quat actualDelta = state.getRotation() * glm::inverse(oldCombinedRotation);
endPosition = actualDelta * jointVector + jointPosition;
if (useRotation) {
endRotation = actualDelta * endRotation;
@ -1344,7 +1343,7 @@ bool Model::restoreJointPosition(int jointIndex, float fraction, float priority)
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const QVector<int>& freeLineage = geometry.joints.at(jointIndex).freeLineage;
foreach (int index, freeLineage) {
JointState& state = _jointStates[index];
state.restoreRotation(fraction, priority);
@ -1470,12 +1469,12 @@ void Model::applyCollision(CollisionInfo& collision) {
glm::vec3 jointPosition(0.0f);
int jointIndex = collision._intData;
if (getJointPosition(jointIndex, jointPosition)) {
if (getJointPositionInWorldFrame(jointIndex, jointPosition)) {
const FBXJoint& joint = _geometry->getFBXGeometry().joints[jointIndex];
if (joint.parentIndex != -1) {
// compute the approximate distance (travel) that the joint needs to move
glm::vec3 start;
getJointPosition(joint.parentIndex, start);
getJointPositionInWorldFrame(joint.parentIndex, start);
glm::vec3 contactPoint = collision._contactPoint - start;
glm::vec3 penetrationEnd = contactPoint + collision._penetration;
glm::vec3 axis = glm::cross(contactPoint, penetrationEnd);
@ -1486,8 +1485,9 @@ void Model::applyCollision(CollisionInfo& collision) {
float angle = asinf(travel / (glm::length(contactPoint) * glm::length(penetrationEnd)));
axis = glm::normalize(axis);
glm::vec3 end;
getJointPosition(jointIndex, end);
glm::vec3 newEnd = start + glm::angleAxis(angle, axis) * (end - start);
getJointPositionInWorldFrame(jointIndex, end);
// transform into model-frame
glm::vec3 newEnd = glm::inverse(_rotation) * (start + glm::angleAxis(angle, axis) * (end - start) - _translation);
// try to move it
setJointPosition(jointIndex, newEnd, glm::quat(), false, -1, true);
}
@ -1895,8 +1895,8 @@ AnimationHandle::AnimationHandle(Model* model) :
_loop(false),
_hold(false),
_startAutomatically(false),
_firstFrame(0),
_lastFrame(INT_MAX),
_firstFrame(0.0f),
_lastFrame(FLT_MAX),
_running(false) {
}
@ -1927,41 +1927,40 @@ void AnimationHandle::simulate(float deltaTime) {
stop();
return;
}
int lastFrameIndex = qMin(_lastFrame, animationGeometry.animationFrames.size() - 1);
int firstFrameIndex = qMin(_firstFrame, lastFrameIndex);
if ((!_loop && _frameIndex >= lastFrameIndex) || firstFrameIndex == lastFrameIndex) {
float endFrameIndex = qMin(_lastFrame, animationGeometry.animationFrames.size() - (_loop ? 0.0f : 1.0f));
float startFrameIndex = qMin(_firstFrame, endFrameIndex);
if ((!_loop && (_frameIndex < startFrameIndex || _frameIndex > endFrameIndex)) || startFrameIndex == endFrameIndex) {
// passed the end; apply the last frame
const FBXAnimationFrame& frame = animationGeometry.animationFrames.at(lastFrameIndex);
for (int i = 0; i < _jointMappings.size(); i++) {
int mapping = _jointMappings.at(i);
if (mapping != -1) {
JointState& state = _model->_jointStates[mapping];
if (_priority >= state._animationPriority) {
state._rotation = frame.rotations.at(i);
state._animationPriority = _priority;
}
}
}
applyFrame(glm::clamp(_frameIndex, startFrameIndex, endFrameIndex));
if (!_hold) {
stop();
}
return;
}
int frameCount = lastFrameIndex - firstFrameIndex + 1;
_frameIndex = firstFrameIndex + glm::mod(qMax(_frameIndex - firstFrameIndex, 0.0f), (float)frameCount);
// wrap within the desired range
if (_frameIndex < startFrameIndex) {
_frameIndex = endFrameIndex - glm::mod(endFrameIndex - _frameIndex, endFrameIndex - startFrameIndex);
} else if (_frameIndex > endFrameIndex) {
_frameIndex = startFrameIndex + glm::mod(_frameIndex - startFrameIndex, endFrameIndex - startFrameIndex);
}
// blend between the closest two frames
const FBXAnimationFrame& ceilFrame = animationGeometry.animationFrames.at(
firstFrameIndex + ((int)glm::ceil(_frameIndex) - firstFrameIndex) % frameCount);
const FBXAnimationFrame& floorFrame = animationGeometry.animationFrames.at(
firstFrameIndex + ((int)glm::floor(_frameIndex) - firstFrameIndex) % frameCount);
float frameFraction = glm::fract(_frameIndex);
applyFrame(_frameIndex);
}
void AnimationHandle::applyFrame(float frameIndex) {
const FBXGeometry& animationGeometry = _animation->getGeometry();
int frameCount = animationGeometry.animationFrames.size();
const FBXAnimationFrame& floorFrame = animationGeometry.animationFrames.at((int)glm::floor(frameIndex) % frameCount);
const FBXAnimationFrame& ceilFrame = animationGeometry.animationFrames.at((int)glm::ceil(frameIndex) % frameCount);
float frameFraction = glm::fract(frameIndex);
for (int i = 0; i < _jointMappings.size(); i++) {
int mapping = _jointMappings.at(i);
if (mapping != -1) {
JointState& state = _model->_jointStates[mapping];
if (_priority >= state._animationPriority) {
state._rotation = safeMix(floorFrame.rotations.at(i), ceilFrame.rotations.at(i), frameFraction);
state._rotationInParentFrame = safeMix(floorFrame.rotations.at(i), ceilFrame.rotations.at(i), frameFraction);
state._animationPriority = _priority;
}
}
@ -1980,79 +1979,3 @@ void AnimationHandle::replaceMatchingPriorities(float newPriority) {
}
}
// ----------------------------------------------------------------------------
// JointState TODO: move this class to its own files
// ----------------------------------------------------------------------------
JointState::JointState() :
_animationPriority(0.0f),
_fbxJoint(NULL) {
}
void JointState::setFBXJoint(const FBXJoint* joint) {
assert(joint != NULL);
_rotation = joint->rotation;
// NOTE: JointState does not own the FBXJoint to which it points.
_fbxJoint = joint;
}
void JointState::copyState(const JointState& state) {
_rotation = state._rotation;
_transform = state._transform;
_combinedRotation = state._combinedRotation;
_animationPriority = state._animationPriority;
// DO NOT copy _fbxJoint
}
void JointState::computeTransforms(const glm::mat4& baseTransform, const glm::quat& baseRotation) {
assert(_fbxJoint != NULL);
glm::quat combinedRotation = _fbxJoint->preRotation * _rotation * _fbxJoint->postRotation;
_transform = baseTransform * glm::translate(_fbxJoint->translation) * _fbxJoint->preTransform
* glm::mat4_cast(combinedRotation) * _fbxJoint->postTransform;
_combinedRotation = baseRotation * combinedRotation;
}
glm::quat JointState::getJointRotation(bool fromBind) const {
assert(_fbxJoint != NULL);
return _combinedRotation * (fromBind ? _fbxJoint->inverseBindRotation : _fbxJoint->inverseDefaultRotation);
}
void JointState::restoreRotation(float fraction, float priority) {
assert(_fbxJoint != NULL);
if (priority == _animationPriority) {
_rotation = safeMix(_rotation, _fbxJoint->rotation, fraction);
_animationPriority = 0.0f;
}
}
void JointState::setRotation(const glm::quat& rotation, float priority) {
assert(_fbxJoint != NULL);
if (priority >= _animationPriority) {
_rotation = _rotation * glm::inverse(_combinedRotation) * rotation * glm::inverse(_fbxJoint->inverseBindRotation);
_animationPriority = priority;
}
}
void JointState::applyRotationDelta(const glm::quat& delta, bool constrain, float priority) {
assert(_fbxJoint != NULL);
if (priority < _animationPriority) {
return;
}
_animationPriority = priority;
if (!constrain || (_fbxJoint->rotationMin == glm::vec3(-PI, -PI, -PI) &&
_fbxJoint->rotationMax == glm::vec3(PI, PI, PI))) {
// no constraints
_rotation = _rotation * glm::inverse(_combinedRotation) * delta * _combinedRotation;
_combinedRotation = delta * _combinedRotation;
return;
}
glm::quat targetRotation = delta * _combinedRotation;
glm::vec3 eulers = safeEulerAngles(_rotation * glm::inverse(_combinedRotation) * targetRotation);
glm::quat newRotation = glm::quat(glm::clamp(eulers, _fbxJoint->rotationMin, _fbxJoint->rotationMax));
_combinedRotation = _combinedRotation * glm::inverse(_rotation) * newRotation;
_rotation = newRotation;
}
const glm::vec3& JointState::getDefaultTranslationInParentFrame() const {
assert(_fbxJoint != NULL);
return _fbxJoint->translation;
}
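A note on the frame wrapping above: when looping, _frameIndex is folded back into [startFrameIndex, endFrameIndex] with glm::mod. A minimal standalone sketch of that wrap (the helper name and the sample values are illustrative, not part of the codebase):
#include <glm/glm.hpp>
// Mirrors the wrap logic in AnimationHandle::simulate(); assumes endFrame > startFrame.
float wrapFrameIndex(float frameIndex, float startFrame, float endFrame) {
    float range = endFrame - startFrame;
    if (frameIndex < startFrame) {
        // e.g. wrapFrameIndex(-1.5f, 0.0f, 10.0f) yields 8.5f
        return endFrame - glm::mod(endFrame - frameIndex, range);
    } else if (frameIndex > endFrame) {
        // e.g. wrapFrameIndex(12.5f, 0.0f, 10.0f) yields 2.5f
        return startFrame + glm::mod(frameIndex - startFrame, range);
    }
    return frameIndex;
}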


@ -22,6 +22,7 @@
#include "GeometryCache.h"
#include "InterfaceConfig.h"
#include "JointState.h"
#include "ProgramObject.h"
#include "TextureCache.h"
@ -30,40 +31,6 @@ class Shape;
typedef QSharedPointer<AnimationHandle> AnimationHandlePointer;
typedef QWeakPointer<AnimationHandle> WeakAnimationHandlePointer;
class JointState {
public:
JointState();
void setFBXJoint(const FBXJoint* joint);
const FBXJoint& getFBXJoint() const { return *_fbxJoint; }
void copyState(const JointState& state);
/// computes new _transform and _combinedRotation
void computeTransforms(const glm::mat4& baseTransform, const glm::quat& baseRotation);
/// \return rotation from the joint's default (or bind) frame to world frame
glm::quat getJointRotation(bool fromBind = false) const;
void applyRotationDelta(const glm::quat& delta, bool constrain = true, float priority = 1.0f);
const glm::vec3& getDefaultTranslationInParentFrame() const;
void restoreRotation(float fraction, float priority);
/// \param rotation is from bind- to world-frame
/// computes parent relative _rotation and sets that
void setRotation(const glm::quat& rotation, float priority);
glm::quat _rotation; // rotation relative to parent
glm::mat4 _transform; // rotation to world frame + translation in model frame
glm::quat _combinedRotation; // rotation from joint local to world frame
float _animationPriority; // the priority of the animation affecting this joint
private:
const FBXJoint* _fbxJoint; // JointState does NOT own its FBXJoint
};
/// A generic 3D model displaying geometry loaded from a URL.
class Model : public QObject {
@ -155,10 +122,15 @@ public:
/// Returns the index of the last free ancestor of the indexed joint, or -1 if not found.
int getLastFreeJointIndex(int jointIndex) const;
bool getJointPosition(int jointIndex, glm::vec3& position) const;
bool getJointRotation(int jointIndex, glm::quat& rotation, bool fromBind = false) const;
bool getJointPositionInWorldFrame(int jointIndex, glm::vec3& position) const;
bool getJointRotationInWorldFrame(int jointIndex, glm::quat& rotation) const;
bool getJointCombinedRotation(int jointIndex, glm::quat& rotation) const;
/// \param jointIndex index of joint in model structure
/// \param position[out] position of joint in model-frame
/// \return true if joint exists
bool getJointPosition(int jointIndex, glm::vec3& position) const;
QStringList getJointNames() const;
AnimationHandlePointer createAnimationHandle();
@ -168,7 +140,7 @@ public:
void clearShapes();
void rebuildShapes();
void resetShapePositions();
void updateShapePositions();
virtual void updateShapePositions();
void renderJointCollisionShapes(float alpha);
void renderBoundingCollisionShapes(float alpha);
@ -234,6 +206,8 @@ protected:
// returns 'true' if needs fullUpdate after geometry change
bool updateGeometry();
virtual void setJointStates(QVector<JointState> states);
void setScaleInternal(const glm::vec3& scale);
void scaleToFit();
@ -244,7 +218,15 @@ protected:
/// Updates the state of the joint at the specified index.
virtual void updateJointState(int index);
bool setJointPosition(int jointIndex, const glm::vec3& translation, const glm::quat& rotation = glm::quat(),
/// \param jointIndex index of joint in model structure
/// \param position position of joint in model-frame
/// \param rotation rotation of joint in model-frame
/// \param useRotation false if rotation should be ignored
/// \param lastFreeIndex
/// \param allIntermediatesFree
/// \param alignment
/// \return true if joint exists
bool setJointPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation = glm::quat(),
bool useRotation = false, int lastFreeIndex = -1, bool allIntermediatesFree = false,
const glm::vec3& alignment = glm::vec3(0.0f, -1.0f, 0.0f), float priority = 1.0f);
@ -395,11 +377,11 @@ public:
void setStartAutomatically(bool startAutomatically);
bool getStartAutomatically() const { return _startAutomatically; }
void setFirstFrame(int firstFrame) { _firstFrame = firstFrame; }
int getFirstFrame() const { return _firstFrame; }
void setFirstFrame(float firstFrame) { _firstFrame = firstFrame; }
float getFirstFrame() const { return _firstFrame; }
void setLastFrame(int lastFrame) { _lastFrame = lastFrame; }
int getLastFrame() const { return _lastFrame; }
void setLastFrame(float lastFrame) { _lastFrame = lastFrame; }
float getLastFrame() const { return _lastFrame; }
void setMaskedJoints(const QStringList& maskedJoints);
const QStringList& getMaskedJoints() const { return _maskedJoints; }
@ -423,6 +405,7 @@ private:
AnimationHandle(Model* model);
void simulate(float deltaTime);
void applyFrame(float frameIndex);
void replaceMatchingPriorities(float newPriority);
Model* _model;
@ -435,8 +418,8 @@ private:
bool _loop;
bool _hold;
bool _startAutomatically;
int _firstFrame;
int _lastFrame;
float _firstFrame;
float _lastFrame;
QStringList _maskedJoints;
bool _running;
QVector<int> _jointMappings;
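Since setJointPosition() and its documentation above now work in model-frame, a caller holding a world-space target would convert first, the same way applyCollision() does. A minimal sketch under that assumption (the helper and the accessor names are illustrative):
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
// Inverse of the model's world transform, mirroring
// glm::inverse(_rotation) * (worldPoint - _translation) in Model::applyCollision().
glm::vec3 worldToModelFrame(const glm::vec3& worldPoint,
                            const glm::quat& modelRotation,
                            const glm::vec3& modelTranslation) {
    return glm::inverse(modelRotation) * (worldPoint - modelTranslation);
}
// usage (illustrative):
//   glm::vec3 target = worldToModelFrame(worldHit, model->getRotation(), model->getTranslation());
//   model->setJointPosition(jointIndex, target);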


@ -0,0 +1,167 @@
//
// RagDoll.cpp
// interface/src/avatar
//
// Created by Andrew Meadows 2014.05.30
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/transform.hpp>
#include <CollisionInfo.h>
#include <SharedUtil.h>
#include <CapsuleShape.h>
#include <SphereShape.h>
#include "RagDoll.h"
// ----------------------------------------------------------------------------
// FixedConstraint
// ----------------------------------------------------------------------------
FixedConstraint::FixedConstraint(glm::vec3* point, const glm::vec3& anchor) : _point(point), _anchor(anchor) {
}
float FixedConstraint::enforce() {
assert(_point != NULL);
float distance = glm::distance(_anchor, *_point);
*_point = _anchor;
return distance;
}
void FixedConstraint::setPoint(glm::vec3* point) {
_point = point;
}
void FixedConstraint::setAnchor(const glm::vec3& anchor) {
_anchor = anchor;
}
// ----------------------------------------------------------------------------
// DistanceConstraint
// ----------------------------------------------------------------------------
DistanceConstraint::DistanceConstraint(glm::vec3* startPoint, glm::vec3* endPoint) : _distance(-1.0f) {
_points[0] = startPoint;
_points[1] = endPoint;
_distance = glm::distance(*(_points[0]), *(_points[1]));
}
DistanceConstraint::DistanceConstraint(const DistanceConstraint& other) {
_distance = other._distance;
_points[0] = other._points[0];
_points[1] = other._points[1];
}
void DistanceConstraint::setDistance(float distance) {
_distance = fabsf(distance);
}
float DistanceConstraint::enforce() {
float newDistance = glm::distance(*(_points[0]), *(_points[1]));
glm::vec3 direction(0.0f, 1.0f, 0.0f);
if (newDistance > EPSILON) {
direction = (*(_points[0]) - *(_points[1])) / newDistance;
}
glm::vec3 center = 0.5f * (*(_points[0]) + *(_points[1]));
*(_points[0]) = center + (0.5f * _distance) * direction;
*(_points[1]) = center - (0.5f * _distance) * direction;
return glm::abs(newDistance - _distance);
}
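A quick worked example of enforce() above, with a rest distance of 1 and points at (0,0,0) and (3,0,0):
// newDistance = 3, direction = (p0 - p1) / 3 = (-1, 0, 0), center = (1.5, 0, 0)
// *_points[0] -> center + 0.5 * 1 * direction = (1, 0, 0)
// *_points[1] -> center - 0.5 * 1 * direction = (2, 0, 0)
// returns glm::abs(3 - 1) = 2, i.e. how far the pair was from satisfying the constraint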
void DistanceConstraint::updateProxyShape(Shape* shape, const glm::quat& rotation, const glm::vec3& translation) const {
if (!shape) {
return;
}
switch (shape->getType()) {
case Shape::SPHERE_SHAPE: {
// sphere collides at endPoint
SphereShape* sphere = static_cast<SphereShape*>(shape);
sphere->setPosition(translation + rotation * (*_points[1]));
}
break;
case Shape::CAPSULE_SHAPE: {
// capsule collides from startPoint to endPoint
CapsuleShape* capsule = static_cast<CapsuleShape*>(shape);
capsule->setEndPoints(translation + rotation * (*_points[0]), translation + rotation * (*_points[1]));
}
break;
default:
break;
}
}
// ----------------------------------------------------------------------------
// RagDoll
// ----------------------------------------------------------------------------
RagDoll::RagDoll() {
}
RagDoll::~RagDoll() {
clear();
}
void RagDoll::init(const QVector<JointState>& states) {
clear();
const int numStates = states.size();
_points.reserve(numStates);
for (int i = 0; i < numStates; ++i) {
const JointState& state = states[i];
_points.push_back(state.getPosition());
int parentIndex = state.getFBXJoint().parentIndex;
assert(parentIndex < i);
if (parentIndex == -1) {
FixedConstraint* anchor = new FixedConstraint(&(_points[i]), glm::vec3(0.0f));
_constraints.push_back(anchor);
} else {
DistanceConstraint* stick = new DistanceConstraint(&(_points[i]), &(_points[parentIndex]));
_constraints.push_back(stick);
}
}
}
/// Delete all data.
void RagDoll::clear() {
int numConstraints = _constraints.size();
for (int i = 0; i < numConstraints; ++i) {
delete _constraints[i];
}
_constraints.clear();
_points.clear();
}
float RagDoll::slaveToSkeleton(const QVector<JointState>& states, float fraction) {
const int numStates = states.size();
assert(numStates == _points.size());
fraction = glm::clamp(fraction, 0.0f, 1.0f);
float maxDistance = 0.0f;
for (int i = 0; i < numStates; ++i) {
glm::vec3 oldPoint = _points[i];
_points[i] = (1.0f - fraction) * _points[i] + fraction * states[i].getPosition();
maxDistance = glm::max(maxDistance, glm::distance(oldPoint, _points[i]));
}
return maxDistance;
}
float RagDoll::enforceConstraints() {
float maxDistance = 0.0f;
const int numConstraints = _constraints.size();
for (int i = 0; i < numConstraints; ++i) {
maxDistance = glm::max(maxDistance, _constraints[i]->enforce());
}
return maxDistance;
}
void RagDoll::updateShapes(const QVector<Shape*>& shapes, const glm::quat& rotation, const glm::vec3& translation) const {
int numShapes = shapes.size();
int numConstraints = _constraints.size();
for (int i = 0; i < numShapes && i < numConstraints; ++i) {
_constraints[i]->updateProxyShape(shapes[i], rotation, translation);
}
}
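Putting the RagDoll API above together, one plausible per-frame driver (a sketch only; the iteration budget, the threshold, and the function name are assumptions, and ragDoll.init(jointStates) is assumed to have been called once beforehand):
#include <QVector>
#include "RagDoll.h"
void simulateRagDollStep(RagDoll& ragDoll, const QVector<JointState>& jointStates) {
    const int MAX_ITERATIONS = 4;        // assumed relaxation budget
    const float MIN_MOVEMENT = 0.001f;   // assumed convergence tolerance
    ragDoll.slaveToSkeleton(jointStates, 0.5f);   // blend halfway toward the skeleton pose
    for (int i = 0; i < MAX_ITERATIONS; ++i) {
        if (ragDoll.enforceConstraints() < MIN_MOVEMENT) {
            break;   // points barely moved; constraints are effectively satisfied
        }
    }
}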


@ -0,0 +1,95 @@
//
// RagDoll.h
// interface/src/avatar
//
// Created by Andrew Meadows 2014.05.30
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_RagDoll_h
#define hifi_RagDoll_h
#include "renderer/Model.h"
class Shape;
class Constraint {
public:
Constraint() {}
virtual ~Constraint() {}
/// Enforce constraint by moving relevant points.
/// \return max distance of point movement
virtual float enforce() = 0;
/// \param shape pointer to shape that will be this Constraint's collision proxy
/// \param rotation rotation into shape's collision frame
/// \param translation translation into shape's collision frame
/// Moves the shape such that it will collide at this constraint's position
virtual void updateProxyShape(Shape* shape, const glm::quat& rotation, const glm::vec3& translation) const {}
protected:
int _type;
};
class FixedConstraint : public Constraint {
public:
FixedConstraint(glm::vec3* point, const glm::vec3& anchor);
float enforce();
void setPoint(glm::vec3* point);
void setAnchor(const glm::vec3& anchor);
private:
glm::vec3* _point;
glm::vec3 _anchor;
};
class DistanceConstraint : public Constraint {
public:
DistanceConstraint(glm::vec3* startPoint, glm::vec3* endPoint);
DistanceConstraint(const DistanceConstraint& other);
float enforce();
void setDistance(float distance);
void updateProxyShape(Shape* shape, const glm::quat& rotation, const glm::vec3& translation) const;
private:
float _distance;
glm::vec3* _points[2];
};
class RagDoll {
public:
RagDoll();
virtual ~RagDoll();
/// Create points and constraints based on topology of collection of joints
/// \param states list of connected joint states
void init(const QVector<JointState>& states);
/// Delete all data.
void clear();
/// \param states list of joint states
/// \param fraction range from 0.0 (no movement) to 1.0 (use joint locations)
/// \return max distance of point movement
float slaveToSkeleton(const QVector<JointState>& states, float fraction);
/// Enforce constraints.
/// \return max distance of point movement
float enforceConstraints();
const QVector<glm::vec3>& getPoints() const { return _points; }
/// \param shapes list of shapes to be updated with new positions
/// \param rotation rotation into shapes' collision frame
/// \param translation translation into shapes' collision frame
void updateShapes(const QVector<Shape*>& shapes, const glm::quat& rotation, const glm::vec3& translation) const;
private:
QVector<Constraint*> _constraints;
QVector<glm::vec3> _points;
};
#endif // hifi_RagDoll_h


@ -98,6 +98,7 @@ AnimationPanel::AnimationPanel(AnimationsDialog* dialog, const AnimationHandlePo
layout->addRow("FPS:", _fps = new QDoubleSpinBox());
_fps->setSingleStep(0.01);
_fps->setMinimum(-FLT_MAX);
_fps->setMaximum(FLT_MAX);
_fps->setValue(handle->getFPS());
connect(_fps, SIGNAL(valueChanged(double)), SLOT(updateHandle()));
@ -128,15 +129,17 @@ AnimationPanel::AnimationPanel(AnimationsDialog* dialog, const AnimationHandlePo
_startAutomatically->setChecked(handle->getStartAutomatically());
connect(_startAutomatically, SIGNAL(toggled(bool)), SLOT(updateHandle()));
layout->addRow("First Frame:", _firstFrame = new QSpinBox());
layout->addRow("First Frame:", _firstFrame = new QDoubleSpinBox());
_firstFrame->setSingleStep(0.01);
_firstFrame->setMaximum(INT_MAX);
_firstFrame->setValue(handle->getFirstFrame());
connect(_firstFrame, SIGNAL(valueChanged(int)), SLOT(updateHandle()));
connect(_firstFrame, SIGNAL(valueChanged(double)), SLOT(updateHandle()));
layout->addRow("Last Frame:", _lastFrame = new QSpinBox());
layout->addRow("Last Frame:", _lastFrame = new QDoubleSpinBox());
_lastFrame->setSingleStep(0.01);
_lastFrame->setMaximum(INT_MAX);
_lastFrame->setValue(handle->getLastFrame());
connect(_lastFrame, SIGNAL(valueChanged(int)), SLOT(updateHandle()));
connect(_lastFrame, SIGNAL(valueChanged(double)), SLOT(updateHandle()));
QHBoxLayout* buttons = new QHBoxLayout();
layout->addRow(buttons);


@ -22,7 +22,6 @@ class QComboBox;
class QDoubleSpinner;
class QLineEdit;
class QPushButton;
class QSpinBox;
class QVBoxLayout;
/// Allows users to edit the avatar animations.
@ -71,8 +70,8 @@ private:
QCheckBox* _loop;
QCheckBox* _hold;
QCheckBox* _startAutomatically;
QSpinBox* _firstFrame;
QSpinBox* _lastFrame;
QDoubleSpinBox* _firstFrame;
QDoubleSpinBox* _lastFrame;
QLineEdit* _maskedJoints;
QPushButton* _chooseMaskedJoints;
QPushButton* _start;


@ -22,7 +22,8 @@
ApplicationOverlay::ApplicationOverlay() :
_framebufferObject(NULL),
_oculusAngle(65.0f * RADIANS_PER_DEGREE),
_distance(0.5f) {
_distance(0.5f),
_uiType(HEMISPHERE) {
}
@ -296,6 +297,25 @@ void ApplicationOverlay::displayOverlayTexture(Camera& whichCamera) {
glDisable(GL_TEXTURE_2D);
}
const float textureFov = PI / 2.5f;
void ApplicationOverlay::computeOculusPickRay(float x, float y, glm::vec3& direction) const {
glm::quat rot = Application::getInstance()->getAvatar()->getOrientation();
//invert y direction
y = 1.0 - y;
//Get position on hemisphere UI
x = sin((x - 0.5f) * textureFov);
y = sin((y - 0.5f) * textureFov);
float dist = sqrt(x * x + y * y);
float z = -sqrt(1.0f - dist * dist);
//Rotate the UI pick ray by the avatar orientation
direction = glm::normalize(rot * glm::vec3(x, y, z));
}
// Fast helper functions
inline float max(float a, float b) {
return (a > b) ? a : b;
@ -316,28 +336,32 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
int mouseX = application->getMouseX();
int mouseY = application->getMouseY();
int widgetWidth = glWidget->width();
int widgetHeight = glWidget->height();
const int widgetWidth = glWidget->width();
const int widgetHeight = glWidget->height();
float magnifyWidth = 80.0f;
float magnifyHeight = 60.0f;
const float magnification = 4.0f;
// Get vertical FoV of the displayed overlay texture
const float halfVerticalAngle = _oculusAngle / 2.0f;
const float verticalAngle = halfVerticalAngle * 2.0f;
const float overlayAspectRatio = glWidget->width() / (float)glWidget->height();
const float halfOverlayHeight = _distance * tan(halfVerticalAngle);
const float overlayHeight = halfOverlayHeight * 2.0f;
// The more vertices, the better the curve
const int numHorizontalVertices = 20;
const int numVerticalVertices = 20;
// U texture coordinate width at each quad
const float quadTexWidth = 1.0f / (numHorizontalVertices - 1);
const float quadTexHeight = 1.0f / (numVerticalVertices - 1);
// Get horizontal angle and angle increment from vertical angle and aspect ratio
const float horizontalAngle = halfVerticalAngle * 2.0f * overlayAspectRatio;
const float angleIncrement = horizontalAngle / (numHorizontalVertices - 1);
const float halfHorizontalAngle = horizontalAngle / 2;
const float verticalAngleIncrement = _oculusAngle / (numVerticalVertices - 1);
glActiveTexture(GL_TEXTURE0);
glEnable(GL_BLEND);
@ -391,9 +415,10 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
magnifyHeight = widgetHeight - mouseY;
}
const float halfMagnifyHeight = magnifyHeight / 2.0f;
float newWidth = magnifyWidth * magnification;
float newHeight = magnifyHeight * magnification;
float tmp;
// Magnification Texture Coordinates
float magnifyULeft = mouseX / (float)widgetWidth;
@ -408,55 +433,118 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
// Get angle on the UI
float leftAngle = (newMouseX / (float)widgetWidth) * horizontalAngle - halfHorizontalAngle;
float rightAngle = ((newMouseX + newWidth) / (float)widgetWidth) * horizontalAngle - halfHorizontalAngle;
float halfMagnifyHeight = magnifyHeight / 2.0f;
float leftX, rightX, leftZ, rightZ;
float bottomAngle = (newMouseY / (float)widgetHeight) * _oculusAngle - halfVerticalAngle;
float topAngle = ((newMouseY - newHeight) / (float)widgetHeight) * _oculusAngle - halfVerticalAngle;
float leftX, rightX, leftZ, rightZ, topZ, bottomZ;
// Get position on hemisphere using angle
leftX = sin(leftAngle) * _distance;
rightX = sin(rightAngle) * _distance;
leftZ = -cos(leftAngle) * _distance;
rightZ = -cos(rightAngle) * _distance;
float bottomY = (1.0 - newMouseY / (float)widgetHeight) * halfOverlayHeight * 2.0f - halfOverlayHeight;
float topY = bottomY + (newHeight / widgetHeight) * halfOverlayHeight * 2;
if (_uiType == HEMISPHERE) {
//TODO: Remove immediate mode in favor of VBO
glBegin(GL_QUADS);
//Get new UV coordinates from our magnification window
float newULeft = newMouseX / widgetWidth;
float newURight = (newMouseX + newWidth) / widgetWidth;
float newVBottom = 1.0 - newMouseY / widgetHeight;
float newVTop = 1.0 - (newMouseY - newHeight) / widgetHeight;
glTexCoord2f(magnifyULeft, magnifyVBottom); glVertex3f(leftX, topY, leftZ);
glTexCoord2f(magnifyURight, magnifyVBottom); glVertex3f(rightX, topY, rightZ);
glTexCoord2f(magnifyURight, magnifyVTop); glVertex3f(rightX, bottomY, rightZ);
glTexCoord2f(magnifyULeft, magnifyVTop); glVertex3f(leftX, bottomY, leftZ);
// Project our position onto the hemisphere using the UV coordinates
float lX = sin((newULeft - 0.5f) * textureFov);
float rX = sin((newURight - 0.5f) * textureFov);
float bY = sin((newVBottom - 0.5f) * textureFov);
float tY = sin((newVTop - 0.5f) * textureFov);
float dist;
//Bottom Left
dist = sqrt(lX * lX + bY * bY);
float blZ = sqrt(1.0f - dist * dist);
//Top Left
dist = sqrt(lX * lX + tY * tY);
float tlZ = sqrt(1.0f - dist * dist);
//Bottom Right
dist = sqrt(rX * rX + bY * bY);
float brZ = sqrt(1.0f - dist * dist);
//Top Right
dist = sqrt(rX * rX + tY * tY);
float trZ = sqrt(1.0f - dist * dist);
glEnd();
glBegin(GL_QUADS);
glTexCoord2f(magnifyULeft, magnifyVBottom); glVertex3f(lX, tY, -tlZ);
glTexCoord2f(magnifyURight, magnifyVBottom); glVertex3f(rX, tY, -trZ);
glTexCoord2f(magnifyURight, magnifyVTop); glVertex3f(rX, bY, -brZ);
glTexCoord2f(magnifyULeft, magnifyVTop); glVertex3f(lX, bY, -blZ);
glEnd();
} else {
leftX = sin(leftAngle) * _distance;
rightX = sin(rightAngle) * _distance;
leftZ = -cos(leftAngle) * _distance;
rightZ = -cos(rightAngle) * _distance;
if (_uiType == CURVED_SEMICIRCLE) {
topZ = -cos(topAngle * overlayAspectRatio) * _distance;
bottomZ = -cos(bottomAngle * overlayAspectRatio) * _distance;
} else {
// Don't want to use topZ or bottomZ for SEMICIRCLE
topZ = -99999;
bottomZ = -99999;
}
float bottomY = (1.0 - newMouseY / (float)widgetHeight) * halfOverlayHeight * 2.0f - halfOverlayHeight;
float topY = bottomY + (newHeight / widgetHeight) * halfOverlayHeight * 2;
//TODO: Remove immediate mode in favor of VBO
glBegin(GL_QUADS);
glTexCoord2f(magnifyULeft, magnifyVBottom); glVertex3f(leftX, topY, max(topZ, leftZ));
glTexCoord2f(magnifyURight, magnifyVBottom); glVertex3f(rightX, topY, max(topZ, rightZ));
glTexCoord2f(magnifyURight, magnifyVTop); glVertex3f(rightX, bottomY, max(bottomZ, rightZ));
glTexCoord2f(magnifyULeft, magnifyVTop); glVertex3f(leftX, bottomY, max(bottomZ, leftZ));
glEnd();
}
glDepthMask(GL_FALSE);
glDisable(GL_ALPHA_TEST);
//TODO: Remove immediate mode in favor of VBO
glBegin(GL_QUADS);
// Place the vertices in a semicircle curve around the camera
for (int i = 0; i < numHorizontalVertices-1; i++) {
if (_uiType == HEMISPHERE) {
renderTexturedHemisphere();
} else {
glBegin(GL_QUADS);
// Place the vertices in a semicircle curve around the camera
for (int i = 0; i < numHorizontalVertices - 1; i++) {
for (int j = 0; j < numVerticalVertices - 1; j++) {
// Calculate the X and Z coordinates from the angles and radius from camera
leftX = sin(angleIncrement * i - halfHorizontalAngle) * _distance;
rightX = sin(angleIncrement * (i + 1) - halfHorizontalAngle) * _distance;
leftZ = -cos(angleIncrement * i - halfHorizontalAngle) * _distance;
rightZ = -cos(angleIncrement * (i + 1) - halfHorizontalAngle) * _distance;
// Calculate the X and Z coordinates from the angles and radius from camera
leftX = sin(angleIncrement * i - halfHorizontalAngle) * _distance;
rightX = sin(angleIncrement * (i + 1) - halfHorizontalAngle) * _distance;
leftZ = -cos(angleIncrement * i - halfHorizontalAngle) * _distance;
rightZ = -cos(angleIncrement * (i + 1) - halfHorizontalAngle) * _distance;
if (_uiType == CURVED_SEMICIRCLE) {
topZ = -cos((verticalAngleIncrement * (j + 1) - halfVerticalAngle) * overlayAspectRatio) * _distance;
bottomZ = -cos((verticalAngleIncrement * j - halfVerticalAngle) * overlayAspectRatio) * _distance;
} else {
topZ = -99999;
bottomZ = -99999;
}
glTexCoord2f(quadTexWidth * i, 1); glVertex3f(leftX, halfOverlayHeight, leftZ);
glTexCoord2f(quadTexWidth * (i + 1), 1); glVertex3f(rightX, halfOverlayHeight, rightZ);
glTexCoord2f(quadTexWidth * (i + 1), 0); glVertex3f(rightX, -halfOverlayHeight, rightZ);
glTexCoord2f(quadTexWidth * i, 0); glVertex3f(leftX, -halfOverlayHeight, leftZ);
glTexCoord2f(quadTexWidth * i, (j + 1) * quadTexHeight);
glVertex3f(leftX, (j + 1) * quadTexHeight * overlayHeight - halfOverlayHeight, max(topZ, leftZ));
glTexCoord2f(quadTexWidth * (i + 1), (j + 1) * quadTexHeight);
glVertex3f(rightX, (j + 1) * quadTexHeight * overlayHeight - halfOverlayHeight, max(topZ, rightZ));
glTexCoord2f(quadTexWidth * (i + 1), j * quadTexHeight);
glVertex3f(rightX, j * quadTexHeight * overlayHeight - halfOverlayHeight, max(bottomZ, rightZ));
glTexCoord2f(quadTexWidth * i, j * quadTexHeight);
glVertex3f(leftX, j * quadTexHeight * overlayHeight - halfOverlayHeight, max(bottomZ, leftZ));
}
}
glEnd();
}
glEnd();
glPopMatrix();
glDepthMask(GL_TRUE);
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
@ -466,13 +554,106 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
}
void ApplicationOverlay::renderTexturedHemisphere() {
const int slices = 80;
const int stacks = 80;
static VerticesIndices vbo(0, 0);
int vertices = slices * (stacks - 1) + 1;
int indices = slices * 2 * 3 * (stacks - 2) + slices * 3;
if (vbo.first == 0) {
TextureVertex* vertexData = new TextureVertex[vertices];
TextureVertex* vertex = vertexData;
for (int i = 0; i < stacks - 1; i++) {
float phi = PI_OVER_TWO * (float)i / (float)(stacks - 1);
float z = -sinf(phi), radius = cosf(phi);
for (int j = 0; j < slices; j++) {
float theta = TWO_PI * (float)j / (float)slices;
vertex->position.x = sinf(theta) * radius;
vertex->position.y = cosf(theta) * radius;
vertex->position.z = z;
vertex->uv.x = asin(vertex->position.x) / (textureFov) + 0.5f;
vertex->uv.y = asin(vertex->position.y) / (textureFov) + 0.5f;
vertex++;
}
}
vertex->position.x = 0.0f;
vertex->position.y = 0.0f;
vertex->position.z = -1.0f;
vertex->uv.x = 0.5f;
vertex->uv.y = 0.5f;
vertex++;
glGenBuffers(1, &vbo.first);
glBindBuffer(GL_ARRAY_BUFFER, vbo.first);
const int BYTES_PER_VERTEX = sizeof(TextureVertex);
glBufferData(GL_ARRAY_BUFFER, vertices * BYTES_PER_VERTEX, vertexData, GL_STATIC_DRAW);
delete[] vertexData;
GLushort* indexData = new GLushort[indices];
GLushort* index = indexData;
for (int i = 0; i < stacks - 2; i++) {
GLushort bottom = i * slices;
GLushort top = bottom + slices;
for (int j = 0; j < slices; j++) {
int next = (j + 1) % slices;
*(index++) = bottom + j;
*(index++) = top + next;
*(index++) = top + j;
*(index++) = bottom + j;
*(index++) = bottom + next;
*(index++) = top + next;
}
}
GLushort bottom = (stacks - 2) * slices;
GLushort top = bottom + slices;
for (int i = 0; i < slices; i++) {
*(index++) = bottom + i;
*(index++) = bottom + (i + 1) % slices;
*(index++) = top;
}
glGenBuffers(1, &vbo.second);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo.second);
const int BYTES_PER_INDEX = sizeof(GLushort);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices * BYTES_PER_INDEX, indexData, GL_STATIC_DRAW);
delete[] indexData;
} else {
glBindBuffer(GL_ARRAY_BUFFER, vbo.first);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo.second);
}
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(3, GL_FLOAT, sizeof(TextureVertex), (void*)0);
glTexCoordPointer(2, GL_FLOAT, sizeof(TextureVertex), (void*)12);
glDrawRangeElements(GL_TRIANGLES, 0, vertices - 1, indices, GL_UNSIGNED_SHORT, 0);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
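The texture coordinates generated above are the inverse of the projection used in computeOculusPickRay(): positions on the hemisphere come from sin((uv - 0.5) * textureFov), so the lookup recovers uv with asin(position) / textureFov + 0.5. A small sketch of that round trip (names are illustrative):
#include <cmath>
static const float kTextureFov = 3.14159265f / 2.5f;   // same value as textureFov above
// uv in [0, 1] -> coordinate on the unit hemisphere (one axis shown)
float uvToHemisphere(float uv) { return sinf((uv - 0.5f) * kTextureFov); }
// hemisphere coordinate -> uv, as in the TextureVertex::uv generation
float hemisphereToUv(float pos) { return asinf(pos) / kTextureFov + 0.5f; }
// hemisphereToUv(uvToHemisphere(u)) == u for any u in [0, 1],
// because (u - 0.5) * kTextureFov stays within [-pi/2, pi/2].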
QOpenGLFramebufferObject* ApplicationOverlay::getFramebufferObject() {
if (!_framebufferObject) {
_framebufferObject = new QOpenGLFramebufferObject(Application::getInstance()->getGLWidget()->size());
glBindTexture(GL_TEXTURE_2D, _framebufferObject->texture());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
GLfloat borderColor[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
glBindTexture(GL_TEXTURE_2D, 0);
}
return _framebufferObject;


@ -15,16 +15,19 @@
class Overlays;
class QOpenGLFramebufferObject;
// Handles the drawing of the overlays to the scree
// Handles the drawing of the overlays to the screen
class ApplicationOverlay {
public:
enum UIType { HEMISPHERE, SEMICIRCLE, CURVED_SEMICIRCLE };
ApplicationOverlay();
~ApplicationOverlay();
void renderOverlay(bool renderToTexture = false);
void displayOverlayTexture(Camera& whichCamera);
void displayOverlayTextureOculus(Camera& whichCamera);
void computeOculusPickRay(float x, float y, glm::vec3& direction) const;
// Getters
QOpenGLFramebufferObject* getFramebufferObject();
@ -32,14 +35,24 @@ public:
// Setters
void setOculusAngle(float oculusAngle) { _oculusAngle = oculusAngle; }
void setUIType(UIType uiType) { _uiType = uiType; }
private:
// Interleaved vertex data
struct TextureVertex {
glm::vec3 position;
glm::vec2 uv;
};
typedef QPair<GLuint, GLuint> VerticesIndices;
void renderTexturedHemisphere();
ProgramObject _textureProgram;
QOpenGLFramebufferObject* _framebufferObject;
float _trailingAudioLoudness;
float _oculusAngle;
float _distance;
UIType _uiType;
};
#endif // hifi_ApplicationOverlay_h


@ -20,14 +20,15 @@
#include "PositionalAudioRingBuffer.h"
PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type) :
AudioRingBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo) :
AudioRingBuffer(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
_type(type),
_position(0.0f, 0.0f, 0.0f),
_orientation(0.0f, 0.0f, 0.0f, 0.0f),
_willBeAddedToMix(false),
_shouldLoopbackForNode(false),
_shouldOutputStarveDebug(true)
_shouldOutputStarveDebug(true),
_isStereo(isStereo)
{
}
@ -40,6 +41,9 @@ int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
// skip the packet header (includes the source UUID)
int readBytes = numBytesForPacketHeader(packet);
// hop over the channel flag that has already been read in AudioMixerClientData
readBytes += sizeof(quint8);
// read the positional data
readBytes += parsePositionalData(packet.mid(readBytes));
if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) {


@ -24,7 +24,7 @@ public:
Injector
};
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type);
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false);
~PositionalAudioRingBuffer();
int parseData(const QByteArray& packet);
@ -41,6 +41,8 @@ public:
bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
bool isStereo() const { return _isStereo; }
PositionalAudioRingBuffer::Type getType() const { return _type; }
const glm::vec3& getPosition() const { return _position; }
const glm::quat& getOrientation() const { return _orientation; }
@ -56,6 +58,7 @@ protected:
bool _willBeAddedToMix;
bool _shouldLoopbackForNode;
bool _shouldOutputStarveDebug;
bool _isStereo;
float _nextOutputTrailingLoudness;
};
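For context on the parseData() change above, the ring buffer now steps over a one-byte channel flag that AudioMixerClientData has already consumed. A rough sketch of the assumed packet layout (the trailing payload description is an assumption based on the code shown):
// [ packet header, includes source UUID ]       -> numBytesForPacketHeader(packet)
// [ quint8 channel flag, mono or stereo ]       -> skipped here, read by AudioMixerClientData
// [ positional data, position + orientation ]   -> parsePositionalData()
// [ audio payload, or silent-frame info for PacketTypeSilentAudioFrame ]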


@ -894,14 +894,15 @@ FBXBlendshape extractBlendshape(const FBXNode& object) {
}
void setTangents(FBXMesh& mesh, int firstIndex, int secondIndex) {
glm::vec3 normal = glm::normalize(mesh.normals.at(firstIndex));
const glm::vec3& normal = mesh.normals.at(firstIndex);
glm::vec3 bitangent = glm::cross(normal, mesh.vertices.at(secondIndex) - mesh.vertices.at(firstIndex));
if (glm::length(bitangent) < EPSILON) {
return;
}
glm::vec2 texCoordDelta = mesh.texCoords.at(secondIndex) - mesh.texCoords.at(firstIndex);
mesh.tangents[firstIndex] += glm::cross(glm::angleAxis(
- atan2f(-texCoordDelta.t, texCoordDelta.s), normal) * glm::normalize(bitangent), normal);
glm::vec3 normalizedNormal = glm::normalize(normal);
mesh.tangents[firstIndex] += glm::cross(glm::angleAxis(-atan2f(-texCoordDelta.t, texCoordDelta.s), normalizedNormal) *
glm::normalize(bitangent), normalizedNormal);
}
QVector<int> getIndices(const QVector<QString> ids, QVector<QString> modelIDs) {


@ -47,11 +47,19 @@ IDStreamer::IDStreamer(Bitstream& stream) :
_bits(1) {
}
void IDStreamer::setBitsFromValue(int value) {
_bits = 1;
while (value >= (1 << _bits) - 1) {
_bits++;
static int getBitsForHighestValue(int highestValue) {
// if this turns out to be a bottleneck, there are fancier ways to do it (get the position of the highest set bit):
// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogObvious
int bits = 0;
while (highestValue != 0) {
bits++;
highestValue >>= 1;
}
return bits;
}
void IDStreamer::setBitsFromValue(int value) {
_bits = getBitsForHighestValue(value + 1);
}
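For reference, getBitsForHighestValue() just counts the bits needed to represent its argument, so setBitsFromValue(value) sizes the ID field for value + 1, matching the old while (value >= (1 << _bits) - 1) loop. A few worked cases:
// getBitsForHighestValue(1) == 1   (values 0..1 fit in 1 bit)
// getBitsForHighestValue(3) == 2   (values 0..3 fit in 2 bits)
// getBitsForHighestValue(5) == 3   (5 is 0b101, so 3 bits)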
IDStreamer& IDStreamer::operator<<(int value) {
@ -120,6 +128,14 @@ void Bitstream::addTypeSubstitution(const QByteArray& typeName, int type) {
_typeStreamerSubstitutions.insert(typeName, getTypeStreamers().value(type));
}
void Bitstream::addTypeSubstitution(const QByteArray& typeName, const char* replacementTypeName) {
const TypeStreamer* streamer = getTypeStreamers().value(QMetaType::type(replacementTypeName));
if (!streamer) {
streamer = getEnumStreamersByName().value(replacementTypeName);
}
_typeStreamerSubstitutions.insert(typeName, streamer);
}
const int LAST_BIT_POSITION = BITS_IN_BYTE - 1;
Bitstream& Bitstream::write(const void* data, int bits, int offset) {
@ -193,8 +209,9 @@ void Bitstream::persistWriteMappings(const WriteMappings& mappings) {
continue;
}
connect(it.key().data(), SIGNAL(destroyed(QObject*)), SLOT(clearSharedObject(QObject*)));
QPointer<SharedObject>& reference = _sharedObjectReferences[it.key()->getID()];
if (reference) {
QPointer<SharedObject>& reference = _sharedObjectReferences[it.key()->getOriginID()];
if (reference && reference != it.key()) {
// the object has been replaced by a successor, so we can forget about the original
_sharedObjectStreamer.removePersistentID(reference);
reference->disconnect(this);
}
@ -227,8 +244,9 @@ void Bitstream::persistReadMappings(const ReadMappings& mappings) {
if (!it.value()) {
continue;
}
QPointer<SharedObject>& reference = _sharedObjectReferences[it.value()->getRemoteID()];
if (reference) {
QPointer<SharedObject>& reference = _sharedObjectReferences[it.value()->getRemoteOriginID()];
if (reference && reference != it.value()) {
// the object has been replaced by a successor, so we can forget about the original
_sharedObjectStreamer.removePersistentValue(reference.data());
}
reference = it.value();
@ -280,16 +298,8 @@ void Bitstream::writeRawDelta(const QObject* value, const QObject* reference) {
}
const QMetaObject* metaObject = value->metaObject();
_metaObjectStreamer << metaObject;
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored(value)) {
continue;
}
const TypeStreamer* streamer = getTypeStreamers().value(property.userType());
if (streamer) {
streamer->writeDelta(*this, property.read(value), reference && metaObject == reference->metaObject() ?
property.read(reference) : QVariant());
}
foreach (const PropertyWriter& propertyWriter, getPropertyWriters().value(metaObject)) {
propertyWriter.writeDelta(*this, value, reference);
}
}
@ -411,6 +421,10 @@ Bitstream& Bitstream::operator>>(QUrl& url) {
}
Bitstream& Bitstream::operator<<(const QVariant& value) {
if (!value.isValid()) {
_typeStreamerStreamer << NULL;
return *this;
}
const TypeStreamer* streamer = getTypeStreamers().value(value.userType());
if (streamer) {
_typeStreamerStreamer << streamer;
@ -424,7 +438,11 @@ Bitstream& Bitstream::operator<<(const QVariant& value) {
Bitstream& Bitstream::operator>>(QVariant& value) {
TypeReader reader;
_typeStreamerStreamer >> reader;
value = reader.read(*this);
if (reader.getTypeName().isEmpty()) {
value = QVariant();
} else {
value = reader.read(*this);
}
return *this;
}
@ -458,15 +476,8 @@ Bitstream& Bitstream::operator<<(const QObject* object) {
}
const QMetaObject* metaObject = object->metaObject();
_metaObjectStreamer << metaObject;
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored(object)) {
continue;
}
const TypeStreamer* streamer = getTypeStreamers().value(property.userType());
if (streamer) {
streamer->write(*this, property.read(object));
}
foreach (const PropertyWriter& propertyWriter, getPropertyWriters().value(metaObject)) {
propertyWriter.write(*this, object);
}
return *this;
}
@ -550,25 +561,12 @@ Bitstream& Bitstream::operator<(const QMetaObject* metaObject) {
if (_metadataType == NO_METADATA) {
return *this;
}
int storedPropertyCount = 0;
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (property.isStored() && getTypeStreamers().contains(property.userType())) {
storedPropertyCount++;
}
}
*this << storedPropertyCount;
const QVector<PropertyWriter>& propertyWriters = getPropertyWriters().value(metaObject);
*this << propertyWriters.size();
QCryptographicHash hash(QCryptographicHash::Md5);
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored()) {
continue;
}
const TypeStreamer* typeStreamer = getTypeStreamers().value(property.userType());
if (!typeStreamer) {
continue;
}
_typeStreamerStreamer << typeStreamer;
foreach (const PropertyWriter& propertyWriter, propertyWriters) {
_typeStreamerStreamer << propertyWriter.getStreamer();
const QMetaProperty& property = propertyWriter.getProperty();
if (_metadataType == FULL_METADATA) {
*this << QByteArray::fromRawData(property.name(), strlen(property.name()));
} else {
@ -597,7 +595,7 @@ Bitstream& Bitstream::operator>(ObjectReader& objectReader) {
qWarning() << "Unknown class name: " << className << "\n";
}
if (_metadataType == NO_METADATA) {
objectReader = ObjectReader(className, metaObject, getPropertyReaders(metaObject));
objectReader = ObjectReader(className, metaObject, getPropertyReaders().value(metaObject));
return *this;
}
int storedPropertyCount;
@ -621,25 +619,18 @@ Bitstream& Bitstream::operator>(ObjectReader& objectReader) {
QCryptographicHash hash(QCryptographicHash::Md5);
bool matches = true;
if (metaObject) {
int propertyIndex = 0;
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored()) {
continue;
const QVector<PropertyWriter>& propertyWriters = getPropertyWriters().value(metaObject);
if (propertyWriters.size() == properties.size()) {
for (int i = 0; i < propertyWriters.size(); i++) {
const PropertyWriter& propertyWriter = propertyWriters.at(i);
if (!properties.at(i).getReader().matchesExactly(propertyWriter.getStreamer())) {
matches = false;
break;
}
const QMetaProperty& property = propertyWriter.getProperty();
hash.addData(property.name(), strlen(property.name()) + 1);
}
const TypeStreamer* typeStreamer = getTypeStreamers().value(property.userType());
if (!typeStreamer) {
continue;
}
if (propertyIndex >= properties.size() ||
!properties.at(propertyIndex).getReader().matchesExactly(typeStreamer)) {
matches = false;
break;
}
hash.addData(property.name(), strlen(property.name()) + 1);
propertyIndex++;
}
if (propertyIndex != properties.size()) {
} else {
matches = false;
}
}
@ -647,7 +638,7 @@ Bitstream& Bitstream::operator>(ObjectReader& objectReader) {
QByteArray remoteHashResult(localHashResult.size(), 0);
read(remoteHashResult.data(), remoteHashResult.size() * BITS_IN_BYTE);
if (metaObject && matches && localHashResult == remoteHashResult) {
objectReader = ObjectReader(className, metaObject, getPropertyReaders(metaObject));
objectReader = ObjectReader(className, metaObject, getPropertyReaders().value(metaObject));
return *this;
}
}
@ -656,7 +647,11 @@ Bitstream& Bitstream::operator>(ObjectReader& objectReader) {
}
Bitstream& Bitstream::operator<(const TypeStreamer* streamer) {
const char* typeName = QMetaType::typeName(streamer->getType());
if (!streamer) {
*this << QByteArray();
return *this;
}
const char* typeName = streamer->getName();
*this << QByteArray::fromRawData(typeName, strlen(typeName));
if (_metadataType == NO_METADATA) {
return *this;
@ -667,6 +662,27 @@ Bitstream& Bitstream::operator<(const TypeStreamer* streamer) {
case TypeReader::SIMPLE_TYPE:
return *this;
case TypeReader::ENUM_TYPE: {
QMetaEnum metaEnum = streamer->getMetaEnum();
if (_metadataType == FULL_METADATA) {
*this << metaEnum.keyCount();
for (int i = 0; i < metaEnum.keyCount(); i++) {
*this << QByteArray::fromRawData(metaEnum.key(i), strlen(metaEnum.key(i)));
*this << metaEnum.value(i);
}
} else {
*this << streamer->getBits();
QCryptographicHash hash(QCryptographicHash::Md5);
for (int i = 0; i < metaEnum.keyCount(); i++) {
hash.addData(metaEnum.key(i), strlen(metaEnum.key(i)) + 1);
qint32 value = metaEnum.value(i);
hash.addData((const char*)&value, sizeof(qint32));
}
QByteArray hashResult = hash.result();
write(hashResult.constData(), hashResult.size() * BITS_IN_BYTE);
}
return *this;
}
case TypeReader::LIST_TYPE:
case TypeReader::SET_TYPE:
return *this << streamer->getValueStreamer();
@ -702,9 +718,16 @@ Bitstream& Bitstream::operator<(const TypeStreamer* streamer) {
Bitstream& Bitstream::operator>(TypeReader& reader) {
QByteArray typeName;
*this >> typeName;
if (typeName.isEmpty()) {
reader = TypeReader();
return *this;
}
const TypeStreamer* streamer = _typeStreamerSubstitutions.value(typeName);
if (!streamer) {
streamer = getTypeStreamers().value(QMetaType::type(typeName.constData()));
if (!streamer) {
streamer = getEnumStreamersByName().value(typeName);
}
}
if (!streamer) {
qWarning() << "Unknown type name: " << typeName << "\n";
@ -719,7 +742,55 @@ Bitstream& Bitstream::operator>(TypeReader& reader) {
case TypeReader::SIMPLE_TYPE:
reader = TypeReader(typeName, streamer);
return *this;
case TypeReader::ENUM_TYPE: {
if (_metadataType == FULL_METADATA) {
int keyCount;
*this >> keyCount;
QMetaEnum metaEnum = (streamer && streamer->getReaderType() == TypeReader::ENUM_TYPE) ?
streamer->getMetaEnum() : QMetaEnum();
QHash<int, int> mappings;
bool matches = (keyCount == metaEnum.keyCount());
int highestValue = 0;
for (int i = 0; i < keyCount; i++) {
QByteArray key;
int value;
*this >> key >> value;
highestValue = qMax(value, highestValue);
int localValue = metaEnum.keyToValue(key);
if (localValue != -1) {
mappings.insert(value, localValue);
}
matches &= (value == localValue);
}
if (matches) {
reader = TypeReader(typeName, streamer);
} else {
reader = TypeReader(typeName, streamer, getBitsForHighestValue(highestValue), mappings);
}
} else {
int bits;
*this >> bits;
QCryptographicHash hash(QCryptographicHash::Md5);
if (streamer && streamer->getReaderType() == TypeReader::ENUM_TYPE) {
QMetaEnum metaEnum = streamer->getMetaEnum();
for (int i = 0; i < metaEnum.keyCount(); i++) {
hash.addData(metaEnum.key(i), strlen(metaEnum.key(i)) + 1);
qint32 value = metaEnum.value(i);
hash.addData((const char*)&value, sizeof(qint32));
}
}
QByteArray localHashResult = hash.result();
QByteArray remoteHashResult(localHashResult.size(), 0);
read(remoteHashResult.data(), remoteHashResult.size() * BITS_IN_BYTE);
if (localHashResult == remoteHashResult) {
reader = TypeReader(typeName, streamer);
} else {
reader = TypeReader(typeName, streamer, bits, QHash<int, int>());
}
}
return *this;
}
case TypeReader::LIST_TYPE:
case TypeReader::SET_TYPE: {
TypeReader valueReader;
@ -728,7 +799,7 @@ Bitstream& Bitstream::operator>(TypeReader& reader) {
valueReader.matchesExactly(streamer->getValueStreamer())) {
reader = TypeReader(typeName, streamer);
} else {
reader = TypeReader(typeName, streamer, false, (TypeReader::Type)type, TypeReaderPointer(),
reader = TypeReader(typeName, streamer, (TypeReader::Type)type,
TypeReaderPointer(new TypeReader(valueReader)));
}
return *this;
@ -741,8 +812,8 @@ Bitstream& Bitstream::operator>(TypeReader& reader) {
valueReader.matchesExactly(streamer->getValueStreamer())) {
reader = TypeReader(typeName, streamer);
} else {
reader = TypeReader(typeName, streamer, false, TypeReader::MAP_TYPE,
TypeReaderPointer(new TypeReader(keyReader)), TypeReaderPointer(new TypeReader(valueReader)));
reader = TypeReader(typeName, streamer, TypeReaderPointer(new TypeReader(keyReader)),
TypeReaderPointer(new TypeReader(valueReader)));
}
return *this;
}
@ -800,23 +871,20 @@ Bitstream& Bitstream::operator>(TypeReader& reader) {
// if all fields are the same type and in the right order, we can use the (more efficient) default streamer
const QVector<MetaField>& localFields = streamer->getMetaFields();
if (fieldCount != localFields.size()) {
reader = TypeReader(typeName, streamer, false, TypeReader::STREAMABLE_TYPE,
TypeReaderPointer(), TypeReaderPointer(), fields);
reader = TypeReader(typeName, streamer, fields);
return *this;
}
for (int i = 0; i < fieldCount; i++) {
const FieldReader& fieldReader = fields.at(i);
if (!fieldReader.getReader().matchesExactly(localFields.at(i).getStreamer()) || fieldReader.getIndex() != i) {
reader = TypeReader(typeName, streamer, false, TypeReader::STREAMABLE_TYPE,
TypeReaderPointer(), TypeReaderPointer(), fields);
reader = TypeReader(typeName, streamer, fields);
return *this;
}
}
reader = TypeReader(typeName, streamer);
return *this;
}
reader = TypeReader(typeName, streamer, false, TypeReader::STREAMABLE_TYPE,
TypeReaderPointer(), TypeReaderPointer(), fields);
reader = TypeReader(typeName, streamer, fields);
return *this;
}
@ -847,9 +915,10 @@ Bitstream& Bitstream::operator<(const SharedObjectPointer& object) {
return *this << (int)0;
}
*this << object->getID();
QPointer<SharedObject> reference = _sharedObjectReferences.value(object->getID());
*this << object->getOriginID();
QPointer<SharedObject> reference = _sharedObjectReferences.value(object->getOriginID());
if (reference) {
writeRawDelta((QObject*)object.data(), (QObject*)reference.data());
writeRawDelta((const QObject*)object.data(), (const QObject*)reference.data());
} else {
*this << (QObject*)object.data();
}
@ -863,7 +932,9 @@ Bitstream& Bitstream::operator>(SharedObjectPointer& object) {
object = SharedObjectPointer();
return *this;
}
QPointer<SharedObject> reference = _sharedObjectReferences.value(id);
int originID;
*this >> originID;
QPointer<SharedObject> reference = _sharedObjectReferences.value(originID);
QPointer<SharedObject>& pointer = _weakSharedObjectHash[id];
if (pointer) {
ObjectReader objectReader;
@ -876,15 +947,19 @@ Bitstream& Bitstream::operator>(SharedObjectPointer& object) {
} else {
QObject* rawObject;
if (reference) {
readRawDelta(rawObject, (QObject*)reference.data());
readRawDelta(rawObject, (const QObject*)reference.data());
} else {
*this >> rawObject;
}
pointer = static_cast<SharedObject*>(rawObject);
if (pointer) {
if (reference) {
pointer->setOriginID(reference->getOriginID());
}
pointer->setRemoteID(id);
pointer->setRemoteOriginID(originID);
} else {
qDebug() << "Null object" << pointer << reference;
qDebug() << "Null object" << pointer << reference << id;
}
}
object = static_cast<SharedObject*>(pointer.data());
@ -893,7 +968,7 @@ Bitstream& Bitstream::operator>(SharedObjectPointer& object) {
void Bitstream::clearSharedObject(QObject* object) {
SharedObject* sharedObject = static_cast<SharedObject*>(object);
_sharedObjectReferences.remove(sharedObject->getID());
_sharedObjectReferences.remove(sharedObject->getOriginID());
int id = _sharedObjectStreamer.takePersistentID(sharedObject);
if (id != 0) {
emit sharedObjectCleared(id);
@ -915,41 +990,156 @@ QHash<int, const TypeStreamer*>& Bitstream::getTypeStreamers() {
return typeStreamers;
}
QVector<PropertyReader> Bitstream::getPropertyReaders(const QMetaObject* metaObject) {
QVector<PropertyReader> propertyReaders;
if (!metaObject) {
return propertyReaders;
}
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored()) {
continue;
const QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*>& Bitstream::getEnumStreamers() {
static QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*> enumStreamers = createEnumStreamers();
return enumStreamers;
}
QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*> Bitstream::createEnumStreamers() {
QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*> enumStreamers;
foreach (const QMetaObject* metaObject, getMetaObjects()) {
for (int i = 0; i < metaObject->enumeratorCount(); i++) {
QMetaEnum metaEnum = metaObject->enumerator(i);
const TypeStreamer*& streamer = enumStreamers[QPair<QByteArray, QByteArray>(metaEnum.scope(), metaEnum.name())];
if (!streamer) {
streamer = new EnumTypeStreamer(metaEnum);
}
}
const TypeStreamer* typeStreamer = getTypeStreamers().value(property.userType());
if (typeStreamer) {
propertyReaders.append(PropertyReader(TypeReader(QByteArray(), typeStreamer), property));
}
return enumStreamers;
}
const QHash<QByteArray, const TypeStreamer*>& Bitstream::getEnumStreamersByName() {
static QHash<QByteArray, const TypeStreamer*> enumStreamersByName = createEnumStreamersByName();
return enumStreamersByName;
}
QHash<QByteArray, const TypeStreamer*> Bitstream::createEnumStreamersByName() {
QHash<QByteArray, const TypeStreamer*> enumStreamersByName;
foreach (const TypeStreamer* streamer, getEnumStreamers()) {
enumStreamersByName.insert(streamer->getName(), streamer);
}
return enumStreamersByName;
}
const QHash<const QMetaObject*, QVector<PropertyReader> >& Bitstream::getPropertyReaders() {
static QHash<const QMetaObject*, QVector<PropertyReader> > propertyReaders = createPropertyReaders();
return propertyReaders;
}
QHash<const QMetaObject*, QVector<PropertyReader> > Bitstream::createPropertyReaders() {
QHash<const QMetaObject*, QVector<PropertyReader> > propertyReaders;
foreach (const QMetaObject* metaObject, getMetaObjects()) {
QVector<PropertyReader>& readers = propertyReaders[metaObject];
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored()) {
continue;
}
const TypeStreamer* streamer;
if (property.isEnumType()) {
QMetaEnum metaEnum = property.enumerator();
streamer = getEnumStreamers().value(QPair<QByteArray, QByteArray>(
QByteArray::fromRawData(metaEnum.scope(), strlen(metaEnum.scope())),
QByteArray::fromRawData(metaEnum.name(), strlen(metaEnum.name()))));
} else {
streamer = getTypeStreamers().value(property.userType());
}
if (streamer) {
readers.append(PropertyReader(TypeReader(QByteArray(), streamer), property));
}
}
}
return propertyReaders;
}
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, bool exactMatch, Type type,
const TypeReaderPointer& keyReader, const TypeReaderPointer& valueReader, const QVector<FieldReader>& fields) :
const QHash<const QMetaObject*, QVector<PropertyWriter> >& Bitstream::getPropertyWriters() {
static QHash<const QMetaObject*, QVector<PropertyWriter> > propertyWriters = createPropertyWriters();
return propertyWriters;
}
QHash<const QMetaObject*, QVector<PropertyWriter> > Bitstream::createPropertyWriters() {
QHash<const QMetaObject*, QVector<PropertyWriter> > propertyWriters;
foreach (const QMetaObject* metaObject, getMetaObjects()) {
QVector<PropertyWriter>& writers = propertyWriters[metaObject];
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (!property.isStored()) {
continue;
}
const TypeStreamer* streamer;
if (property.isEnumType()) {
QMetaEnum metaEnum = property.enumerator();
streamer = getEnumStreamers().value(QPair<QByteArray, QByteArray>(
QByteArray::fromRawData(metaEnum.scope(), strlen(metaEnum.scope())),
QByteArray::fromRawData(metaEnum.name(), strlen(metaEnum.name()))));
} else {
streamer = getTypeStreamers().value(property.userType());
}
if (streamer) {
writers.append(PropertyWriter(property, streamer));
}
}
}
return propertyWriters;
}
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer) :
_typeName(typeName),
_streamer(streamer),
_exactMatch(exactMatch),
_type(type),
_keyReader(keyReader),
_valueReader(valueReader),
_exactMatch(true) {
}
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, int bits, const QHash<int, int>& mappings) :
_typeName(typeName),
_streamer(streamer),
_exactMatch(false),
_type(ENUM_TYPE),
_bits(bits),
_mappings(mappings) {
}
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, const QVector<FieldReader>& fields) :
_typeName(typeName),
_streamer(streamer),
_exactMatch(false),
_type(STREAMABLE_TYPE),
_fields(fields) {
}
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer,
Type type, const TypeReaderPointer& valueReader) :
_typeName(typeName),
_streamer(streamer),
_exactMatch(false),
_type(type),
_valueReader(valueReader) {
}
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer,
const TypeReaderPointer& keyReader, const TypeReaderPointer& valueReader) :
_typeName(typeName),
_streamer(streamer),
_exactMatch(false),
_type(MAP_TYPE),
_keyReader(keyReader),
_valueReader(valueReader) {
}
QVariant TypeReader::read(Bitstream& in) const {
if (_exactMatch) {
return _streamer->read(in);
}
QVariant object = _streamer ? QVariant(_streamer->getType(), 0) : QVariant();
switch (_type) {
case ENUM_TYPE: {
int value = 0;
in.read(&value, _bits);
if (_streamer) {
_streamer->setEnumValue(object, value, _mappings);
}
break;
}
case STREAMABLE_TYPE: {
foreach (const FieldReader& field, _fields) {
field.read(in, _streamer, object);
@ -1006,6 +1196,14 @@ void TypeReader::readRawDelta(Bitstream& in, QVariant& object, const QVariant& r
return;
}
switch (_type) {
case ENUM_TYPE: {
int value = 0;
in.read(&value, _bits);
if (_streamer) {
_streamer->setEnumValue(object, value, _mappings);
}
break;
}
case STREAMABLE_TYPE: {
foreach (const FieldReader& field, _fields) {
field.readDelta(in, _streamer, object, reference);
@ -1099,6 +1297,10 @@ uint qHash(const TypeReader& typeReader, uint seed) {
return qHash(typeReader.getTypeName(), seed);
}
QDebug& operator<<(QDebug& debug, const TypeReader& typeReader) {
return debug << typeReader.getTypeName();
}
FieldReader::FieldReader(const TypeReader& reader, int index) :
_reader(reader),
_index(index) {
@ -1152,6 +1354,10 @@ uint qHash(const ObjectReader& objectReader, uint seed) {
return qHash(objectReader.getClassName(), seed);
}
QDebug& operator<<(QDebug& debug, const ObjectReader& objectReader) {
return debug << objectReader.getClassName();
}
PropertyReader::PropertyReader(const TypeReader& reader, const QMetaProperty& property) :
_reader(reader),
_property(property) {
@ -1172,6 +1378,20 @@ void PropertyReader::readDelta(Bitstream& in, QObject* object, const QObject* re
}
}
PropertyWriter::PropertyWriter(const QMetaProperty& property, const TypeStreamer* streamer) :
_property(property),
_streamer(streamer) {
}
void PropertyWriter::write(Bitstream& out, const QObject* object) const {
_streamer->write(out, _property.read(object));
}
void PropertyWriter::writeDelta(Bitstream& out, const QObject* object, const QObject* reference) const {
_streamer->writeDelta(out, _property.read(object), reference && object->metaObject() == reference->metaObject() ?
_property.read(reference) : QVariant());
}
MetaField::MetaField(const QByteArray& name, const TypeStreamer* streamer) :
_name(name),
_streamer(streamer) {
@ -1180,6 +1400,14 @@ MetaField::MetaField(const QByteArray& name, const TypeStreamer* streamer) :
TypeStreamer::~TypeStreamer() {
}
const char* TypeStreamer::getName() const {
return QMetaType::typeName(_type);
}
void TypeStreamer::setEnumValue(QVariant& object, int value, const QHash<int, int>& mappings) const {
// nothing by default
}
const QVector<MetaField>& TypeStreamer::getMetaFields() const {
static QVector<MetaField> emptyMetaFields;
return emptyMetaFields;
@ -1201,6 +1429,14 @@ TypeReader::Type TypeStreamer::getReaderType() const {
return TypeReader::SIMPLE_TYPE;
}
int TypeStreamer::getBits() const {
return 0;
}
QMetaEnum TypeStreamer::getMetaEnum() const {
return QMetaEnum();
}
const TypeStreamer* TypeStreamer::getKeyStreamer() const {
return NULL;
}
@ -1236,3 +1472,119 @@ QVariant TypeStreamer::getValue(const QVariant& object, int index) const {
void TypeStreamer::setValue(QVariant& object, int index, const QVariant& value) const {
// nothing by default
}
QDebug& operator<<(QDebug& debug, const TypeStreamer* typeStreamer) {
return debug << (typeStreamer ? QMetaType::typeName(typeStreamer->getType()) : "null");
}
QDebug& operator<<(QDebug& debug, const QMetaObject* metaObject) {
return debug << (metaObject ? metaObject->className() : "null");
}
EnumTypeStreamer::EnumTypeStreamer(const QMetaObject* metaObject, const char* name) :
_metaObject(metaObject),
_enumName(name),
_name(QByteArray(metaObject->className()) + "::" + name),
_bits(-1) {
setType(QMetaType::Int);
}
EnumTypeStreamer::EnumTypeStreamer(const QMetaEnum& metaEnum) :
_name(QByteArray(metaEnum.scope()) + "::" + metaEnum.name()),
_metaEnum(metaEnum),
_bits(-1) {
setType(QMetaType::Int);
}
const char* EnumTypeStreamer::getName() const {
return _name.constData();
}
TypeReader::Type EnumTypeStreamer::getReaderType() const {
return TypeReader::ENUM_TYPE;
}
int EnumTypeStreamer::getBits() const {
if (_bits == -1) {
int highestValue = 0;
QMetaEnum metaEnum = getMetaEnum();
for (int j = 0; j < metaEnum.keyCount(); j++) {
highestValue = qMax(highestValue, metaEnum.value(j));
}
const_cast<EnumTypeStreamer*>(this)->_bits = getBitsForHighestValue(highestValue);
}
return _bits;
}
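As a worked example (assuming getBitsForHighestValue(n) returns the number of bits needed to represent n itself):
// TestSharedObjectA::TestEnum below has the values 0, 1, 2 -> highest value 2 -> 2 bits.
// TestSharedObjectA::TestFlags has the bits 0x01, 0x02, 0x04 -> highest value 4 -> 3 bits,
// which also covers every combination of the three flags.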
QMetaEnum EnumTypeStreamer::getMetaEnum() const {
if (!_metaEnum.isValid()) {
const_cast<EnumTypeStreamer*>(this)->_metaEnum = _metaObject->enumerator(_metaObject->indexOfEnumerator(_enumName));
}
return _metaEnum;
}
bool EnumTypeStreamer::equal(const QVariant& first, const QVariant& second) const {
return first.toInt() == second.toInt();
}
void EnumTypeStreamer::write(Bitstream& out, const QVariant& value) const {
int intValue = value.toInt();
out.write(&intValue, getBits());
}
QVariant EnumTypeStreamer::read(Bitstream& in) const {
int intValue = 0;
in.read(&intValue, getBits());
return intValue;
}
void EnumTypeStreamer::writeDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const {
int intValue = value.toInt(), intReference = reference.toInt();
if (intValue == intReference) {
out << false;
} else {
out << true;
out.write(&intValue, getBits());
}
}
void EnumTypeStreamer::readDelta(Bitstream& in, QVariant& value, const QVariant& reference) const {
bool changed;
in >> changed;
if (changed) {
int intValue = 0;
in.read(&intValue, getBits());
value = intValue;
} else {
value = reference;
}
}
void EnumTypeStreamer::writeRawDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const {
int intValue = value.toInt();
out.write(&intValue, getBits());
}
void EnumTypeStreamer::readRawDelta(Bitstream& in, QVariant& value, const QVariant& reference) const {
int intValue = 0;
in.read(&intValue, getBits());
value = intValue;
}
void EnumTypeStreamer::setEnumValue(QVariant& object, int value, const QHash<int, int>& mappings) const {
if (getMetaEnum().isFlag()) {
int combined = 0;
for (QHash<int, int>::const_iterator it = mappings.constBegin(); it != mappings.constEnd(); it++) {
if (value & it.key()) {
combined |= it.value();
}
}
object = combined;
} else {
object = mappings.value(value);
}
}
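For illustration, a worked run of the flag-remapping branch above, assuming the mappings pair enumerators up by name (which is what the metavoxel tests later in this commit expect when TestSharedObjectA::TestFlags is substituted for TestSharedObjectB::TestFlags):
// incoming value 0x05 (FIRST_TEST_FLAG | THIRD_TEST_FLAG in the sender's enum),
// mappings { 0x01 -> 0x02, 0x02 -> 0x04, 0x04 -> 0x08 }:
//   0x05 & 0x01 -> combined |= 0x02
//   0x05 & 0x02 -> bit not set, skipped
//   0x05 & 0x04 -> combined |= 0x08
// object becomes 0x0A, i.e. FIRST_TEST_FLAG | THIRD_TEST_FLAG in the local enum.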

View file

@ -38,6 +38,7 @@ class FieldReader;
class ObjectReader;
class OwnedAttributeValue;
class PropertyReader;
class PropertyWriter;
class TypeReader;
class TypeStreamer;
@ -235,6 +236,9 @@ public:
/// Substitutes the supplied type for the given type name's default mapping.
void addTypeSubstitution(const QByteArray& typeName, int type);
/// Substitutes the named type for the given type name's default mapping.
void addTypeSubstitution(const QByteArray& typeName, const char* replacementTypeName);
/// Writes a set of bits to the underlying stream.
/// \param bits the number of bits to write
/// \param offset the offset of the first bit
@ -294,6 +298,9 @@ public:
template<class T> void writeRawDelta(const QList<T>& value, const QList<T>& reference);
template<class T> void readRawDelta(QList<T>& value, const QList<T>& reference);
template<class T> void writeRawDelta(const QVector<T>& value, const QVector<T>& reference);
template<class T> void readRawDelta(QVector<T>& value, const QVector<T>& reference);
template<class T> void writeRawDelta(const QSet<T>& value, const QSet<T>& reference);
template<class T> void readRawDelta(QSet<T>& value, const QSet<T>& reference);
@ -339,6 +346,9 @@ public:
template<class T> Bitstream& operator<<(const QList<T>& list);
template<class T> Bitstream& operator>>(QList<T>& list);
template<class T> Bitstream& operator<<(const QVector<T>& list);
template<class T> Bitstream& operator>>(QVector<T>& list);
template<class T> Bitstream& operator<<(const QSet<T>& set);
template<class T> Bitstream& operator>>(QSet<T>& set);
@ -412,7 +422,14 @@ private:
static QHash<QByteArray, const QMetaObject*>& getMetaObjects();
static QMultiHash<const QMetaObject*, const QMetaObject*>& getMetaObjectSubClasses();
static QHash<int, const TypeStreamer*>& getTypeStreamers();
static QVector<PropertyReader> getPropertyReaders(const QMetaObject* metaObject);
static const QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*>& getEnumStreamers();
static QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*> createEnumStreamers();
static const QHash<QByteArray, const TypeStreamer*>& getEnumStreamersByName();
static QHash<QByteArray, const TypeStreamer*> createEnumStreamersByName();
static const QHash<const QMetaObject*, QVector<PropertyReader> >& getPropertyReaders();
static QHash<const QMetaObject*, QVector<PropertyReader> > createPropertyReaders();
static const QHash<const QMetaObject*, QVector<PropertyWriter> >& getPropertyWriters();
static QHash<const QMetaObject*, QVector<PropertyWriter> > createPropertyWriters();
};
template<class T> inline void Bitstream::writeDelta(const T& value, const T& reference) {
@ -472,6 +489,36 @@ template<class T> inline void Bitstream::readRawDelta(QList<T>& value, const QLi
}
}
template<class T> inline void Bitstream::writeRawDelta(const QVector<T>& value, const QVector<T>& reference) {
*this << value.size();
*this << reference.size();
for (int i = 0; i < value.size(); i++) {
if (i < reference.size()) {
writeDelta(value.at(i), reference.at(i));
} else {
*this << value.at(i);
}
}
}
template<class T> inline void Bitstream::readRawDelta(QVector<T>& value, const QVector<T>& reference) {
value = reference;
int size, referenceSize;
*this >> size >> referenceSize;
if (size < value.size()) {
value.erase(value.begin() + size, value.end());
}
for (int i = 0; i < size; i++) {
if (i < referenceSize) {
readDelta(value[i], reference.at(i));
} else {
T element;
*this >> element;
value.append(element);
}
}
}
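A worked example of the QVector delta pair above, with made-up contents:
// writeRawDelta({1, 2, 5, 7}, reference {1, 2, 3}) streams size 4 and reference size 3,
// then writeDelta(1,1), writeDelta(2,2), writeDelta(5,3) for the overlapping indices and
// the raw value 7 for the appended element.
// readRawDelta starts from the reference {1, 2, 3}, keeps all of it (4 >= 3), applies the
// three deltas in place, and appends 7, reproducing {1, 2, 5, 7}.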
template<class T> inline void Bitstream::writeRawDelta(const QSet<T>& value, const QSet<T>& reference) {
int addedOrRemoved = 0;
foreach (const T& element, value) {
@ -600,6 +647,27 @@ template<class T> inline Bitstream& Bitstream::operator>>(QList<T>& list) {
return *this;
}
template<class T> inline Bitstream& Bitstream::operator<<(const QVector<T>& vector) {
*this << vector.size();
foreach (const T& entry, vector) {
*this << entry;
}
return *this;
}
template<class T> inline Bitstream& Bitstream::operator>>(QVector<T>& vector) {
int size;
*this >> size;
vector.clear();
vector.reserve(size);
for (int i = 0; i < size; i++) {
T entry;
*this >> entry;
vector.append(entry);
}
return *this;
}
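A minimal round-trip sketch for the new QVector operators, following the pattern of testSerialization later in this commit; the contents are made up, and the flush() call is an assumption about how the buffered final byte reaches the array (the tests do the equivalent before reading back):
QByteArray array;
QDataStream outStream(&array, QIODevice::WriteOnly);
Bitstream out(outStream, Bitstream::FULL_METADATA);
QVector<int> written;
written << 1 << 2 << 3;
out << written;                      // writes the size, then each element
out.flush();                         // assumption: push any buffered bits to 'array'
QDataStream inStream(array);
Bitstream in(inStream, Bitstream::FULL_METADATA);
QVector<int> read;
in >> read;                          // read now contains 1, 2, 3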
template<class T> inline Bitstream& Bitstream::operator<<(const QSet<T>& set) {
*this << set.size();
foreach (const T& entry, set) {
@ -651,13 +719,20 @@ typedef QSharedPointer<TypeReader> TypeReaderPointer;
class TypeReader {
public:
enum Type { SIMPLE_TYPE, STREAMABLE_TYPE, LIST_TYPE, SET_TYPE, MAP_TYPE };
enum Type { SIMPLE_TYPE, ENUM_TYPE, STREAMABLE_TYPE, LIST_TYPE, SET_TYPE, MAP_TYPE };
TypeReader(const QByteArray& typeName = QByteArray(), const TypeStreamer* streamer = NULL, bool exactMatch = true,
Type type = SIMPLE_TYPE, const TypeReaderPointer& keyReader = TypeReaderPointer(),
const TypeReaderPointer& valueReader = TypeReaderPointer(),
const QVector<FieldReader>& fields = QVector<FieldReader>());
TypeReader(const QByteArray& typeName = QByteArray(), const TypeStreamer* streamer = NULL);
TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, int bits, const QHash<int, int>& mappings);
TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, const QVector<FieldReader>& fields);
TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, Type type,
const TypeReaderPointer& valueReader);
TypeReader(const QByteArray& typeName, const TypeStreamer* streamer,
const TypeReaderPointer& keyReader, const TypeReaderPointer& valueReader);
const QByteArray& getTypeName() const { return _typeName; }
const TypeStreamer* getStreamer() const { return _streamer; }
@ -676,6 +751,8 @@ private:
const TypeStreamer* _streamer;
bool _exactMatch;
Type _type;
int _bits;
QHash<int, int> _mappings;
TypeReaderPointer _keyReader;
TypeReaderPointer _valueReader;
QVector<FieldReader> _fields;
@ -683,6 +760,8 @@ private:
uint qHash(const TypeReader& typeReader, uint seed = 0);
QDebug& operator<<(QDebug& debug, const TypeReader& typeReader);
/// Contains the information required to read a metatype field from the stream and apply it.
class FieldReader {
public:
@ -726,6 +805,8 @@ private:
uint qHash(const ObjectReader& objectReader, uint seed = 0);
QDebug& operator<<(QDebug& debug, const ObjectReader& objectReader);
/// Contains the information required to read an object property from the stream and apply it.
class PropertyReader {
public:
@ -743,6 +824,24 @@ private:
QMetaProperty _property;
};
/// Contains the information required to obtain an object property and write it to the stream.
class PropertyWriter {
public:
PropertyWriter(const QMetaProperty& property = QMetaProperty(), const TypeStreamer* streamer = NULL);
const QMetaProperty& getProperty() const { return _property; }
const TypeStreamer* getStreamer() const { return _streamer; }
void write(Bitstream& out, const QObject* object) const;
void writeDelta(Bitstream& out, const QObject* object, const QObject* reference) const;
private:
QMetaProperty _property;
const TypeStreamer* _streamer;
};
/// Describes a metatype field.
class MetaField {
public:
@ -772,6 +871,8 @@ public:
void setType(int type) { _type = type; }
int getType() const { return _type; }
virtual const char* getName() const;
virtual bool equal(const QVariant& first, const QVariant& second) const = 0;
virtual void write(Bitstream& out, const QVariant& value) const = 0;
@ -783,6 +884,8 @@ public:
virtual void writeRawDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const = 0;
virtual void readRawDelta(Bitstream& in, QVariant& value, const QVariant& reference) const = 0;
virtual void setEnumValue(QVariant& object, int value, const QHash<int, int>& mappings) const;
virtual const QVector<MetaField>& getMetaFields() const;
virtual int getFieldIndex(const QByteArray& name) const;
virtual void setField(QVariant& object, int index, const QVariant& value) const;
@ -790,6 +893,9 @@ public:
virtual TypeReader::Type getReaderType() const;
virtual int getBits() const;
virtual QMetaEnum getMetaEnum() const;
virtual const TypeStreamer* getKeyStreamer() const;
virtual const TypeStreamer* getValueStreamer() const;
@ -808,6 +914,10 @@ private:
int _type;
};
QDebug& operator<<(QDebug& debug, const TypeStreamer* typeStreamer);
QDebug& operator<<(QDebug& debug, const QMetaObject* metaObject);
/// A streamer that works with Bitstream's operators.
template<class T> class SimpleTypeStreamer : public TypeStreamer {
public:
@ -818,11 +928,40 @@ public:
virtual void writeDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const {
out.writeDelta(value.value<T>(), reference.value<T>()); }
virtual void readDelta(Bitstream& in, QVariant& value, const QVariant& reference) const {
in.readDelta(*static_cast<T*>(value.data()), reference.value<T>()); }
T rawValue; in.readDelta(rawValue, reference.value<T>()); value = QVariant::fromValue(rawValue); }
virtual void writeRawDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const {
out.writeRawDelta(value.value<T>(), reference.value<T>()); }
virtual void readRawDelta(Bitstream& in, QVariant& value, const QVariant& reference) const {
in.readRawDelta(*static_cast<T*>(value.data()), reference.value<T>()); }
T rawValue; in.readRawDelta(rawValue, reference.value<T>()); value = QVariant::fromValue(rawValue); }
};
/// A streamer class for enumerated types.
class EnumTypeStreamer : public TypeStreamer {
public:
EnumTypeStreamer(const QMetaObject* metaObject, const char* name);
EnumTypeStreamer(const QMetaEnum& metaEnum);
virtual const char* getName() const;
virtual TypeReader::Type getReaderType() const;
virtual int getBits() const;
virtual QMetaEnum getMetaEnum() const;
virtual bool equal(const QVariant& first, const QVariant& second) const;
virtual void write(Bitstream& out, const QVariant& value) const;
virtual QVariant read(Bitstream& in) const;
virtual void writeDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const;
virtual void readDelta(Bitstream& in, QVariant& value, const QVariant& reference) const;
virtual void writeRawDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const;
virtual void readRawDelta(Bitstream& in, QVariant& value, const QVariant& reference) const;
virtual void setEnumValue(QVariant& object, int value, const QHash<int, int>& mappings) const;
private:
const QMetaObject* _metaObject;
const char* _enumName;
QByteArray _name;
QMetaEnum _metaEnum;
int _bits;
};
/// A streamer for types compiled by mtc.
@ -858,6 +997,22 @@ public:
static_cast<QList<T>*>(object.data())->replace(index, value.value<T>()); }
};
/// A streamer for vector types.
template<class T> class CollectionTypeStreamer<QVector<T> > : public SimpleTypeStreamer<QVector<T> > {
public:
virtual TypeReader::Type getReaderType() const { return TypeReader::LIST_TYPE; }
virtual const TypeStreamer* getValueStreamer() const { return Bitstream::getTypeStreamer(qMetaTypeId<T>()); }
virtual void insert(QVariant& object, const QVariant& value) const {
static_cast<QVector<T>*>(object.data())->append(value.value<T>()); }
virtual void prune(QVariant& object, int size) const {
QVector<T>* list = static_cast<QVector<T>*>(object.data()); list->erase(list->begin() + size, list->end()); }
virtual QVariant getValue(const QVariant& object, int index) const {
return QVariant::fromValue(static_cast<const QVector<T>*>(object.constData())->at(index)); }
virtual void setValue(QVariant& object, int index, const QVariant& value) const {
static_cast<QVector<T>*>(object.data())->replace(index, value.value<T>()); }
};
/// A streamer for set types.
template<class T> class CollectionTypeStreamer<QSet<T> > : public SimpleTypeStreamer<QSet<T> > {
public:
@ -886,12 +1041,12 @@ public:
};
/// Macro for registering simple type streamers.
#define REGISTER_SIMPLE_TYPE_STREAMER(x) static int x##Streamer = \
Bitstream::registerTypeStreamer(qMetaTypeId<x>(), new SimpleTypeStreamer<x>());
#define REGISTER_SIMPLE_TYPE_STREAMER(X) static int X##Streamer = \
Bitstream::registerTypeStreamer(qMetaTypeId<X>(), new SimpleTypeStreamer<X>());
/// Macro for registering collection type streamers.
#define REGISTER_COLLECTION_TYPE_STREAMER(x) static int x##Streamer = \
Bitstream::registerTypeStreamer(qMetaTypeId<x>(), new CollectionTypeStreamer<x>());
#define REGISTER_COLLECTION_TYPE_STREAMER(X) static int X##Streamer = \
Bitstream::registerTypeStreamer(qMetaTypeId<X>(), new CollectionTypeStreamer<X>());
/// Declares the metatype and the streaming operators. The last lines
/// ensure that the generated file will be included in the link phase.
@ -926,20 +1081,56 @@ public:
_Pragma(STRINGIFY(unused(_TypePtr##X)))
#endif
#define DECLARE_ENUM_METATYPE(S, N) Q_DECLARE_METATYPE(S::N) \
Bitstream& operator<<(Bitstream& out, const S::N& obj); \
Bitstream& operator>>(Bitstream& in, S::N& obj); \
template<> inline void Bitstream::writeRawDelta(const S::N& value, const S::N& reference) { *this << value; } \
template<> inline void Bitstream::readRawDelta(S::N& value, const S::N& reference) { *this >> value; }
#define IMPLEMENT_ENUM_METATYPE(S, N) \
static int S##N##MetaTypeId = registerEnumMetaType<S::N>(&S::staticMetaObject, #N); \
Bitstream& operator<<(Bitstream& out, const S::N& obj) { \
static int bits = Bitstream::getTypeStreamer(qMetaTypeId<S::N>())->getBits(); \
return out.write(&obj, bits); \
} \
Bitstream& operator>>(Bitstream& in, S::N& obj) { \
static int bits = Bitstream::getTypeStreamer(qMetaTypeId<S::N>())->getBits(); \
obj = (S::N)0; \
return in.read(&obj, bits); \
}
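The metavoxel tests later in this commit exercise these macros; the pattern, using names from those tests, is:
// in the header, after the class that declares the enum:
DECLARE_ENUM_METATYPE(TestSharedObjectA, TestEnum)
// in the .cpp:
IMPLEMENT_ENUM_METATYPE(TestSharedObjectA, TestEnum)
// values of TestSharedObjectA::TestEnum can then be streamed directly, and each one
// occupies only getBits() bits on the wire.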
/// Registers a simple type and its streamer.
/// \return the metatype id
template<class T> int registerSimpleMetaType() {
int type = qRegisterMetaType<T>();
Bitstream::registerTypeStreamer(type, new SimpleTypeStreamer<T>());
return type;
}
/// Registers an enum type and its streamer.
/// \return the metatype id
template<class T> int registerEnumMetaType(const QMetaObject* metaObject, const char* name) {
int type = qRegisterMetaType<T>();
Bitstream::registerTypeStreamer(type, new EnumTypeStreamer(metaObject, name));
return type;
}
/// Registers a streamable type and its streamer.
/// \return the metatype id
template<class T> int registerStreamableMetaType() {
int type = qRegisterMetaType<T>();
Bitstream::registerTypeStreamer(type, new StreamableTypeStreamer<T>());
return type;
}
/// Registers a collection type and its streamer.
/// \return the metatype id
template<class T> int registerCollectionMetaType() {
int type = qRegisterMetaType<T>();
Bitstream::registerTypeStreamer(type, new CollectionTypeStreamer<T>());
return type;
}
/// Flags a class as streamable (use as you would Q_OBJECT).
#define STREAMABLE public: \
static const int Type; \

View file

@ -19,7 +19,7 @@
#include <QSet>
#include <QVector>
#include "Bitstream.h"
#include "AttributeRegistry.h"
class ReliableChannel;

View file

@ -23,7 +23,9 @@ REGISTER_META_OBJECT(SharedObject)
SharedObject::SharedObject() :
_id(++_lastID),
_remoteID(0) {
_originID(_id),
_remoteID(0),
_remoteOriginID(0) {
_weakHash.insert(_id, this);
}
@ -39,26 +41,33 @@ void SharedObject::decrementReferenceCount() {
}
}
SharedObject* SharedObject::clone(bool withID) const {
SharedObject* SharedObject::clone(bool withID, SharedObject* target) const {
// default behavior is to make a copy using the no-arg constructor and copy the stored properties
const QMetaObject* metaObject = this->metaObject();
SharedObject* newObject = static_cast<SharedObject*>(metaObject->newInstance());
if (!target) {
target = static_cast<SharedObject*>(metaObject->newInstance());
}
for (int i = 0; i < metaObject->propertyCount(); i++) {
QMetaProperty property = metaObject->property(i);
if (property.isStored()) {
property.write(newObject, property.read(this));
if (property.userType() == qMetaTypeId<SharedObjectPointer>()) {
SharedObject* value = property.read(this).value<SharedObjectPointer>().data();
property.write(target, QVariant::fromValue(value ? value->clone(withID) : value));
} else {
property.write(target, property.read(this));
}
}
}
foreach (const QByteArray& propertyName, dynamicPropertyNames()) {
newObject->setProperty(propertyName, property(propertyName));
target->setProperty(propertyName, property(propertyName));
}
if (withID) {
newObject->setID(_id);
target->setOriginID(_originID);
}
return newObject;
return target;
}
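The tests later in this commit use the new signature like this (condensed from mutate()); presumably the point of carrying the origin ID is that a mutated clone still delta-encodes against the same acknowledged reference:
SharedObjectPointer newState = state->clone(true);   // deep-copies shared-object properties, keeps the origin ID
static_cast<TestSharedObjectA*>(newState.data())->setFoo(randFloat());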
bool SharedObject::equals(const SharedObject* other) const {
bool SharedObject::equals(const SharedObject* other, bool sharedAncestry) const {
if (!other) {
return false;
}
@ -67,7 +76,7 @@ bool SharedObject::equals(const SharedObject* other) const {
}
// default behavior is to compare the properties
const QMetaObject* metaObject = this->metaObject();
if (metaObject != other->metaObject()) {
if (metaObject != other->metaObject() && !sharedAncestry) {
return false;
}
for (int i = 0; i < metaObject->propertyCount(); i++) {
@ -92,13 +101,15 @@ void SharedObject::dump(QDebug debug) const {
debug << this;
const QMetaObject* metaObject = this->metaObject();
for (int i = 0; i < metaObject->propertyCount(); i++) {
debug << metaObject->property(i).name() << metaObject->property(i).read(this);
QMetaProperty property = metaObject->property(i);
if (property.isStored()) {
debug << property.name() << property.read(this);
}
}
QList<QByteArray> dynamicPropertyNames = this->dynamicPropertyNames();
foreach (const QByteArray& propertyName, dynamicPropertyNames) {
debug << propertyName << property(propertyName);
}
}
void SharedObject::setID(int id) {
_weakHash.remove(_id);
_weakHash.insert(_id = id, this);
}
int SharedObject::_lastID = 0;

View file

@ -41,31 +41,44 @@ public:
/// Returns the unique local ID for this object.
int getID() const { return _id; }
/// Returns the local origin ID for this object.
int getOriginID() const { return _originID; }
void setOriginID(int originID) { _originID = originID; }
/// Returns the unique remote ID for this object, or zero if this is a local object.
int getRemoteID() const { return _remoteID; }
void setRemoteID(int remoteID) { _remoteID = remoteID; }
/// Returns the remote origin ID for this object, or zero if this is a local object.
int getRemoteOriginID() const { return _remoteOriginID; }
void setRemoteOriginID(int remoteOriginID) { _remoteOriginID = remoteOriginID; }
int getReferenceCount() const { return _referenceCount.load(); }
void incrementReferenceCount();
void decrementReferenceCount();
/// Creates a new clone of this object.
/// \param withID if true, give the clone the same ID as this object
virtual SharedObject* clone(bool withID = false) const;
/// \param withID if true, give the clone the same origin ID as this object
/// \param target if non-NULL, a target object to populate (as opposed to creating a new instance of this object's class)
virtual SharedObject* clone(bool withID = false, SharedObject* target = NULL) const;
/// Tests this object for equality with another.
virtual bool equals(const SharedObject* other) const;
/// \param sharedAncestry if true and the classes of the objects differ, compare their shared ancestry (assuming that
/// this is an instance of a superclass of the other object's class) rather than simply returning false.
virtual bool equals(const SharedObject* other, bool sharedAncestry = false) const;
// Dumps the contents of this object to the debug output.
virtual void dump(QDebug debug = QDebug(QtDebugMsg)) const;
private:
void setID(int id);
int _id;
int _originID;
int _remoteID;
int _remoteOriginID;
QAtomicInt _referenceCount;
static int _lastID;

View file

@ -185,7 +185,7 @@ void ModelTree::addModel(const ModelItemID& modelID, const ModelItemProperties&
glm::vec3 position = model.getPosition();
float size = std::max(MINIMUM_MODEL_ELEMENT_SIZE, model.getRadius());
ModelTreeElement* element = (ModelTreeElement*)getOrCreateChildElementAt(position.x, position.y, position.z, size);
ModelTreeElement* element = static_cast<ModelTreeElement*>(getOrCreateChildElementAt(position.x, position.y, position.z, size));
element->storeModel(model);
_isDirty = true;

View file

@ -313,7 +313,7 @@ void AccountManager::requestAccessToken(const QString& login, const QString& pas
QByteArray postData;
postData.append("grant_type=password&");
postData.append("username=" + login + "&");
postData.append("password=" + password + "&");
postData.append("password=" + QUrl::toPercentEncoding(password) + "&");
postData.append("scope=" + ACCOUNT_MANAGER_REQUESTED_SCOPE);
request.setUrl(grantURL);

View file

@ -57,7 +57,8 @@ Node::Node(const QUuid& uuid, NodeType_t type, const HifiSockAddr& publicSocket,
_linkedData(NULL),
_isAlive(true),
_clockSkewUsec(0),
_mutex()
_mutex(),
_clockSkewMovingPercentile(30, 0.8f) // moving 80th percentile of 30 samples
{
}
@ -133,6 +134,11 @@ float Node::getAverageKilobitsPerSecond() {
}
}
void Node::updateClockSkewUsec(int clockSkewSample) {
_clockSkewMovingPercentile.updatePercentile((float)clockSkewSample);
_clockSkewUsec = (int)_clockSkewMovingPercentile.getValueAtPercentile();
}
QDataStream& operator<<(QDataStream& out, const Node& node) {
out << node._type;
out << node._uuid;

View file

@ -23,6 +23,7 @@
#include "HifiSockAddr.h"
#include "NodeData.h"
#include "SimpleMovingAverage.h"
#include "MovingPercentile.h"
typedef quint8 NodeType_t;
@ -94,7 +95,7 @@ public:
void setPingMs(int pingMs) { _pingMs = pingMs; }
int getClockSkewUsec() const { return _clockSkewUsec; }
void setClockSkewUsec(int clockSkew) { _clockSkewUsec = clockSkew; }
void updateClockSkewUsec(int clockSkewSample);
QMutex& getMutex() { return _mutex; }
friend QDataStream& operator<<(QDataStream& out, const Node& node);
@ -120,6 +121,7 @@ private:
int _pingMs;
int _clockSkewUsec;
QMutex _mutex;
MovingPercentile _clockSkewMovingPercentile;
};
QDebug operator<<(QDebug debug, const Node &message);

View file

@ -96,8 +96,8 @@ void NodeList::timePingReply(const QByteArray& packet, const SharedNodePointer&
int clockSkew = othersReplyTime - othersExprectedReply;
sendingNode->setPingMs(pingTime / 1000);
sendingNode->setClockSkewUsec(clockSkew);
sendingNode->updateClockSkewUsec(clockSkew);
const bool wantDebug = false;
if (wantDebug) {

View file

@ -47,6 +47,10 @@ int packArithmeticallyCodedValue(int value, char* destination) {
PacketVersion versionForPacketType(PacketType type) {
switch (type) {
case PacketTypeMicrophoneAudioNoEcho:
case PacketTypeMicrophoneAudioWithEcho:
case PacketTypeSilentAudioFrame:
return 1;
case PacketTypeAvatarData:
return 3;
case PacketTypeAvatarIdentity:
@ -104,7 +108,7 @@ int populatePacketHeader(char* packet, PacketType type, const QUuid& connectionU
position += NUM_BYTES_RFC4122_UUID;
if (!NON_VERIFIED_PACKETS.contains(type)) {
// pack 16 bytes of zeros where the md5 hash will be placed one data is packed
// pack 16 bytes of zeros where the md5 hash will be placed once data is packed
memset(position, 0, NUM_BYTES_MD5_HASH);
position += NUM_BYTES_MD5_HASH;
}

View file

@ -66,6 +66,7 @@ enum PacketType {
PacketTypeModelAddOrEdit,
PacketTypeModelErase,
PacketTypeModelAddResponse,
PacketTypeOctreeDataNack
};
typedef char PacketVersion;
@ -74,7 +75,8 @@ const QSet<PacketType> NON_VERIFIED_PACKETS = QSet<PacketType>()
<< PacketTypeDomainServerRequireDTLS << PacketTypeDomainConnectRequest
<< PacketTypeDomainList << PacketTypeDomainListRequest << PacketTypeDomainOAuthRequest
<< PacketTypeCreateAssignment << PacketTypeRequestAssignment << PacketTypeStunResponse
<< PacketTypeNodeJsonStats << PacketTypeVoxelQuery << PacketTypeParticleQuery << PacketTypeModelQuery;
<< PacketTypeNodeJsonStats << PacketTypeVoxelQuery << PacketTypeParticleQuery << PacketTypeModelQuery
<< PacketTypeOctreeDataNack;
const int NUM_BYTES_MD5_HASH = 16;
const int NUM_STATIC_HEADER_BYTES = sizeof(PacketVersion) + NUM_BYTES_RFC4122_UUID;

View file

@ -46,6 +46,7 @@ OctreeSceneStats::OctreeSceneStats() :
_incomingReallyLate(0),
_incomingPossibleDuplicate(0),
_missingSequenceNumbers(),
_sequenceNumbersToNack(),
_incomingFlightTimeAverage(samples),
_jurisdictionRoot(NULL)
{
@ -158,6 +159,7 @@ void OctreeSceneStats::copyFromOther(const OctreeSceneStats& other) {
_incomingPossibleDuplicate = other._incomingPossibleDuplicate;
_missingSequenceNumbers = other._missingSequenceNumbers;
_sequenceNumbersToNack = other._sequenceNumbersToNack;
}
@ -926,6 +928,7 @@ void OctreeSceneStats::trackIncomingOctreePacket(const QByteArray& packet,
qDebug() << "found it in _missingSequenceNumbers";
}
_missingSequenceNumbers.remove(sequence);
_sequenceNumbersToNack.remove(sequence);
_incomingLikelyLost--;
_incomingRecovered++;
} else {
@ -955,6 +958,7 @@ void OctreeSceneStats::trackIncomingOctreePacket(const QByteArray& packet,
_incomingLikelyLost += missing;
for(unsigned int missingSequence = expected; missingSequence < sequence; missingSequence++) {
_missingSequenceNumbers << missingSequence;
_sequenceNumbersToNack << missingSequence;
}
}
}
@ -982,9 +986,20 @@ void OctreeSceneStats::trackIncomingOctreePacket(const QByteArray& packet,
qDebug() << "pruning really old missing sequence:" << missingItem;
}
_missingSequenceNumbers.remove(missingItem);
_sequenceNumbersToNack.remove(missingItem);
}
}
}
}
int OctreeSceneStats::getNumSequenceNumbersToNack() const {
return _sequenceNumbersToNack.size();
}
uint16_t OctreeSceneStats::getNextSequenceNumberToNack() {
QSet<uint16_t>::Iterator it = _sequenceNumbersToNack.begin();
uint16_t sequenceNumber = *it;
_sequenceNumbersToNack.remove(sequenceNumber);
return sequenceNumber;
}
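A plausible consumer of the two accessors above (hypothetical, not part of this diff): draining the NACK set into an outgoing PacketTypeOctreeDataNack payload.
while (stats.getNumSequenceNumbersToNack() > 0) {
    OCTREE_PACKET_SEQUENCE sequence = stats.getNextSequenceNumberToNack();
    // append 'sequence' to the packet being assembled here
}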

View file

@ -16,6 +16,7 @@
#include <NodeList.h>
#include <SharedUtil.h>
#include "JurisdictionMap.h"
#include "OctreePacketData.h"
#define GREENISH 0x40ff40d0
#define YELLOWISH 0xffef40c0
@ -172,6 +173,9 @@ public:
quint32 getIncomingPossibleDuplicate() const { return _incomingPossibleDuplicate; }
float getIncomingFlightTimeAverage() { return _incomingFlightTimeAverage.getAverage(); }
int getNumSequenceNumbersToNack() const;
OCTREE_PACKET_SEQUENCE getNextSequenceNumberToNack();
private:
void copyFromOther(const OctreeSceneStats& other);
@ -272,7 +276,8 @@ private:
quint32 _incomingLate; /// out of order later than expected
quint32 _incomingReallyLate; /// out of order and later than MAX_MISSING_SEQUENCE_OLD_AGE late
quint32 _incomingPossibleDuplicate; /// out of order possibly a duplicate
QSet<uint16_t> _missingSequenceNumbers;
QSet<OCTREE_PACKET_SEQUENCE> _missingSequenceNumbers;
QSet<OCTREE_PACKET_SEQUENCE> _sequenceNumbersToNack;
SimpleMovingAverage _incomingFlightTimeAverage;
// features related items

View file

@ -33,20 +33,7 @@ CapsuleShape::CapsuleShape(float radius, float halfHeight, const glm::vec3& posi
CapsuleShape::CapsuleShape(float radius, const glm::vec3& startPoint, const glm::vec3& endPoint) :
Shape(Shape::CAPSULE_SHAPE), _radius(radius), _halfHeight(0.0f) {
glm::vec3 axis = endPoint - startPoint;
_position = 0.5f * (endPoint + startPoint);
float height = glm::length(axis);
if (height > EPSILON) {
_halfHeight = 0.5f * height;
axis /= height;
glm::vec3 yAxis(0.0f, 1.0f, 0.0f);
float angle = glm::angle(axis, yAxis);
if (angle > EPSILON) {
axis = glm::normalize(glm::cross(yAxis, axis));
_rotation = glm::angleAxis(angle, axis);
}
}
updateBoundingRadius();
setEndPoints(startPoint, endPoint);
}
/// \param[out] startPoint is the center of start cap
@ -80,3 +67,20 @@ void CapsuleShape::setRadiusAndHalfHeight(float radius, float halfHeight) {
updateBoundingRadius();
}
void CapsuleShape::setEndPoints(const glm::vec3& startPoint, const glm::vec3& endPoint) {
glm::vec3 axis = endPoint - startPoint;
_position = 0.5f * (endPoint + startPoint);
float height = glm::length(axis);
if (height > EPSILON) {
_halfHeight = 0.5f * height;
axis /= height;
glm::vec3 yAxis(0.0f, 1.0f, 0.0f);
float angle = glm::angle(axis, yAxis);
if (angle > EPSILON) {
axis = glm::normalize(glm::cross(yAxis, axis));
_rotation = glm::angleAxis(angle, axis);
}
}
updateBoundingRadius();
}
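For illustration, the two-point constructor now simply delegates to this setter, and the shape can be repositioned later; e.g.:
// radius 0.5 between (0,0,0) and (0,2,0): _position = (0,1,0), _halfHeight = 1,
// and _rotation stays at identity because the axis is already parallel to the y-axis
CapsuleShape capsule(0.5f, glm::vec3(0.0f), glm::vec3(0.0f, 2.0f, 0.0f));
capsule.setEndPoints(glm::vec3(1.0f, 0.0f, 0.0f), glm::vec3(3.0f, 0.0f, 0.0f));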

View file

@ -37,6 +37,7 @@ public:
void setRadius(float radius);
void setHalfHeight(float height);
void setRadiusAndHalfHeight(float radius, float height);
void setEndPoints(const glm::vec3& startPoint, const glm::vec3& endPoint);
protected:
void updateBoundingRadius() { _boundingRadius = _radius + _halfHeight; }

View file

@ -0,0 +1,61 @@
//
// MovingPercentile.cpp
// libraries/shared/src
//
// Created by Yixin Wang on 6/4/2014
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "MovingPercentile.h"
MovingPercentile::MovingPercentile(int numSamples, float percentile)
: _numSamples(numSamples),
_percentile(percentile),
_samplesSorted(),
_sampleIds(),
_newSampleId(0),
_indexOfPercentile(0),
_valueAtPercentile(0.0f)
{
}
void MovingPercentile::updatePercentile(float sample) {
// insert the new sample into _samplesSorted
int newSampleIndex;
if (_samplesSorted.size() < _numSamples) {
// if not all samples have been filled yet, simply append it
newSampleIndex = _samplesSorted.size();
_samplesSorted.append(sample);
_sampleIds.append(_newSampleId);
// update _indexOfPercentile
float index = _percentile * (float)(_samplesSorted.size() - 1);
_indexOfPercentile = (int)(index + 0.5f); // round to int
} else {
// find index of sample with id = _newSampleId and replace it with new sample
newSampleIndex = _sampleIds.indexOf(_newSampleId);
_samplesSorted[newSampleIndex] = sample;
}
// increment _newSampleId. cycles from 0 thru N-1
_newSampleId = (_newSampleId == _numSamples - 1) ? 0 : _newSampleId + 1;
// swap new sample with neighbors in _samplesSorted until it's in sorted order
// try swapping up first, then down. element will only be swapped one direction.
while (newSampleIndex < _samplesSorted.size() - 1 && sample > _samplesSorted[newSampleIndex + 1]) {
_samplesSorted.swap(newSampleIndex, newSampleIndex + 1);
_sampleIds.swap(newSampleIndex, newSampleIndex + 1);
newSampleIndex++;
}
while (newSampleIndex > 0 && sample < _samplesSorted[newSampleIndex - 1]) {
_samplesSorted.swap(newSampleIndex, newSampleIndex - 1);
_sampleIds.swap(newSampleIndex, newSampleIndex - 1);
newSampleIndex--;
}
// find new value at percentile
_valueAtPercentile = _samplesSorted[_indexOfPercentile];
}
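A usage sketch matching how Node now uses this class for clock skew (an 80th percentile over a window of 30 samples); the sample values are made up:
MovingPercentile clockSkew(30, 0.8f);
for (int i = 0; i < 30; i++) {
    clockSkew.updatePercentile(100.0f + (i % 5));        // steady readings around 100-104
}
clockSkew.updatePercentile(5000.0f);                     // one outlier replaces the oldest sample
float skewEstimate = clockSkew.getValueAtPercentile();   // still ~104, not 5000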

View file

@ -0,0 +1,36 @@
//
// MovingPercentile.h
// libraries/shared/src
//
// Created by Yixin Wang on 6/4/2014
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_MovingPercentile_h
#define hifi_MovingPercentile_h
#include <qlist.h>
class MovingPercentile {
public:
MovingPercentile(int numSamples, float percentile = 0.5f);
void updatePercentile(float sample);
float getValueAtPercentile() const { return _valueAtPercentile; }
private:
const int _numSamples;
const float _percentile;
QList<float> _samplesSorted;
QList<int> _sampleIds; // incrementally assigned, is cyclic
int _newSampleId;
int _indexOfPercentile;
float _valueAtPercentile;
};
#endif

View file

@ -20,12 +20,16 @@
REGISTER_META_OBJECT(TestSharedObjectA)
REGISTER_META_OBJECT(TestSharedObjectB)
IMPLEMENT_ENUM_METATYPE(TestSharedObjectA, TestEnum)
MetavoxelTests::MetavoxelTests(int& argc, char** argv) :
QCoreApplication(argc, argv) {
}
static int datagramsSent = 0;
static int datagramsReceived = 0;
static int bytesSent = 0;
static int bytesReceived = 0;
static int highPriorityMessagesSent = 0;
static int highPriorityMessagesReceived = 0;
static int unreliableMessagesSent = 0;
@ -36,6 +40,7 @@ static int streamedBytesSent = 0;
static int streamedBytesReceived = 0;
static int sharedObjectsCreated = 0;
static int sharedObjectsDestroyed = 0;
static int objectMutationsPerformed = 0;
static QByteArray createRandomBytes(int minimumSize, int maximumSize) {
QByteArray bytes(randIntInRange(minimumSize, maximumSize), 0);
@ -51,12 +56,36 @@ static QByteArray createRandomBytes() {
return createRandomBytes(MIN_BYTES, MAX_BYTES);
}
static TestSharedObjectA::TestEnum getRandomTestEnum() {
switch (randIntInRange(0, 2)) {
case 0: return TestSharedObjectA::FIRST_TEST_ENUM;
case 1: return TestSharedObjectA::SECOND_TEST_ENUM;
case 2:
default: return TestSharedObjectA::THIRD_TEST_ENUM;
}
}
static TestSharedObjectA::TestFlags getRandomTestFlags() {
TestSharedObjectA::TestFlags flags = 0;
if (randomBoolean()) {
flags |= TestSharedObjectA::FIRST_TEST_FLAG;
}
if (randomBoolean()) {
flags |= TestSharedObjectA::SECOND_TEST_FLAG;
}
if (randomBoolean()) {
flags |= TestSharedObjectA::THIRD_TEST_FLAG;
}
return flags;
}
static TestMessageC createRandomMessageC() {
TestMessageC message;
message.foo = randomBoolean();
message.bar = rand();
message.baz = randFloat();
message.bong.foo = createRandomBytes();
message.bong.baz = getRandomTestEnum();
return message;
}
@ -64,9 +93,11 @@ static bool testSerialization(Bitstream::MetadataType metadataType) {
QByteArray array;
QDataStream outStream(&array, QIODevice::WriteOnly);
Bitstream out(outStream, metadataType);
SharedObjectPointer testObjectWrittenA = new TestSharedObjectA(randFloat());
SharedObjectPointer testObjectWrittenA = new TestSharedObjectA(randFloat(), TestSharedObjectA::SECOND_TEST_ENUM,
TestSharedObjectA::TestFlags(TestSharedObjectA::FIRST_TEST_FLAG | TestSharedObjectA::THIRD_TEST_FLAG));
out << testObjectWrittenA;
SharedObjectPointer testObjectWrittenB = new TestSharedObjectB(randFloat(), createRandomBytes());
SharedObjectPointer testObjectWrittenB = new TestSharedObjectB(randFloat(), createRandomBytes(),
TestSharedObjectB::THIRD_TEST_ENUM, TestSharedObjectB::SECOND_TEST_FLAG);
out << testObjectWrittenB;
TestMessageC messageWritten = createRandomMessageC();
out << QVariant::fromValue(messageWritten);
@ -79,6 +110,10 @@ static bool testSerialization(Bitstream::MetadataType metadataType) {
in.addMetaObjectSubstitution("TestSharedObjectA", &TestSharedObjectB::staticMetaObject);
in.addMetaObjectSubstitution("TestSharedObjectB", &TestSharedObjectA::staticMetaObject);
in.addTypeSubstitution("TestMessageC", TestMessageA::Type);
in.addTypeSubstitution("TestSharedObjectA::TestEnum", "TestSharedObjectB::TestEnum");
in.addTypeSubstitution("TestSharedObjectB::TestEnum", "TestSharedObjectA::TestEnum");
in.addTypeSubstitution("TestSharedObjectA::TestFlags", "TestSharedObjectB::TestFlags");
in.addTypeSubstitution("TestSharedObjectB::TestFlags", "TestSharedObjectA::TestFlags");
SharedObjectPointer testObjectReadA;
in >> testObjectReadA;
@ -86,8 +121,11 @@ static bool testSerialization(Bitstream::MetadataType metadataType) {
qDebug() << "Wrong class for A" << testObjectReadA << metadataType;
return true;
}
if (metadataType == Bitstream::FULL_METADATA && static_cast<TestSharedObjectA*>(testObjectWrittenA.data())->getFoo() !=
static_cast<TestSharedObjectB*>(testObjectReadA.data())->getFoo()) {
if (metadataType == Bitstream::FULL_METADATA && (static_cast<TestSharedObjectA*>(testObjectWrittenA.data())->getFoo() !=
static_cast<TestSharedObjectB*>(testObjectReadA.data())->getFoo() ||
static_cast<TestSharedObjectB*>(testObjectReadA.data())->getBaz() != TestSharedObjectB::SECOND_TEST_ENUM ||
static_cast<TestSharedObjectB*>(testObjectReadA.data())->getBong() !=
TestSharedObjectB::TestFlags(TestSharedObjectB::FIRST_TEST_FLAG | TestSharedObjectB::THIRD_TEST_FLAG))) {
QDebug debug = qDebug() << "Failed to transfer shared field from A to B";
testObjectWrittenA->dump(debug);
testObjectReadA->dump(debug);
@ -100,8 +138,10 @@ static bool testSerialization(Bitstream::MetadataType metadataType) {
qDebug() << "Wrong class for B" << testObjectReadB << metadataType;
return true;
}
if (metadataType == Bitstream::FULL_METADATA && static_cast<TestSharedObjectB*>(testObjectWrittenB.data())->getFoo() !=
static_cast<TestSharedObjectA*>(testObjectReadB.data())->getFoo()) {
if (metadataType == Bitstream::FULL_METADATA && (static_cast<TestSharedObjectB*>(testObjectWrittenB.data())->getFoo() !=
static_cast<TestSharedObjectA*>(testObjectReadB.data())->getFoo() ||
static_cast<TestSharedObjectA*>(testObjectReadB.data())->getBaz() != TestSharedObjectA::THIRD_TEST_ENUM ||
static_cast<TestSharedObjectA*>(testObjectReadB.data())->getBong() != TestSharedObjectA::SECOND_TEST_FLAG)) {
QDebug debug = qDebug() << "Failed to transfer shared field from B to A";
testObjectWrittenB->dump(debug);
testObjectReadB->dump(debug);
@ -146,7 +186,7 @@ bool MetavoxelTests::run() {
bob.setOther(&alice);
// perform a large number of simulation iterations
const int SIMULATION_ITERATIONS = 100000;
const int SIMULATION_ITERATIONS = 10000;
for (int i = 0; i < SIMULATION_ITERATIONS; i++) {
if (alice.simulate(i) || bob.simulate(i)) {
return true;
@ -157,8 +197,10 @@ bool MetavoxelTests::run() {
qDebug() << "Sent" << unreliableMessagesSent << "unreliable messages, received" << unreliableMessagesReceived;
qDebug() << "Sent" << reliableMessagesSent << "reliable messages, received" << reliableMessagesReceived;
qDebug() << "Sent" << streamedBytesSent << "streamed bytes, received" << streamedBytesReceived;
qDebug() << "Sent" << datagramsSent << "datagrams, received" << datagramsReceived;
qDebug() << "Sent" << datagramsSent << "datagrams with" << bytesSent << "bytes, received" <<
datagramsReceived << "with" << bytesReceived << "bytes";
qDebug() << "Created" << sharedObjectsCreated << "shared objects, destroyed" << sharedObjectsDestroyed;
qDebug() << "Performed" << objectMutationsPerformed << "object mutations";
qDebug();
qDebug() << "Running serialization tests...";
@ -175,7 +217,7 @@ bool MetavoxelTests::run() {
static SharedObjectPointer createRandomSharedObject() {
switch (randIntInRange(0, 2)) {
case 0: return new TestSharedObjectA(randFloat());
case 0: return new TestSharedObjectA(randFloat(), getRandomTestEnum(), getRandomTestFlags());
case 1: return new TestSharedObjectB();
case 2:
default: return SharedObjectPointer();
@ -192,6 +234,20 @@ Endpoint::Endpoint(const QByteArray& datagramHeader) :
connect(_sequencer, SIGNAL(receivedHighPriorityMessage(const QVariant&)),
SLOT(handleHighPriorityMessage(const QVariant&)));
connect(_sequencer, SIGNAL(sendAcknowledged(int)), SLOT(clearSendRecordsBefore(int)));
connect(_sequencer, SIGNAL(receiveAcknowledged(int)), SLOT(clearReceiveRecordsBefore(int)));
// insert the baseline send record
SendRecord sendRecord = { 0 };
_sendRecords.append(sendRecord);
// insert the baseline receive record
ReceiveRecord receiveRecord = { 0 };
_receiveRecords.append(receiveRecord);
// create the object that represents our delta-encoded state
_localState = new TestSharedObjectA();
connect(_sequencer->getReliableInputChannel(), SIGNAL(receivedMessage(const QVariant&)),
SLOT(handleReliableMessage(const QVariant&)));
@ -218,16 +274,40 @@ static QVariant createRandomMessage() {
return QVariant::fromValue(message);
}
case 1: {
TestMessageB message = { createRandomBytes(), createRandomSharedObject() };
TestMessageB message = { createRandomBytes(), createRandomSharedObject(), getRandomTestEnum() };
return QVariant::fromValue(message);
}
case 2:
default: {
return QVariant::fromValue(createRandomMessageC());
}
}
}
static SharedObjectPointer mutate(const SharedObjectPointer& state) {
switch(randIntInRange(0, 3)) {
case 0: {
SharedObjectPointer newState = state->clone(true);
static_cast<TestSharedObjectA*>(newState.data())->setFoo(randFloat());
objectMutationsPerformed++;
return newState;
}
case 1: {
SharedObjectPointer newState = state->clone(true);
static_cast<TestSharedObjectA*>(newState.data())->setBaz(getRandomTestEnum());
objectMutationsPerformed++;
return newState;
}
case 2: {
SharedObjectPointer newState = state->clone(true);
static_cast<TestSharedObjectA*>(newState.data())->setBong(getRandomTestFlags());
objectMutationsPerformed++;
return newState;
}
default:
return state;
}
}
static bool messagesEqual(const QVariant& firstMessage, const QVariant& secondMessage) {
int type = firstMessage.userType();
if (secondMessage.userType() != type) {
@ -239,7 +319,7 @@ static bool messagesEqual(const QVariant& firstMessage, const QVariant& secondMe
} else if (type == TestMessageB::Type) {
TestMessageB first = firstMessage.value<TestMessageB>();
TestMessageB second = secondMessage.value<TestMessageB>();
return first.foo == second.foo && equals(first.bar, second.bar);
return first.foo == second.foo && equals(first.bar, second.bar) && first.baz == second.baz;
} else if (type == TestMessageC::Type) {
return firstMessage.value<TestMessageC>() == secondMessage.value<TestMessageC>();
@ -286,10 +366,13 @@ bool Endpoint::simulate(int iterationNumber) {
_reliableMessagesToSend -= 1.0f;
}
// tweak the local state
_localState = mutate(_localState);
// send a packet
try {
Bitstream& out = _sequencer->startPacket();
SequencedTestMessage message = { iterationNumber, createRandomMessage() };
SequencedTestMessage message = { iterationNumber, createRandomMessage(), _localState };
_unreliableMessagesSent.append(message);
unreliableMessagesSent++;
out << message;
@ -300,11 +383,16 @@ bool Endpoint::simulate(int iterationNumber) {
return true;
}
// record the send
SendRecord record = { _sequencer->getOutgoingPacketNumber(), _localState };
_sendRecords.append(record);
return false;
}
void Endpoint::sendDatagram(const QByteArray& datagram) {
datagramsSent++;
bytesSent += datagram.size();
// some datagrams are dropped
const float DROP_PROBABILITY = 0.1f;
@ -330,6 +418,7 @@ void Endpoint::sendDatagram(const QByteArray& datagram) {
_other->_sequencer->receivedDatagram(datagram);
datagramsReceived++;
bytesReceived += datagram.size();
}
void Endpoint::handleHighPriorityMessage(const QVariant& message) {
@ -350,12 +439,21 @@ void Endpoint::readMessage(Bitstream& in) {
SequencedTestMessage message;
in >> message;
_remoteState = message.state;
// record the receipt
ReceiveRecord record = { _sequencer->getIncomingPacketNumber(), message.state };
_receiveRecords.append(record);
for (QList<SequencedTestMessage>::iterator it = _other->_unreliableMessagesSent.begin();
it != _other->_unreliableMessagesSent.end(); it++) {
if (it->sequenceNumber == message.sequenceNumber) {
if (!messagesEqual(it->submessage, message.submessage)) {
throw QString("Sent/received unreliable message mismatch.");
}
if (!it->state->equals(message.state)) {
throw QString("Delta-encoded object mismatch.");
}
_other->_unreliableMessagesSent.erase(_other->_unreliableMessagesSent.begin(), it + 1);
unreliableMessagesReceived++;
return;
@ -393,8 +491,18 @@ void Endpoint::readReliableChannel() {
streamedBytesReceived += bytes.size();
}
TestSharedObjectA::TestSharedObjectA(float foo) :
_foo(foo) {
void Endpoint::clearSendRecordsBefore(int index) {
_sendRecords.erase(_sendRecords.begin(), _sendRecords.begin() + index + 1);
}
void Endpoint::clearReceiveRecordsBefore(int index) {
_receiveRecords.erase(_receiveRecords.begin(), _receiveRecords.begin() + index + 1);
}
TestSharedObjectA::TestSharedObjectA(float foo, TestEnum baz, TestFlags bong) :
_foo(foo),
_baz(baz),
_bong(bong) {
sharedObjectsCreated++;
}
@ -408,9 +516,11 @@ void TestSharedObjectA::setFoo(float foo) {
}
}
TestSharedObjectB::TestSharedObjectB(float foo, const QByteArray& bar) :
TestSharedObjectB::TestSharedObjectB(float foo, const QByteArray& bar, TestEnum baz, TestFlags bong) :
_foo(foo),
_bar(bar) {
_bar(bar),
_baz(baz),
_bong(bong) {
sharedObjectsCreated++;
}

View file

@ -54,9 +54,30 @@ private slots:
void handleReliableMessage(const QVariant& message);
void readReliableChannel();
void clearSendRecordsBefore(int index);
void clearReceiveRecordsBefore(int index);
private:
class SendRecord {
public:
int packetNumber;
SharedObjectPointer localState;
};
class ReceiveRecord {
public:
int packetNumber;
SharedObjectPointer remoteState;
};
DatagramSequencer* _sequencer;
QList<SendRecord> _sendRecords;
QList<ReceiveRecord> _receiveRecords;
SharedObjectPointer _localState;
SharedObjectPointer _remoteState;
Endpoint* _other;
QList<QPair<QByteArray, int> > _delayedDatagrams;
float _highPriorityMessagesToSend;
@ -70,16 +91,31 @@ private:
/// A simple shared object.
class TestSharedObjectA : public SharedObject {
Q_OBJECT
Q_ENUMS(TestEnum)
Q_FLAGS(TestFlag TestFlags)
Q_PROPERTY(float foo READ getFoo WRITE setFoo NOTIFY fooChanged)
Q_PROPERTY(TestEnum baz READ getBaz WRITE setBaz)
Q_PROPERTY(TestFlags bong READ getBong WRITE setBong)
public:
Q_INVOKABLE TestSharedObjectA(float foo = 0.0f);
enum TestEnum { FIRST_TEST_ENUM, SECOND_TEST_ENUM, THIRD_TEST_ENUM };
enum TestFlag { NO_TEST_FLAGS = 0x0, FIRST_TEST_FLAG = 0x01, SECOND_TEST_FLAG = 0x02, THIRD_TEST_FLAG = 0x04 };
Q_DECLARE_FLAGS(TestFlags, TestFlag)
Q_INVOKABLE TestSharedObjectA(float foo = 0.0f, TestEnum baz = FIRST_TEST_ENUM, TestFlags bong = 0);
virtual ~TestSharedObjectA();
void setFoo(float foo);
float getFoo() const { return _foo; }
void setBaz(TestEnum baz) { _baz = baz; }
TestEnum getBaz() const { return _baz; }
void setBong(TestFlags bong) { _bong = bong; }
TestFlags getBong() const { return _bong; }
signals:
void fooChanged(float foo);
@ -87,17 +123,32 @@ signals:
private:
float _foo;
TestEnum _baz;
TestFlags _bong;
};
DECLARE_ENUM_METATYPE(TestSharedObjectA, TestEnum)
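Since baz and bong are exposed as Q_PROPERTYs and their types are registered with Q_ENUMS/Q_FLAGS, they can be driven either through the typed accessors or generically through Qt's meta-object system, where enum properties travel as ints. A brief usage sketch, assuming an already-constructed instance; the helper name exerciseSharedObjectA is illustrative and not part of the tests:
#include <QVariant>
void exerciseSharedObjectA(TestSharedObjectA* object) {
    // typed accessors
    object->setFoo(2.0f); // emits fooChanged(2.0f)
    TestSharedObjectA::TestFlags bong(TestSharedObjectA::FIRST_TEST_FLAG);
    bong |= TestSharedObjectA::THIRD_TEST_FLAG;
    object->setBong(bong);
    // generic access through the meta-object system
    object->setProperty("baz", TestSharedObjectA::THIRD_TEST_ENUM);
    int baz = object->property("baz").toInt(); // == THIRD_TEST_ENUM
    bool hasThird = object->getBong().testFlag(TestSharedObjectA::THIRD_TEST_FLAG);
    Q_UNUSED(baz);
    Q_UNUSED(hasThird);
}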
/// Another simple shared object.
class TestSharedObjectB : public SharedObject {
Q_OBJECT
Q_ENUMS(TestEnum)
Q_FLAGS(TestFlag TestFlags)
Q_PROPERTY(float foo READ getFoo WRITE setFoo)
Q_PROPERTY(QByteArray bar READ getBar WRITE setBar)
Q_PROPERTY(TestEnum baz READ getBaz WRITE setBaz)
Q_PROPERTY(TestFlags bong READ getBong WRITE setBong)
public:
Q_INVOKABLE TestSharedObjectB(float foo = 0.0f, const QByteArray& bar = QByteArray());
enum TestEnum { ZEROTH_TEST_ENUM, FIRST_TEST_ENUM, SECOND_TEST_ENUM, THIRD_TEST_ENUM, FOURTH_TEST_ENUM };
enum TestFlag { NO_TEST_FLAGS = 0x0, ZEROTH_TEST_FLAG = 0x01, FIRST_TEST_FLAG = 0x02,
SECOND_TEST_FLAG = 0x04, THIRD_TEST_FLAG = 0x08, FOURTH_TEST_FLAG = 0x10 };
Q_DECLARE_FLAGS(TestFlags, TestFlag)
Q_INVOKABLE TestSharedObjectB(float foo = 0.0f, const QByteArray& bar = QByteArray(),
TestEnum baz = FIRST_TEST_ENUM, TestFlags bong = 0);
virtual ~TestSharedObjectB();
void setFoo(float foo) { _foo = foo; }
@ -106,10 +157,18 @@ public:
void setBar(const QByteArray& bar) { _bar = bar; }
const QByteArray& getBar() const { return _bar; }
void setBaz(TestEnum baz) { _baz = baz; }
TestEnum getBaz() const { return _baz; }
void setBong(TestFlags bong) { _bong = bong; }
TestFlags getBong() const { return _bong; }
private:
float _foo;
QByteArray _bar;
TestEnum _baz;
TestFlags _bong;
};
/// A simple test message.
@ -133,6 +192,7 @@ public:
STREAM QByteArray foo;
STREAM SharedObjectPointer bar;
STREAM TestSharedObjectA::TestEnum baz;
};
DECLARE_STREAMABLE_METATYPE(TestMessageB)
@ -156,6 +216,7 @@ public:
STREAM int sequenceNumber;
STREAM QVariant submessage;
STREAM SharedObjectPointer state;
};
DECLARE_STREAMABLE_METATYPE(SequencedTestMessage)

View file

@ -0,0 +1,38 @@
cmake_minimum_required(VERSION 2.8)
if (WIN32)
cmake_policy (SET CMP0020 NEW)
endif (WIN32)
set(TARGET_NAME shared-tests)
set(ROOT_DIR ../..)
set(MACRO_DIR ${ROOT_DIR}/cmake/macros)
# setup for find modules
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../../cmake/modules/")
#find_package(Qt5Network REQUIRED)
#find_package(Qt5Script REQUIRED)
#find_package(Qt5Widgets REQUIRED)
include(${MACRO_DIR}/SetupHifiProject.cmake)
setup_hifi_project(${TARGET_NAME} TRUE)
include(${MACRO_DIR}/AutoMTC.cmake)
auto_mtc(${TARGET_NAME} ${ROOT_DIR})
#qt5_use_modules(${TARGET_NAME} Network Script Widgets)
#include glm
include(${MACRO_DIR}/IncludeGLM.cmake)
include_glm(${TARGET_NAME} ${ROOT_DIR})
# link in the shared libraries
include(${MACRO_DIR}/LinkHifiLibrary.cmake)
link_hifi_library(shared ${TARGET_NAME} ${ROOT_DIR})
if (WIN32)
#target_link_libraries(${TARGET_NAME} Winmm Ws2_32)
endif (WIN32)

View file

@ -0,0 +1,169 @@
//
// MovingPercentileTests.cpp
// tests/shared/src
//
// Created by Yixin Wang on 6/4/2014
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "MovingPercentileTests.h"
#include "SharedUtil.h"
#include "MovingPercentile.h"
#include <cstdlib>
#include <QQueue>
float MovingPercentileTests::random() {
return rand() / (float)RAND_MAX;
}
void MovingPercentileTests::runAllTests() {
QVector<int> valuesForN;
valuesForN.append(1);
valuesForN.append(2);
valuesForN.append(3);
valuesForN.append(4);
valuesForN.append(5);
valuesForN.append(10);
valuesForN.append(100);
QQueue<float> lastNSamples;
for (int i = 0; i < valuesForN.size(); i++) {
int N = valuesForN.at(i);
qDebug() << "testing moving percentile with N =" << N << "...";
{
bool fail = false;
qDebug() << "\t testing running min...";
lastNSamples.clear();
MovingPercentile movingMin(N, 0.0f);
for (int s = 0; s < 3*N; s++) {
float sample = random();
lastNSamples.push_back(sample);
if (lastNSamples.size() > N) {
lastNSamples.pop_front();
}
movingMin.updatePercentile(sample);
float experimentMin = movingMin.getValueAtPercentile();
float actualMin = lastNSamples[0];
for (int j = 0; j < lastNSamples.size(); j++) {
if (lastNSamples.at(j) < actualMin) {
actualMin = lastNSamples.at(j);
}
}
if (experimentMin != actualMin) {
qDebug() << "\t\t FAIL at sample" << s;
fail = true;
break;
}
}
if (!fail) {
qDebug() << "\t\t PASS";
}
}
{
bool fail = false;
qDebug() << "\t testing running max...";
lastNSamples.clear();
MovingPercentile movingMax(N, 1.0f);
for (int s = 0; s < 10000; s++) {
float sample = random();
lastNSamples.push_back(sample);
if (lastNSamples.size() > N) {
lastNSamples.pop_front();
}
movingMax.updatePercentile(sample);
float experimentMax = movingMax.getValueAtPercentile();
float actualMax = lastNSamples[0];
for (int j = 0; j < lastNSamples.size(); j++) {
if (lastNSamples.at(j) > actualMax) {
actualMax = lastNSamples.at(j);
}
}
if (experimentMax != actualMax) {
qDebug() << "\t\t FAIL at sample" << s;
fail = true;
break;
}
}
if (!fail) {
qDebug() << "\t\t PASS";
}
}
{
bool fail = false;
qDebug() << "\t testing running median...";
lastNSamples.clear();
MovingPercentile movingMedian(N, 0.5f);
for (int s = 0; s < 10000; s++) {
float sample = random();
lastNSamples.push_back(sample);
if (lastNSamples.size() > N) {
lastNSamples.pop_front();
}
movingMedian.updatePercentile(sample);
float experimentMedian = movingMedian.getValueAtPercentile();
int samplesLessThan = 0;
int samplesMoreThan = 0;
for (int j = 0; j < lastNSamples.size(); j++) {
if (lastNSamples.at(j) < experimentMedian) {
samplesLessThan++;
} else if (lastNSamples.at(j) > experimentMedian) {
samplesMoreThan++;
}
}
// a valid median has at most half the window strictly below it and at most half strictly above it
if (!(samplesLessThan <= N / 2 && samplesMoreThan <= N / 2)) {
qDebug() << "\t\t FAIL at sample" << s;
fail = true;
break;
}
}
if (!fail) {
qDebug() << "\t\t PASS";
}
}
}
}
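The three blocks above exercise MovingPercentile as a running min (percentile 0.0), max (1.0), and median (0.5) over a sliding window of the last N samples. A minimal usage sketch outside the test harness, relying only on the constructor, updatePercentile(), and getValueAtPercentile() calls seen above; the jitter scenario and the trackMedianJitter name are illustrative:
#include <cstdlib>
#include <QDebug>
#include "MovingPercentile.h"
// track the median of the last 100 samples of some measurement
void trackMedianJitter() {
    const int WINDOW_SIZE = 100;
    MovingPercentile medianJitter(WINDOW_SIZE, 0.5f);
    for (int i = 0; i < 1000; i++) {
        float sample = rand() / (float)RAND_MAX; // stand-in for a real measurement
        medianJitter.updatePercentile(sample);
    }
    // reports the median of the most recent WINDOW_SIZE samples
    qDebug() << "median of last" << WINDOW_SIZE << "samples:" << medianJitter.getValueAtPercentile();
}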

View file

@ -0,0 +1,22 @@
//
// MovingPercentileTests.h
// tests/shared/src
//
// Created by Yixin Wang on 6/4/2014
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_MovingPercentileTests_h
#define hifi_MovingPercentileTests_h
namespace MovingPercentileTests {
float random();
void runAllTests();
}
#endif // hifi_MovingPercentileTests_h

16
tests/shared/src/main.cpp Normal file
View file

@ -0,0 +1,16 @@
//
// main.cpp
// tests/shared/src
//
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "MovingPercentileTests.h"
int main(int argc, char** argv) {
MovingPercentileTests::runAllTests();
return 0;
}