Mirror of https://github.com/JulianGro/overte.git (synced 2025-04-25 19:55:07 +02:00)

Commit d21a8e73ff: Merge remote-tracking branch 'upstream/master' into packet_recovery_pull

52 changed files with 3089 additions and 743 deletions
@@ -173,134 +173,151 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
|
||||
}
|
||||
}
|
||||
|
||||
// if the bearing relative angle to source is > 0 then the delayed channel is the right one
|
||||
int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
|
||||
int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
|
||||
|
||||
const int16_t* nextOutputStart = bufferToAdd->getNextOutput();
|
||||
|
||||
const int16_t* bufferStart = bufferToAdd->getBuffer();
|
||||
int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
|
||||
|
||||
int16_t correctBufferSample[2], delayBufferSample[2];
|
||||
int delayedChannelIndex = 0;
|
||||
|
||||
const int SINGLE_STEREO_OFFSET = 2;
|
||||
|
||||
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
|
||||
if (!bufferToAdd->isStereo()) {
|
||||
// this is a mono buffer, which means it gets full attenuation and spatialization
|
||||
|
||||
// setup the int16_t variables for the two sample sets
|
||||
correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient;
|
||||
correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient;
|
||||
// if the bearing relative angle to source is > 0 then the delayed channel is the right one
|
||||
int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
|
||||
int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
|
||||
|
||||
delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
|
||||
const int16_t* bufferStart = bufferToAdd->getBuffer();
|
||||
int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
|
||||
|
||||
delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
|
||||
delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
|
||||
int16_t correctBufferSample[2], delayBufferSample[2];
|
||||
int delayedChannelIndex = 0;
|
||||
|
||||
__m64 bufferSamples = _mm_set_pi16(_clientSamples[s + goodChannelOffset],
|
||||
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET],
|
||||
_clientSamples[delayedChannelIndex],
|
||||
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET]);
|
||||
__m64 addedSamples = _mm_set_pi16(correctBufferSample[0], correctBufferSample[1],
|
||||
delayBufferSample[0], delayBufferSample[1]);
|
||||
const int SINGLE_STEREO_OFFSET = 2;
|
||||
|
||||
// perform the MMX add (with saturation) of two correct and delayed samples
|
||||
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addedSamples);
|
||||
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
|
||||
|
||||
// assign the results from the result of the mmx arithmetic
|
||||
_clientSamples[s + goodChannelOffset] = shortResults[3];
|
||||
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] = shortResults[2];
|
||||
_clientSamples[delayedChannelIndex] = shortResults[1];
|
||||
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] = shortResults[0];
|
||||
}
|
||||
|
||||
// The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
|
||||
// too many conditionals in handling the delay samples at the beginning of _clientSamples.
|
||||
// Basically we try to take the samples in batches of four, and then handle the remainder
|
||||
// conditionally to get rid of the rest.
|
||||
|
||||
const int DOUBLE_STEREO_OFFSET = 4;
|
||||
const int TRIPLE_STEREO_OFFSET = 6;
|
||||
|
||||
if (numSamplesDelay > 0) {
|
||||
// if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
|
||||
// to stick at the beginning
|
||||
float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
|
||||
const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay;
|
||||
if (delayNextOutputStart < bufferStart) {
|
||||
delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay;
|
||||
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
|
||||
|
||||
// setup the int16_t variables for the two sample sets
|
||||
correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient;
|
||||
correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient;
|
||||
|
||||
delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
|
||||
|
||||
delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
|
||||
delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
|
||||
|
||||
__m64 bufferSamples = _mm_set_pi16(_clientSamples[s + goodChannelOffset],
|
||||
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET],
|
||||
_clientSamples[delayedChannelIndex],
|
||||
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET]);
|
||||
__m64 addedSamples = _mm_set_pi16(correctBufferSample[0], correctBufferSample[1],
|
||||
delayBufferSample[0], delayBufferSample[1]);
|
||||
|
||||
// perform the MMX add (with saturation) of two correct and delayed samples
|
||||
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addedSamples);
|
||||
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
|
||||
|
||||
// assign the results from the result of the mmx arithmetic
|
||||
_clientSamples[s + goodChannelOffset] = shortResults[3];
|
||||
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] = shortResults[2];
|
||||
_clientSamples[delayedChannelIndex] = shortResults[1];
|
||||
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] = shortResults[0];
|
||||
}
|
||||
|
||||
int i = 0;
|
||||
// The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
|
||||
// too many conditionals in handling the delay samples at the beginning of _clientSamples.
|
||||
// Basically we try to take the samples in batches of four, and then handle the remainder
|
||||
// conditionally to get rid of the rest.
|
||||
|
||||
while (i + 3 < numSamplesDelay) {
|
||||
// handle the first cases where we can MMX add four samples at once
|
||||
const int DOUBLE_STEREO_OFFSET = 4;
|
||||
const int TRIPLE_STEREO_OFFSET = 6;
|
||||
|
||||
if (numSamplesDelay > 0) {
|
||||
// if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
|
||||
// to stick at the beginning
|
||||
float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
|
||||
const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay;
|
||||
if (delayNextOutputStart < bufferStart) {
|
||||
delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay;
|
||||
}
|
||||
|
||||
int i = 0;
|
||||
|
||||
while (i + 3 < numSamplesDelay) {
|
||||
// handle the first cases where we can MMX add four samples at once
|
||||
int parentIndex = i * 2;
|
||||
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset]);
|
||||
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 3] * attenuationAndWeakChannelRatio);
|
||||
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
|
||||
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
|
||||
|
||||
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
|
||||
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
|
||||
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[0];
|
||||
|
||||
// push the index
|
||||
i += 4;
|
||||
}
|
||||
|
||||
int parentIndex = i * 2;
|
||||
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset]);
|
||||
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 3] * attenuationAndWeakChannelRatio);
|
||||
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
|
||||
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
|
||||
|
||||
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
|
||||
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
|
||||
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[0];
|
||||
|
||||
// push the index
|
||||
i += 4;
|
||||
if (i + 2 < numSamplesDelay) {
|
||||
// MMX add only three delayed samples
|
||||
|
||||
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
|
||||
0);
|
||||
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
|
||||
0);
|
||||
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
|
||||
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
|
||||
|
||||
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
|
||||
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
|
||||
|
||||
} else if (i + 1 < numSamplesDelay) {
|
||||
// MMX add two delayed samples
|
||||
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset], 0, 0);
|
||||
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio, 0, 0);
|
||||
|
||||
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
|
||||
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
|
||||
|
||||
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
|
||||
|
||||
} else if (i < numSamplesDelay) {
|
||||
// MMX add a single delayed sample
|
||||
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset], 0, 0, 0);
|
||||
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio, 0, 0, 0);
|
||||
|
||||
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
|
||||
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
|
||||
|
||||
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
|
||||
}
|
||||
}
|
||||
|
||||
int parentIndex = i * 2;
|
||||
|
||||
if (i + 2 < numSamplesDelay) {
|
||||
// MMX add only three delayed samples
|
||||
|
||||
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
|
||||
0);
|
||||
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
|
||||
0);
|
||||
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
|
||||
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
|
||||
|
||||
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
|
||||
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
|
||||
|
||||
} else if (i + 1 < numSamplesDelay) {
|
||||
// MMX add two delayed samples
|
||||
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset], 0, 0);
|
||||
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
|
||||
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio, 0, 0);
|
||||
|
||||
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
|
||||
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
|
||||
|
||||
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
|
||||
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
|
||||
|
||||
} else if (i < numSamplesDelay) {
|
||||
// MMX add a single delayed sample
|
||||
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset], 0, 0, 0);
|
||||
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio, 0, 0, 0);
|
||||
|
||||
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
|
||||
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
|
||||
|
||||
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
|
||||
} else {
|
||||
// stereo buffer - do attenuation but no sample delay for spatialization
|
||||
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
|
||||
// use MMX to clamp four additions at a time
|
||||
_clientSamples[s] = glm::clamp(_clientSamples[s] + (int) (nextOutputStart[s] * attenuationCoefficient),
|
||||
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
|
||||
_clientSamples[s + 1] = glm::clamp(_clientSamples[s + 1] + (int) (nextOutputStart[s + 1] * attenuationCoefficient),
|
||||
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
|
||||
_clientSamples[s + 2] = glm::clamp(_clientSamples[s + 2] + (int) (nextOutputStart[s + 2] * attenuationCoefficient),
|
||||
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
|
||||
_clientSamples[s + 3] = glm::clamp(_clientSamples[s + 3] + (int) (nextOutputStart[s + 3] * attenuationCoefficient),
|
||||
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
|
||||
}
|
||||
}
|
||||
}
|
||||
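The mixing loops in the hunk above lean on _mm_adds_pi16, a packed saturating add: four signed 16-bit samples are summed at once and clip at +/-32767 instead of wrapping around. A self-contained sketch of that idea follows; it is not part of the commit, and the sample values are made up for illustration.

    #include <mmintrin.h>   // MMX intrinsics (x86)
    #include <cstdint>
    #include <cstdio>

    int main() {
        // two already-attenuated mix samples and two samples to add into them
        int16_t mixBuffer[4] = { 30000, -25000, 1000, -2000 };
        int16_t toAdd[4]     = { 10000, -20000,  500,  -500 };

        // pack the four 16-bit values of each array into one 64-bit MMX register
        __m64 a = _mm_set_pi16(mixBuffer[0], mixBuffer[1], mixBuffer[2], mixBuffer[3]);
        __m64 b = _mm_set_pi16(toAdd[0], toAdd[1], toAdd[2], toAdd[3]);

        // saturating add: 30000 + 10000 clamps to 32767 instead of wrapping negative
        __m64 sum = _mm_adds_pi16(a, b);
        const int16_t* result = reinterpret_cast<const int16_t*>(&sum);

        // _mm_set_pi16 places its first argument in the highest lane, so
        // result[3] corresponds to mixBuffer[0], as in the mixer code above
        for (int i = 0; i < 4; i++) {
            printf("%d\n", result[i]);
        }

        _mm_empty(); // leave MMX state before any x87 floating point use
        return 0;
    }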
@@ -50,10 +50,22 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {

    // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
    AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();

    // read the first byte after the header to see if this is a stereo or mono buffer
    quint8 channelFlag = packet.at(numBytesForPacketHeader(packet));
    bool isStereo = channelFlag == 1;

    if (avatarRingBuffer && avatarRingBuffer->isStereo() != isStereo) {
        // there's a mismatch in the buffer channels for the incoming and current buffer
        // so delete our current buffer and create a new one
        _ringBuffers.removeOne(avatarRingBuffer);
        avatarRingBuffer->deleteLater();
        avatarRingBuffer = NULL;
    }

    if (!avatarRingBuffer) {
        // we don't have an AvatarAudioRingBuffer yet, so add it
-       avatarRingBuffer = new AvatarAudioRingBuffer();
+       avatarRingBuffer = new AvatarAudioRingBuffer(isStereo);
        _ringBuffers.push_back(avatarRingBuffer);
    }

@@ -106,7 +118,8 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
    PositionalAudioRingBuffer* audioBuffer = _ringBuffers[i];

    if (audioBuffer->willBeAddedToMix()) {
-       audioBuffer->shiftReadPosition(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+       audioBuffer->shiftReadPosition(audioBuffer->isStereo()
+           ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);

        audioBuffer->setWillBeAddedToMix(false);
    } else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector
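parseData() above decides mono versus stereo from a single flag byte placed immediately after the packet header. A tiny sketch of that convention with a hypothetical fixed header length (the real length comes from numBytesForPacketHeader()):

    #include <cstddef>
    #include <cstdint>
    #include <cassert>

    // Hypothetical fixed header size for illustration only; the project computes
    // the header length per packet.
    constexpr size_t kHeaderBytes = 24;

    bool isStereoPacket(const uint8_t* packet, size_t length) {
        assert(length > kHeaderBytes);
        return packet[kHeaderBytes] == 1;   // 1 = stereo, 0 = mono
    }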
@@ -24,14 +24,14 @@
    AudioMixerClientData();
    ~AudioMixerClientData();

-   const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
+   const QList<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
    AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;

    int parseData(const QByteArray& packet);
    void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples);
    void pushBuffersAfterFrameSend();
private:
-   std::vector<PositionalAudioRingBuffer*> _ringBuffers;
+   QList<PositionalAudioRingBuffer*> _ringBuffers;
};

#endif // hifi_AudioMixerClientData_h
@@ -13,8 +13,8 @@

#include "AvatarAudioRingBuffer.h"

-AvatarAudioRingBuffer::AvatarAudioRingBuffer() :
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone) {
+AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo) :
+    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo) {

}
@@ -18,7 +18,7 @@

class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
public:
-   AvatarAudioRingBuffer();
+   AvatarAudioRingBuffer(bool isStereo = false);

    int parseData(const QByteArray& packet);
private:
@ -20,21 +20,21 @@ var jointMappings = "\n# Joint list start";
|
|||
for (var i = 0; i < jointList.length; i++) {
|
||||
jointMappings = jointMappings + "\njointIndex = " + jointList[i] + " = " + i;
|
||||
}
|
||||
print(jointMappings + "\n# Joint list end");
|
||||
print(jointMappings + "\n# Joint list end");
|
||||
|
||||
Script.update.connect(function(deltaTime) {
|
||||
cumulativeTime += deltaTime;
|
||||
MyAvatar.setJointData("joint_R_hip", Quat.fromPitchYawRollDegrees(0.0, 0.0, AMPLITUDE * Math.sin(cumulativeTime * FREQUENCY)));
|
||||
MyAvatar.setJointData("joint_L_hip", Quat.fromPitchYawRollDegrees(0.0, 0.0, -AMPLITUDE * Math.sin(cumulativeTime * FREQUENCY)));
|
||||
MyAvatar.setJointData("joint_R_knee", Quat.fromPitchYawRollDegrees(0.0, 0.0,
|
||||
AMPLITUDE * (1.0 + Math.sin(cumulativeTime * FREQUENCY))));
|
||||
MyAvatar.setJointData("joint_L_knee", Quat.fromPitchYawRollDegrees(0.0, 0.0,
|
||||
AMPLITUDE * (1.0 - Math.sin(cumulativeTime * FREQUENCY))));
|
||||
MyAvatar.setJointData("RightUpLeg", Quat.fromPitchYawRollDegrees(AMPLITUDE * Math.sin(cumulativeTime * FREQUENCY), 0.0, 0.0));
|
||||
MyAvatar.setJointData("LeftUpLeg", Quat.fromPitchYawRollDegrees(-AMPLITUDE * Math.sin(cumulativeTime * FREQUENCY), 0.0, 0.0));
|
||||
MyAvatar.setJointData("RightLeg", Quat.fromPitchYawRollDegrees(
|
||||
AMPLITUDE * (1.0 + Math.sin(cumulativeTime * FREQUENCY)),0.0, 0.0));
|
||||
MyAvatar.setJointData("LeftLeg", Quat.fromPitchYawRollDegrees(
|
||||
AMPLITUDE * (1.0 - Math.sin(cumulativeTime * FREQUENCY)),0.0, 0.0));
|
||||
});
|
||||
|
||||
Script.scriptEnding.connect(function() {
|
||||
MyAvatar.clearJointData("joint_R_hip");
|
||||
MyAvatar.clearJointData("joint_L_hip");
|
||||
MyAvatar.clearJointData("joint_R_knee");
|
||||
MyAvatar.clearJointData("joint_L_knee");
|
||||
MyAvatar.clearJointData("RightUpLeg");
|
||||
MyAvatar.clearJointData("LeftUpLeg");
|
||||
MyAvatar.clearJointData("RightLeg");
|
||||
MyAvatar.clearJointData("LeftLeg");
|
||||
});
|
||||
|
|
|
@ -51,7 +51,7 @@ var flockGravity = { x: 0, y: -1, z: 0};
|
|||
var enableRandomXZThrust = false; // leading birds randomly decide to thrust in some random direction.
|
||||
var enableSomeBirdsLead = false; // birds randomly decide not fly toward flock, causing other birds to follow
|
||||
var leaders = 0; // number of birds leading
|
||||
var PROBABILITY_TO_LEAD = 0.1; // probabolity a bird will choose to lead
|
||||
var PROBABILITY_TO_LEAD = 0.1; // probability a bird will choose to lead
|
||||
|
||||
var birds = new Array(); // array of bird state data
|
||||
|
||||
|
|
examples/squeezeHands.js (new file, 52 lines)

@@ -0,0 +1,52 @@
//
// squeezeHands.js
// examples
//
// Created by Philip Rosedale on June 4, 2014
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

var rightHandAnimation = "https://s3-us-west-1.amazonaws.com/highfidelity-public/animations/RightHandAnim.fbx";
var leftHandAnimation = "https://s3-us-west-1.amazonaws.com/highfidelity-public/animations/LeftHandAnim.fbx";

var LEFT = 0;
var RIGHT = 1;

var lastLeftFrame = 0;
var lastRightFrame = 0;

var LAST_FRAME = 11.0; // What is the number of the last frame we want to use in the animation?
var SMOOTH_FACTOR = 0.80;

Script.update.connect(function(deltaTime) {
    var leftTriggerValue = Math.sqrt(Controller.getTriggerValue(LEFT));
    var rightTriggerValue = Math.sqrt(Controller.getTriggerValue(RIGHT));

    var leftFrame, rightFrame;

    // Average last few trigger frames together for a bit of smoothing
    leftFrame = (leftTriggerValue * LAST_FRAME) * (1.0 - SMOOTH_FACTOR) + lastLeftFrame * SMOOTH_FACTOR;
    rightFrame = (rightTriggerValue * LAST_FRAME) * (1.0 - SMOOTH_FACTOR) + lastRightFrame * SMOOTH_FACTOR;

    if ((leftFrame != lastLeftFrame) && leftHandAnimation.length) {
        MyAvatar.stopAnimation(leftHandAnimation);
        MyAvatar.startAnimation(leftHandAnimation, 30.0, 1.0, false, true, leftFrame, leftFrame);
    }
    if ((rightFrame != lastRightFrame) && rightHandAnimation.length) {
        MyAvatar.stopAnimation(rightHandAnimation);
        MyAvatar.startAnimation(rightHandAnimation, 30.0, 1.0, false, true, rightFrame, rightFrame);
    }

    lastLeftFrame = leftFrame;
    lastRightFrame = rightFrame;
});

Script.scriptEnding.connect(function() {
    MyAvatar.stopAnimation(leftHandAnimation);
    MyAvatar.stopAnimation(rightHandAnimation);
});
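The SMOOTH_FACTOR blend in squeezeHands.js is a one-pole low-pass: each output frame is 20% new trigger value and 80% previous output. The same filter as a small standalone sketch (constants copied from the script; the struct name is just for illustration):

    #include <cstdio>

    // One-pole exponential smoother: out = in * (1 - k) + previousOut * k
    struct Smoother {
        float k;            // smoothing factor, e.g. 0.80f as in squeezeHands.js
        float state = 0.0f; // previous output
        float step(float in) {
            state = in * (1.0f - k) + state * k;
            return state;
        }
    };

    int main() {
        Smoother frameSmoother{0.80f};
        // feed a fully squeezed trigger (frame 11.0) and watch the output converge
        for (int i = 0; i < 10; i++) {
            printf("%.2f\n", frameSmoother.step(11.0f));
        }
        return 0;
    }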
@ -293,8 +293,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
|
|||
// move the silentNodeTimer to the _nodeThread
|
||||
QTimer* silentNodeTimer = new QTimer();
|
||||
connect(silentNodeTimer, SIGNAL(timeout()), nodeList, SLOT(removeSilentNodes()));
|
||||
silentNodeTimer->moveToThread(_nodeThread);
|
||||
silentNodeTimer->start(NODE_SILENCE_THRESHOLD_MSECS);
|
||||
silentNodeTimer->moveToThread(_nodeThread);
|
||||
|
||||
// send the identity packet for our avatar each second to our avatar mixer
|
||||
QTimer* identityPacketTimer = new QTimer();
|
||||
|
@ -1098,7 +1098,8 @@ void Application::mouseMoveEvent(QMouseEvent* event) {
|
|||
|
||||
|
||||
_lastMouseMove = usecTimestampNow();
|
||||
if (_mouseHidden) {
|
||||
|
||||
if (_mouseHidden && !OculusManager::isConnected()) {
|
||||
getGLWidget()->setCursor(Qt::ArrowCursor);
|
||||
_mouseHidden = false;
|
||||
_seenMouseMove = true;
|
||||
|
@ -1842,7 +1843,8 @@ void Application::updateMyAvatarLookAtPosition() {
|
|||
}
|
||||
} else {
|
||||
// I am not looking at anyone else, so just look forward
|
||||
lookAtSpot = _myAvatar->getHead()->calculateAverageEyePosition() + (_myAvatar->getHead()->getFinalOrientation() * glm::vec3(0.f, 0.f, -TREE_SCALE));
|
||||
lookAtSpot = _myAvatar->getHead()->calculateAverageEyePosition() +
|
||||
(_myAvatar->getHead()->getFinalOrientationInWorldFrame() * glm::vec3(0.f, 0.f, -TREE_SCALE));
|
||||
}
|
||||
// TODO: Add saccade to mouse pointer when stable, IF not looking at someone (since we know we are looking at it)
|
||||
/*
|
||||
|
|
|
@@ -68,6 +68,7 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
    _proceduralOutputDevice(NULL),
    _inputRingBuffer(0),
    _ringBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL),
+   _isStereoInput(false),
    _averagedLatency(0.0),
    _measuredJitter(0),
    _jitterBufferSamples(initialJitterBufferSamples),
@@ -289,20 +290,27 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
    if (sourceToDestinationFactor >= 2) {
        // we need to downsample from 48 to 24
        // for now this only supports a mono output - this would be the case for audio input

        for (unsigned int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
            if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
                destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
        if (destinationAudioFormat.channelCount() == 1) {
            for (unsigned int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
                if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
                    destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
                        (sourceSamples[i - sourceAudioFormat.channelCount()] / 2)
                        + (sourceSamples[i] / 2);
            } else {
                destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
                } else {
                    destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
                        (sourceSamples[i - sourceAudioFormat.channelCount()] / 4)
                        + (sourceSamples[i] / 2)
                        + (sourceSamples[i + sourceAudioFormat.channelCount()] / 4);
                }
            }
        } else {
            // this is a 48 to 24 resampling but both source and destination are two channels
            // squish two samples into one in each channel
            for (int i = 0; i < numSourceSamples; i += 4) {
                destinationSamples[i / 2] = (sourceSamples[i] / 2) + (sourceSamples[i + 2] / 2);
                destinationSamples[(i / 2) + 1] = (sourceSamples[i + 1] / 2) + (sourceSamples[i + 3] / 2);
            }
        }

    } else {
        if (sourceAudioFormat.sampleRate() == destinationAudioFormat.sampleRate()) {
            // mono to stereo, same sample rate
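The stereo branch added above halves the sample rate by averaging adjacent frames per channel: destination frame n is the mean of source frames 2n and 2n+1, with each operand pre-divided by two to stay inside 16-bit range. A standalone sketch of that 2:1 interleaved-stereo downsample (buffer handling is illustrative, not the engine's):

    #include <cstdint>
    #include <vector>

    // 2:1 downsample of interleaved stereo (L R L R ...) by averaging adjacent frames.
    // Mirrors the "squish two samples into one in each channel" idea above.
    std::vector<int16_t> downsampleStereoByTwo(const std::vector<int16_t>& source) {
        std::vector<int16_t> destination(source.size() / 2);
        for (size_t i = 0; i + 3 < source.size(); i += 4) {
            destination[i / 2]     = (source[i] / 2)     + (source[i + 2] / 2); // left
            destination[i / 2 + 1] = (source[i + 1] / 2) + (source[i + 3] / 2); // right
        }
        return destination;
    }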
@@ -405,12 +413,12 @@ bool Audio::switchOutputToAudioDevice(const QString& outputDeviceName) {
}

void Audio::handleAudioInput() {
-   static char monoAudioDataPacket[MAX_PACKET_SIZE];
+   static char audioDataPacket[MAX_PACKET_SIZE];

    static int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMicrophoneAudioNoEcho);
-   static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat);
+   static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);

-   static int16_t* monoAudioSamples = (int16_t*) (monoAudioDataPacket + leadingBytes);
+   static int16_t* networkAudioSamples = (int16_t*) (audioDataPacket + leadingBytes);

    float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
@ -452,126 +460,139 @@ void Audio::handleAudioInput() {
|
|||
|
||||
int16_t* inputAudioSamples = new int16_t[inputSamplesRequired];
|
||||
_inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);
|
||||
|
||||
int numNetworkBytes = _isStereoInput ? NETWORK_BUFFER_LENGTH_BYTES_STEREO : NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
|
||||
int numNetworkSamples = _isStereoInput ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
|
||||
|
||||
// zero out the monoAudioSamples array and the locally injected audio
|
||||
memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
|
||||
memset(networkAudioSamples, 0, numNetworkBytes);
|
||||
|
||||
if (!_muted) {
|
||||
// we aren't muted, downsample the input audio
|
||||
linearResampling((int16_t*) inputAudioSamples,
|
||||
monoAudioSamples,
|
||||
inputSamplesRequired,
|
||||
NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
|
||||
linearResampling((int16_t*) inputAudioSamples, networkAudioSamples,
|
||||
inputSamplesRequired, numNetworkSamples,
|
||||
_inputFormat, _desiredInputFormat);
|
||||
|
||||
//
|
||||
// Impose Noise Gate
|
||||
//
|
||||
// The Noise Gate is used to reject constant background noise by measuring the noise
|
||||
// floor observed at the microphone and then opening the 'gate' to allow microphone
|
||||
// signals to be transmitted when the microphone samples average level exceeds a multiple
|
||||
// of the noise floor.
|
||||
//
|
||||
// NOISE_GATE_HEIGHT: How loud you have to speak relative to noise background to open the gate.
|
||||
// Make this value lower for more sensitivity and less rejection of noise.
|
||||
// NOISE_GATE_WIDTH: The number of samples in an audio frame for which the height must be exceeded
|
||||
// to open the gate.
|
||||
// NOISE_GATE_CLOSE_FRAME_DELAY: Once the noise is below the gate height for the frame, how many frames
|
||||
// will we wait before closing the gate.
|
||||
// NOISE_GATE_FRAMES_TO_AVERAGE: How many audio frames should we average together to compute noise floor.
|
||||
// More means better rejection but also can reject continuous things like singing.
|
||||
// NUMBER_OF_NOISE_SAMPLE_FRAMES: How often should we re-evaluate the noise floor?
|
||||
|
||||
|
||||
float loudness = 0;
|
||||
float thisSample = 0;
|
||||
int samplesOverNoiseGate = 0;
|
||||
|
||||
const float NOISE_GATE_HEIGHT = 7.0f;
|
||||
const int NOISE_GATE_WIDTH = 5;
|
||||
const int NOISE_GATE_CLOSE_FRAME_DELAY = 5;
|
||||
const int NOISE_GATE_FRAMES_TO_AVERAGE = 5;
|
||||
const float DC_OFFSET_AVERAGING = 0.99f;
|
||||
const float CLIPPING_THRESHOLD = 0.90f;
|
||||
|
||||
//
|
||||
// Check clipping, adjust DC offset, and check if should open noise gate
|
||||
//
|
||||
float measuredDcOffset = 0.0f;
|
||||
// Increment the time since the last clip
|
||||
if (_timeSinceLastClip >= 0.0f) {
|
||||
_timeSinceLastClip += (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE;
|
||||
}
|
||||
|
||||
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
|
||||
measuredDcOffset += monoAudioSamples[i];
|
||||
monoAudioSamples[i] -= (int16_t) _dcOffset;
|
||||
thisSample = fabsf(monoAudioSamples[i]);
|
||||
if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
|
||||
_timeSinceLastClip = 0.0f;
|
||||
// only impose the noise gate and perform tone injection if we sending mono audio
|
||||
if (!_isStereoInput) {
|
||||
|
||||
//
|
||||
// Impose Noise Gate
|
||||
//
|
||||
// The Noise Gate is used to reject constant background noise by measuring the noise
|
||||
// floor observed at the microphone and then opening the 'gate' to allow microphone
|
||||
// signals to be transmitted when the microphone samples average level exceeds a multiple
|
||||
// of the noise floor.
|
||||
//
|
||||
// NOISE_GATE_HEIGHT: How loud you have to speak relative to noise background to open the gate.
|
||||
// Make this value lower for more sensitivity and less rejection of noise.
|
||||
// NOISE_GATE_WIDTH: The number of samples in an audio frame for which the height must be exceeded
|
||||
// to open the gate.
|
||||
// NOISE_GATE_CLOSE_FRAME_DELAY: Once the noise is below the gate height for the frame, how many frames
|
||||
// will we wait before closing the gate.
|
||||
// NOISE_GATE_FRAMES_TO_AVERAGE: How many audio frames should we average together to compute noise floor.
|
||||
// More means better rejection but also can reject continuous things like singing.
|
||||
// NUMBER_OF_NOISE_SAMPLE_FRAMES: How often should we re-evaluate the noise floor?
|
||||
|
||||
|
||||
float loudness = 0;
|
||||
float thisSample = 0;
|
||||
int samplesOverNoiseGate = 0;
|
||||
|
||||
const float NOISE_GATE_HEIGHT = 7.0f;
|
||||
const int NOISE_GATE_WIDTH = 5;
|
||||
const int NOISE_GATE_CLOSE_FRAME_DELAY = 5;
|
||||
const int NOISE_GATE_FRAMES_TO_AVERAGE = 5;
|
||||
const float DC_OFFSET_AVERAGING = 0.99f;
|
||||
const float CLIPPING_THRESHOLD = 0.90f;
|
||||
|
||||
//
|
||||
// Check clipping, adjust DC offset, and check if should open noise gate
|
||||
//
|
||||
float measuredDcOffset = 0.0f;
|
||||
// Increment the time since the last clip
|
||||
if (_timeSinceLastClip >= 0.0f) {
|
||||
_timeSinceLastClip += (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE;
|
||||
}
|
||||
loudness += thisSample;
|
||||
// Noise Reduction: Count peaks above the average loudness
|
||||
if (_noiseGateEnabled && (thisSample > (_noiseGateMeasuredFloor * NOISE_GATE_HEIGHT))) {
|
||||
samplesOverNoiseGate++;
|
||||
}
|
||||
}
|
||||
|
||||
measuredDcOffset /= NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
|
||||
if (_dcOffset == 0.0f) {
|
||||
// On first frame, copy over measured offset
|
||||
_dcOffset = measuredDcOffset;
|
||||
} else {
|
||||
_dcOffset = DC_OFFSET_AVERAGING * _dcOffset + (1.0f - DC_OFFSET_AVERAGING) * measuredDcOffset;
|
||||
}
|
||||
|
||||
// Add tone injection if enabled
|
||||
const float TONE_FREQ = 220.0f / SAMPLE_RATE * TWO_PI;
|
||||
const float QUARTER_VOLUME = 8192.0f;
|
||||
if (_toneInjectionEnabled) {
|
||||
loudness = 0.0f;
|
||||
|
||||
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
|
||||
monoAudioSamples[i] = QUARTER_VOLUME * sinf(TONE_FREQ * (float)(i + _proceduralEffectSample));
|
||||
loudness += fabsf(monoAudioSamples[i]);
|
||||
}
|
||||
}
|
||||
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
|
||||
|
||||
// If Noise Gate is enabled, check and turn the gate on and off
|
||||
if (!_toneInjectionEnabled && _noiseGateEnabled) {
|
||||
float averageOfAllSampleFrames = 0.0f;
|
||||
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
|
||||
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
|
||||
float smallestSample = FLT_MAX;
|
||||
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i += NOISE_GATE_FRAMES_TO_AVERAGE) {
|
||||
float thisAverage = 0.0f;
|
||||
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
|
||||
thisAverage += _noiseSampleFrames[j];
|
||||
averageOfAllSampleFrames += _noiseSampleFrames[j];
|
||||
}
|
||||
thisAverage /= NOISE_GATE_FRAMES_TO_AVERAGE;
|
||||
|
||||
if (thisAverage < smallestSample) {
|
||||
smallestSample = thisAverage;
|
||||
}
|
||||
measuredDcOffset += networkAudioSamples[i];
|
||||
networkAudioSamples[i] -= (int16_t) _dcOffset;
|
||||
thisSample = fabsf(networkAudioSamples[i]);
|
||||
if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
|
||||
_timeSinceLastClip = 0.0f;
|
||||
}
|
||||
loudness += thisSample;
|
||||
// Noise Reduction: Count peaks above the average loudness
|
||||
if (_noiseGateEnabled && (thisSample > (_noiseGateMeasuredFloor * NOISE_GATE_HEIGHT))) {
|
||||
samplesOverNoiseGate++;
|
||||
}
|
||||
averageOfAllSampleFrames /= NUMBER_OF_NOISE_SAMPLE_FRAMES;
|
||||
_noiseGateMeasuredFloor = smallestSample;
|
||||
_noiseGateSampleCounter = 0;
|
||||
|
||||
}
|
||||
if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
|
||||
_noiseGateOpen = true;
|
||||
_noiseGateFramesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
|
||||
|
||||
measuredDcOffset /= NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
|
||||
if (_dcOffset == 0.0f) {
|
||||
// On first frame, copy over measured offset
|
||||
_dcOffset = measuredDcOffset;
|
||||
} else {
|
||||
if (--_noiseGateFramesToClose == 0) {
|
||||
_noiseGateOpen = false;
|
||||
_dcOffset = DC_OFFSET_AVERAGING * _dcOffset + (1.0f - DC_OFFSET_AVERAGING) * measuredDcOffset;
|
||||
}
|
||||
|
||||
// Add tone injection if enabled
|
||||
const float TONE_FREQ = 220.0f / SAMPLE_RATE * TWO_PI;
|
||||
const float QUARTER_VOLUME = 8192.0f;
|
||||
if (_toneInjectionEnabled) {
|
||||
loudness = 0.0f;
|
||||
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
|
||||
networkAudioSamples[i] = QUARTER_VOLUME * sinf(TONE_FREQ * (float)(i + _proceduralEffectSample));
|
||||
loudness += fabsf(networkAudioSamples[i]);
|
||||
}
|
||||
}
|
||||
if (!_noiseGateOpen) {
|
||||
memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
|
||||
_lastInputLoudness = 0;
|
||||
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
|
||||
|
||||
// If Noise Gate is enabled, check and turn the gate on and off
|
||||
if (!_toneInjectionEnabled && _noiseGateEnabled) {
|
||||
float averageOfAllSampleFrames = 0.0f;
|
||||
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
|
||||
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
|
||||
float smallestSample = FLT_MAX;
|
||||
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i += NOISE_GATE_FRAMES_TO_AVERAGE) {
|
||||
float thisAverage = 0.0f;
|
||||
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
|
||||
thisAverage += _noiseSampleFrames[j];
|
||||
averageOfAllSampleFrames += _noiseSampleFrames[j];
|
||||
}
|
||||
thisAverage /= NOISE_GATE_FRAMES_TO_AVERAGE;
|
||||
|
||||
if (thisAverage < smallestSample) {
|
||||
smallestSample = thisAverage;
|
||||
}
|
||||
}
|
||||
averageOfAllSampleFrames /= NUMBER_OF_NOISE_SAMPLE_FRAMES;
|
||||
_noiseGateMeasuredFloor = smallestSample;
|
||||
_noiseGateSampleCounter = 0;
|
||||
|
||||
}
|
||||
if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
|
||||
_noiseGateOpen = true;
|
||||
_noiseGateFramesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
|
||||
} else {
|
||||
if (--_noiseGateFramesToClose == 0) {
|
||||
_noiseGateOpen = false;
|
||||
}
|
||||
}
|
||||
if (!_noiseGateOpen) {
|
||||
memset(networkAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
|
||||
_lastInputLoudness = 0;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
float loudness = 0.0f;
|
||||
|
||||
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; i++) {
|
||||
loudness += fabsf(networkAudioSamples[i]);
|
||||
}
|
||||
|
||||
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
|
||||
}
|
||||
} else {
|
||||
// our input loudness is 0, since we're muted
|
||||
|
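The long comment block inside the hunk above describes the noise gate: track a noise floor, open the gate when enough samples in a frame exceed NOISE_GATE_HEIGHT times that floor, and close it NOISE_GATE_CLOSE_FRAME_DELAY quiet frames later. A compact, simplified sketch of that per-frame logic (threshold constants are copied from the diff; the class and the floor handling are hypothetical):

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    // Simplified per-frame noise gate in the spirit of the code above.
    class NoiseGate {
    public:
        // Returns true if the frame should pass; zeroes the frame otherwise.
        bool process(int16_t* samples, int numSamples, float noiseFloor) {
            const float NOISE_GATE_HEIGHT = 7.0f;       // open when samples exceed 7x the floor
            const int NOISE_GATE_WIDTH = 5;             // ...for at least 5 samples in the frame
            const int NOISE_GATE_CLOSE_FRAME_DELAY = 5; // close 5 quiet frames later

            int samplesOverGate = 0;
            for (int i = 0; i < numSamples; i++) {
                if (std::abs(samples[i]) > noiseFloor * NOISE_GATE_HEIGHT) {
                    samplesOverGate++;
                }
            }

            if (samplesOverGate > NOISE_GATE_WIDTH) {
                _open = true;
                _framesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
            } else if (_framesToClose > 0 && --_framesToClose == 0) {
                _open = false;
            }

            if (!_open) {
                memset(samples, 0, numSamples * sizeof(int16_t));
            }
            return _open;
        }

    private:
        bool _open = false;
        int _framesToClose = 0;
    };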
@ -580,19 +601,19 @@ void Audio::handleAudioInput() {
|
|||
|
||||
// at this point we have clean monoAudioSamples, which match our target output...
|
||||
// this is what we should send to our interested listeners
|
||||
if (_processSpatialAudio && !_muted && _audioOutput) {
|
||||
QByteArray monoInputData((char*)monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
|
||||
if (_processSpatialAudio && !_muted && !_isStereoInput && _audioOutput) {
|
||||
QByteArray monoInputData((char*)networkAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
|
||||
emit processLocalAudio(_spatialAudioStart, monoInputData, _desiredInputFormat);
|
||||
}
|
||||
|
||||
if (_proceduralAudioOutput) {
|
||||
processProceduralAudio(monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
|
||||
if (!_isStereoInput && _proceduralAudioOutput) {
|
||||
processProceduralAudio(networkAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
|
||||
}
|
||||
|
||||
if (_scopeEnabled && !_scopeEnabledPause) {
|
||||
if (!_isStereoInput && _scopeEnabled && !_scopeEnabledPause) {
|
||||
unsigned int numMonoAudioChannels = 1;
|
||||
unsigned int monoAudioChannel = 0;
|
||||
addBufferToScope(_scopeInput, _scopeInputOffset, monoAudioSamples, monoAudioChannel, numMonoAudioChannels);
|
||||
addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, monoAudioChannel, numMonoAudioChannels);
|
||||
_scopeInputOffset += NETWORK_SAMPLES_PER_FRAME;
|
||||
_scopeInputOffset %= _samplesPerScope;
|
||||
}
|
||||
|
@ -603,10 +624,8 @@ void Audio::handleAudioInput() {
|
|||
if (audioMixer && audioMixer->getActiveSocket()) {
|
||||
MyAvatar* interfaceAvatar = Application::getInstance()->getAvatar();
|
||||
glm::vec3 headPosition = interfaceAvatar->getHead()->getPosition();
|
||||
glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientation();
|
||||
|
||||
// we need the amount of bytes in the buffer + 1 for type
|
||||
// + 12 for 3 floats for position + float for bearing + 1 attenuation byte
|
||||
glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientationInWorldFrame();
|
||||
quint8 isStereo = _isStereoInput ? 1 : 0;
|
||||
|
||||
int numAudioBytes = 0;
|
||||
|
||||
|
@ -615,11 +634,12 @@ void Audio::handleAudioInput() {
|
|||
packetType = PacketTypeSilentAudioFrame;
|
||||
|
||||
// we need to indicate how many silent samples this is to the audio mixer
|
||||
monoAudioSamples[0] = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
|
||||
audioDataPacket[0] = _isStereoInput
|
||||
? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO
|
||||
: NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
|
||||
numAudioBytes = sizeof(int16_t);
|
||||
|
||||
} else {
|
||||
numAudioBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
|
||||
numAudioBytes = _isStereoInput ? NETWORK_BUFFER_LENGTH_BYTES_STEREO : NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
|
||||
|
||||
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
|
||||
packetType = PacketTypeMicrophoneAudioWithEcho;
|
||||
|
@ -628,7 +648,10 @@ void Audio::handleAudioInput() {
|
|||
}
|
||||
}
|
||||
|
||||
char* currentPacketPtr = monoAudioDataPacket + populatePacketHeader(monoAudioDataPacket, packetType);
|
||||
char* currentPacketPtr = audioDataPacket + populatePacketHeader(audioDataPacket, packetType);
|
||||
|
||||
// set the mono/stereo byte
|
||||
*currentPacketPtr++ = isStereo;
|
||||
|
||||
// memcpy the three float positions
|
||||
memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
|
||||
|
@ -638,7 +661,7 @@ void Audio::handleAudioInput() {
|
|||
memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
|
||||
currentPacketPtr += sizeof(headOrientation);
|
||||
|
||||
nodeList->writeDatagram(monoAudioDataPacket, numAudioBytes + leadingBytes, audioMixer);
|
||||
nodeList->writeDatagram(audioDataPacket, numAudioBytes + leadingBytes, audioMixer);
|
||||
|
||||
Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
|
||||
.updateValue(numAudioBytes + leadingBytes);
|
||||
|
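After this change the payload that follows the packet header is: one quint8 mono/stereo flag, the head position (glm::vec3), the head orientation (glm::quat), then the samples, which is what the pointer bumps above write out. A sketch of that layout using plain structs instead of the project's glm/Qt types (sizes and the function name are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct Vec3 { float x, y, z; };
    struct Quat { float x, y, z, w; };

    // Serialize the post-header portion of an audio packet:
    // [stereo flag][position][orientation][samples...]
    // Returns the number of bytes written after the header.
    size_t writeAudioPayload(char* cursor, bool isStereo, const Vec3& headPosition,
                             const Quat& headOrientation, const int16_t* samples, size_t numSamples) {
        char* start = cursor;

        *cursor++ = isStereo ? 1 : 0;                               // mono/stereo flag byte

        memcpy(cursor, &headPosition, sizeof(headPosition));        // 12 bytes of position
        cursor += sizeof(headPosition);

        memcpy(cursor, &headOrientation, sizeof(headOrientation));  // 16 bytes of orientation
        cursor += sizeof(headOrientation);

        memcpy(cursor, samples, numSamples * sizeof(int16_t));      // the audio itself
        cursor += numSamples * sizeof(int16_t);

        return cursor - start;
    }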
@@ -761,6 +784,24 @@ void Audio::toggleAudioNoiseReduction() {
    _noiseGateEnabled = !_noiseGateEnabled;
}

+void Audio::toggleStereoInput() {
+    int oldChannelCount = _desiredInputFormat.channelCount();
+    QAction* stereoAudioOption = Menu::getInstance()->getActionForOption(MenuOption::StereoAudio);
+
+    if (stereoAudioOption->isChecked()) {
+        _desiredInputFormat.setChannelCount(2);
+        _isStereoInput = true;
+    } else {
+        _desiredInputFormat.setChannelCount(1);
+        _isStereoInput = false;
+    }
+
+    if (oldChannelCount != _desiredInputFormat.channelCount()) {
+        // change in channel count for desired input format, restart the input device
+        switchInputToAudioDevice(_inputAudioDeviceName);
+    }
+}
+
void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
    _ringBuffer.parseData(audioByteArray);
@ -1300,18 +1341,21 @@ bool Audio::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {
|
|||
|
||||
if (adjustedFormatForAudioDevice(inputDeviceInfo, _desiredInputFormat, _inputFormat)) {
|
||||
qDebug() << "The format to be used for audio input is" << _inputFormat;
|
||||
|
||||
_audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
|
||||
_numInputCallbackBytes = calculateNumberOfInputCallbackBytes(_inputFormat);
|
||||
_audioInput->setBufferSize(_numInputCallbackBytes);
|
||||
|
||||
// how do we want to handle input working, but output not working?
|
||||
int numFrameSamples = calculateNumberOfFrameSamples(_numInputCallbackBytes);
|
||||
_inputRingBuffer.resizeForFrameSize(numFrameSamples);
|
||||
_inputDevice = _audioInput->start();
|
||||
connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));
|
||||
|
||||
supportedFormat = true;
|
||||
|
||||
// if the user wants stereo but this device can't provide then bail
|
||||
if (!_isStereoInput || _inputFormat.channelCount() == 2) {
|
||||
_audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
|
||||
_numInputCallbackBytes = calculateNumberOfInputCallbackBytes(_inputFormat);
|
||||
_audioInput->setBufferSize(_numInputCallbackBytes);
|
||||
|
||||
// how do we want to handle input working, but output not working?
|
||||
int numFrameSamples = calculateNumberOfFrameSamples(_numInputCallbackBytes);
|
||||
_inputRingBuffer.resizeForFrameSize(numFrameSamples);
|
||||
_inputDevice = _audioInput->start();
|
||||
connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));
|
||||
|
||||
supportedFormat = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return supportedFormat;
|
||||
|
|
|
@ -85,6 +85,7 @@ public slots:
|
|||
void toggleScope();
|
||||
void toggleScopePause();
|
||||
void toggleAudioSpatialProcessing();
|
||||
void toggleStereoInput();
|
||||
void selectAudioScopeFiveFrames();
|
||||
void selectAudioScopeTwentyFrames();
|
||||
void selectAudioScopeFiftyFrames();
|
||||
|
@ -127,6 +128,7 @@ private:
|
|||
QIODevice* _proceduralOutputDevice;
|
||||
AudioRingBuffer _inputRingBuffer;
|
||||
AudioRingBuffer _ringBuffer;
|
||||
bool _isStereoInput;
|
||||
|
||||
QString _inputAudioDeviceName;
|
||||
QString _outputAudioDeviceName;
|
||||
|
|
|
@ -459,7 +459,7 @@ void AudioReflector::calculateAllReflections() {
|
|||
// only recalculate when we've moved, or if the attributes have changed
|
||||
// TODO: what about case where new voxels are added in front of us???
|
||||
bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented);
|
||||
glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation();
|
||||
glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientationInWorldFrame() : _myAvatar->getOrientation();
|
||||
glm::vec3 origin = _myAvatar->getHead()->getPosition();
|
||||
glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition();
|
||||
|
||||
|
|
|
@ -348,7 +348,6 @@ Menu::Menu() :
|
|||
|
||||
QMenu* avatarOptionsMenu = developerMenu->addMenu("Avatar Options");
|
||||
|
||||
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::AllowOculusCameraModeChange, 0, false);
|
||||
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::Avatars, 0, true);
|
||||
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::AvatarsReceiveShadows, 0, true);
|
||||
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::RenderSkeletonCollisionShapes);
|
||||
|
@ -375,6 +374,10 @@ Menu::Menu() :
|
|||
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::GlowWhenSpeaking, 0, true);
|
||||
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::ChatCircling, 0, false);
|
||||
|
||||
QMenu* oculusOptionsMenu = developerMenu->addMenu("Oculus Options");
|
||||
addCheckableActionToQMenuAndActionHash(oculusOptionsMenu, MenuOption::AllowOculusCameraModeChange, 0, false);
|
||||
addCheckableActionToQMenuAndActionHash(oculusOptionsMenu, MenuOption::DisplayOculusOverlays, 0, true);
|
||||
|
||||
QMenu* handOptionsMenu = developerMenu->addMenu("Hand Options");
|
||||
|
||||
addCheckableActionToQMenuAndActionHash(handOptionsMenu,
|
||||
|
@ -429,6 +432,8 @@ Menu::Menu() :
|
|||
SLOT(toggleAudioNoiseReduction()));
|
||||
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoServerAudio);
|
||||
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoLocalAudio);
|
||||
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::StereoAudio, 0, false,
|
||||
appInstance->getAudio(), SLOT(toggleStereoInput()));
|
||||
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::MuteAudio,
|
||||
Qt::CTRL | Qt::Key_M,
|
||||
false,
|
||||
|
|
|
@ -326,6 +326,7 @@ namespace MenuOption {
|
|||
const QString DisplayModelBounds = "Display Model Bounds";
|
||||
const QString DisplayModelElementProxy = "Display Model Element Bounds";
|
||||
const QString DisplayModelElementChildProxies = "Display Model Element Children";
|
||||
const QString DisplayOculusOverlays = "Display Oculus Overlays";
|
||||
const QString DisplayTimingDetails = "Display Timing Details";
|
||||
const QString DontFadeOnVoxelServerChanges = "Don't Fade In/Out on Voxel Server Changes";
|
||||
const QString EchoLocalAudio = "Echo Local Audio";
|
||||
|
@ -401,6 +402,7 @@ namespace MenuOption {
|
|||
const QString StandOnNearbyFloors = "Stand on nearby floors";
|
||||
const QString Stars = "Stars";
|
||||
const QString Stats = "Stats";
|
||||
const QString StereoAudio = "Stereo Audio";
|
||||
const QString StopAllScripts = "Stop All Scripts";
|
||||
const QString SuppressShortTimings = "Suppress Timings Less than 10ms";
|
||||
const QString TestPing = "Test Ping";
|
||||
|
|
|
@@ -400,9 +400,9 @@ bool closeEnoughForGovernmentWork(float a, float b) {
void runTimingTests() {
    // How long does it take to make a call to get the time?
    const int numTests = 1000000;
-   int iResults[numTests];
+   int* iResults = (int*)malloc(sizeof(int) * numTests);
    float fTest = 1.0;
-   float fResults[numTests];
+   float* fResults = (float*)malloc(sizeof(float) * numTests);
    QElapsedTimer startTime;
    startTime.start();
    float elapsedUsecs;
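The change above swaps fixed-size stack arrays for malloc because two arrays of a million elements (roughly 8 MB combined) can overflow the default thread stack. A modern-C++ sketch of the same fix using std::vector, which also frees itself; this is an alternative, not what the commit does:

    #include <vector>
    #include <cstdlib>

    void runTimingTestsSketch() {
        const int numTests = 1000000;
        std::vector<int> iResults(numTests);    // heap-backed, released automatically
        std::vector<float> fResults(numTests);

        for (int i = 0; i < numTests; i++) {
            iResults[i] = rand();
            fResults[i] = rand() / (float) RAND_MAX;
        }
        // ... timing code would go here, as in runTimingTests() above ...
    }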
@ -413,7 +413,7 @@ void runTimingTests() {
|
|||
|
||||
// Random number generation
|
||||
startTime.start();
|
||||
for (int i = 1; i < numTests; i++) {
|
||||
for (int i = 0; i < numTests; i++) {
|
||||
iResults[i] = rand();
|
||||
}
|
||||
elapsedUsecs = (float)startTime.nsecsElapsed() * NSEC_TO_USEC;
|
||||
|
@ -421,16 +421,19 @@ void runTimingTests() {
|
|||
|
||||
// Random number generation using randFloat()
|
||||
startTime.start();
|
||||
for (int i = 1; i < numTests; i++) {
|
||||
for (int i = 0; i < numTests; i++) {
|
||||
fResults[i] = randFloat();
|
||||
}
|
||||
elapsedUsecs = (float)startTime.nsecsElapsed() * NSEC_TO_USEC;
|
||||
qDebug("randFloat() stored in array usecs: %f, first result: %f", elapsedUsecs / (float) numTests, fResults[0]);
|
||||
|
||||
free(iResults);
|
||||
free(fResults);
|
||||
|
||||
// PowF function
|
||||
fTest = 1145323.2342f;
|
||||
startTime.start();
|
||||
for (int i = 1; i < numTests; i++) {
|
||||
for (int i = 0; i < numTests; i++) {
|
||||
fTest = powf(fTest, 0.5f);
|
||||
}
|
||||
elapsedUsecs = (float)startTime.nsecsElapsed() * NSEC_TO_USEC;
|
||||
|
@ -440,7 +443,7 @@ void runTimingTests() {
|
|||
float distance;
|
||||
glm::vec3 pointA(randVector()), pointB(randVector());
|
||||
startTime.start();
|
||||
for (int i = 1; i < numTests; i++) {
|
||||
for (int i = 0; i < numTests; i++) {
|
||||
//glm::vec3 temp = pointA - pointB;
|
||||
//float distanceSquared = glm::dot(temp, temp);
|
||||
distance = glm::distance(pointA, pointB);
|
||||
|
@ -454,7 +457,7 @@ void runTimingTests() {
|
|||
float result;
|
||||
|
||||
startTime.start();
|
||||
for (int i = 1; i < numTests; i++) {
|
||||
for (int i = 0; i < numTests; i++) {
|
||||
glm::vec3 temp = vecA-vecB;
|
||||
result = glm::dot(temp,temp);
|
||||
}
|
||||
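The commented-out lines in the timing test above hint at a common optimization: when a distance is only compared against a threshold, the squared distance (dot of the difference with itself) avoids the square root inside glm::distance(). A minimal sketch:

    #include <glm/glm.hpp>

    // Compare against a squared threshold instead of calling glm::distance(),
    // which performs a square root.
    bool isCloserThan(const glm::vec3& a, const glm::vec3& b, float threshold) {
        glm::vec3 temp = a - b;
        return glm::dot(temp, temp) < threshold * threshold;
    }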
|
|
|
@ -377,7 +377,7 @@ void Avatar::simulateAttachments(float deltaTime) {
|
|||
if (!isMyAvatar()) {
|
||||
model->setLODDistance(getLODDistance());
|
||||
}
|
||||
if (_skeletonModel.getJointPosition(jointIndex, jointPosition) &&
|
||||
if (_skeletonModel.getJointPositionInWorldFrame(jointIndex, jointPosition) &&
|
||||
_skeletonModel.getJointCombinedRotation(jointIndex, jointRotation)) {
|
||||
model->setTranslation(jointPosition + jointRotation * attachment.translation * _scale);
|
||||
model->setRotation(jointRotation * attachment.rotation);
|
||||
|
@ -713,7 +713,7 @@ glm::vec3 Avatar::getJointPosition(int index) const {
|
|||
return position;
|
||||
}
|
||||
glm::vec3 position;
|
||||
_skeletonModel.getJointPosition(index, position);
|
||||
_skeletonModel.getJointPositionInWorldFrame(index, position);
|
||||
return position;
|
||||
}
|
||||
|
||||
|
@ -725,7 +725,7 @@ glm::vec3 Avatar::getJointPosition(const QString& name) const {
|
|||
return position;
|
||||
}
|
||||
glm::vec3 position;
|
||||
_skeletonModel.getJointPosition(getJointIndex(name), position);
|
||||
_skeletonModel.getJointPositionInWorldFrame(getJointIndex(name), position);
|
||||
return position;
|
||||
}
|
||||
|
||||
|
|
|
@ -48,10 +48,10 @@ void FaceModel::simulate(float deltaTime, bool fullUpdate) {
|
|||
|
||||
void FaceModel::maybeUpdateNeckRotation(const JointState& parentState, const FBXJoint& joint, JointState& state) {
|
||||
// get the rotation axes in joint space and use them to adjust the rotation
|
||||
glm::mat3 axes = glm::mat3_cast(_rotation);
|
||||
glm::mat3 inverse = glm::mat3(glm::inverse(parentState._transform * glm::translate(state.getDefaultTranslationInParentFrame()) *
|
||||
glm::mat3 axes = glm::mat3_cast(glm::quat());
|
||||
glm::mat3 inverse = glm::mat3(glm::inverse(parentState.getTransform() * glm::translate(state.getDefaultTranslationInParentFrame()) *
|
||||
joint.preTransform * glm::mat4_cast(joint.preRotation)));
|
||||
state._rotation = glm::angleAxis(- RADIANS_PER_DEGREE * _owningHead->getFinalRoll(), glm::normalize(inverse * axes[2]))
|
||||
state._rotationInParentFrame = glm::angleAxis(- RADIANS_PER_DEGREE * _owningHead->getFinalRoll(), glm::normalize(inverse * axes[2]))
|
||||
* glm::angleAxis(RADIANS_PER_DEGREE * _owningHead->getFinalYaw(), glm::normalize(inverse * axes[1]))
|
||||
* glm::angleAxis(- RADIANS_PER_DEGREE * _owningHead->getFinalPitch(), glm::normalize(inverse * axes[0]))
|
||||
* joint.rotation;
|
||||
|
@ -59,14 +59,16 @@ void FaceModel::maybeUpdateNeckRotation(const JointState& parentState, const FBX
|
|||
|
||||
void FaceModel::maybeUpdateEyeRotation(const JointState& parentState, const FBXJoint& joint, JointState& state) {
|
||||
// likewise with the eye joints
|
||||
glm::mat4 inverse = glm::inverse(parentState._transform * glm::translate(state.getDefaultTranslationInParentFrame()) *
|
||||
joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation));
|
||||
glm::vec3 front = glm::vec3(inverse * glm::vec4(_owningHead->getFinalOrientation() * IDENTITY_FRONT, 0.0f));
|
||||
// NOTE: at the moment we do the math in the world-frame, hence the inverse transform is more complex than usual.
|
||||
glm::mat4 inverse = glm::inverse(glm::mat4_cast(_rotation) * parentState.getTransform() *
|
||||
glm::translate(state.getDefaultTranslationInParentFrame()) *
|
||||
joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation));
|
||||
glm::vec3 front = glm::vec3(inverse * glm::vec4(_owningHead->getFinalOrientationInWorldFrame() * IDENTITY_FRONT, 0.0f));
|
||||
glm::vec3 lookAt = glm::vec3(inverse * glm::vec4(_owningHead->getLookAtPosition() +
|
||||
_owningHead->getSaccade() - _translation, 1.0f));
|
||||
glm::quat between = rotationBetween(front, lookAt);
|
||||
const float MAX_ANGLE = 30.0f * RADIANS_PER_DEGREE;
|
||||
state._rotation = glm::angleAxis(glm::clamp(glm::angle(between), -MAX_ANGLE, MAX_ANGLE), glm::axis(between)) *
|
||||
state._rotationInParentFrame = glm::angleAxis(glm::clamp(glm::angle(between), -MAX_ANGLE, MAX_ANGLE), glm::axis(between)) *
|
||||
joint.rotation;
|
||||
}
|
||||
|
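The eye-rotation hunk above limits how far the eye joint may swing toward the look-at point: it takes the rotation between the front vector and the look-at direction and clamps its angle to 30 degrees about the same axis. The clamp in isolation, as a glm-based sketch (rotationBetween here is a stand-in for the engine's helper of the same name):

    #define GLM_ENABLE_EXPERIMENTAL   // required by newer GLM for the gtx headers
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>
    #include <glm/gtx/quaternion.hpp>

    // Stand-in for the engine's rotationBetween(): shortest rotation taking v1 onto v2.
    glm::quat rotationBetween(const glm::vec3& v1, const glm::vec3& v2) {
        return glm::rotation(glm::normalize(v1), glm::normalize(v2));
    }

    glm::quat clampedLookRotation(const glm::vec3& front, const glm::vec3& toTarget) {
        const float MAX_ANGLE = glm::radians(30.0f);
        glm::quat between = rotationBetween(front, toTarget);
        float angle = glm::clamp(glm::angle(between), -MAX_ANGLE, MAX_ANGLE);
        return glm::angleAxis(angle, glm::axis(between));
    }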
||||
|
@ -92,6 +94,6 @@ bool FaceModel::getEyePositions(glm::vec3& firstEyePosition, glm::vec3& secondEy
|
|||
return false;
|
||||
}
|
||||
const FBXGeometry& geometry = _geometry->getFBXGeometry();
|
||||
return getJointPosition(geometry.leftEyeJointIndex, firstEyePosition) &&
|
||||
getJointPosition(geometry.rightEyeJointIndex, secondEyePosition);
|
||||
return getJointPositionInWorldFrame(geometry.leftEyeJointIndex, firstEyePosition) &&
|
||||
getJointPositionInWorldFrame(geometry.rightEyeJointIndex, secondEyePosition);
|
||||
}
|
||||
|
|
|
@@ -188,9 +188,12 @@ void Head::setScale (float scale) {
_scale = scale;
}

glm::quat Head::getFinalOrientation() const {
return _owningAvatar->getOrientation() * glm::quat(glm::radians(
glm::vec3(getFinalPitch(), getFinalYaw(), getFinalRoll() )));
glm::quat Head::getFinalOrientationInWorldFrame() const {
return _owningAvatar->getOrientation() * getFinalOrientationInLocalFrame();
}

glm::quat Head::getFinalOrientationInLocalFrame() const {
return glm::quat(glm::radians(glm::vec3(getFinalPitch(), getFinalYaw(), getFinalRoll() )));
}

glm::quat Head::getCameraOrientation () const {
@@ -50,9 +50,13 @@ public:
void setRenderLookatVectors(bool onOff) { _renderLookatVectors = onOff; }
void setLeanSideways(float leanSideways) { _leanSideways = leanSideways; }
void setLeanForward(float leanForward) { _leanForward = leanForward; }

/// \return orientationBase+Delta
glm::quat getFinalOrientationInLocalFrame() const;

/// \return orientationBody * orientationBase+Delta
glm::quat getFinalOrientation() const;
/// \return orientationBody * (orientationBase+Delta)
glm::quat getFinalOrientationInWorldFrame() const;

/// \return orientationBody * orientationBasePitch
glm::quat getCameraOrientation () const;
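The renamed Head getters make the frame of each quaternion explicit: the local-frame value is built from the head's pitch/yaw/roll, and the world-frame value is the body orientation multiplied by that local value. A minimal standalone sketch of the same composition, using plain glm and hypothetical function names rather than the actual Head class:

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Local-frame head orientation from pitch/yaw/roll in degrees,
    // mirroring getFinalOrientationInLocalFrame() above.
    glm::quat headOrientationInLocalFrame(float pitchDegrees, float yawDegrees, float rollDegrees) {
        return glm::quat(glm::radians(glm::vec3(pitchDegrees, yawDegrees, rollDegrees)));
    }

    // World-frame orientation is body * local, mirroring getFinalOrientationInWorldFrame().
    glm::quat headOrientationInWorldFrame(const glm::quat& bodyOrientation,
                                          float pitchDegrees, float yawDegrees, float rollDegrees) {
        return bodyOrientation * headOrientationInLocalFrame(pitchDegrees, yawDegrees, rollDegrees);
    }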
@@ -236,7 +236,7 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
estimatedRotation = glm::degrees(safeEulerAngles(tracker->getHeadRotation()));
}
}

// Rotate the body if the head is turned beyond the screen
if (Menu::getInstance()->isOptionChecked(MenuOption::TurnWithHead)) {
const float TRACKER_YAW_TURN_SENSITIVITY = 0.5f;

@@ -433,11 +433,11 @@ void MyAvatar::removeAnimationHandle(const AnimationHandlePointer& handle) {
}

void MyAvatar::startAnimation(const QString& url, float fps, float priority,
bool loop, bool hold, int firstFrame, int lastFrame, const QStringList& maskedJoints) {
bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startAnimation", Q_ARG(const QString&, url), Q_ARG(float, fps),
Q_ARG(float, priority), Q_ARG(bool, loop), Q_ARG(bool, hold), Q_ARG(int, firstFrame),
Q_ARG(int, lastFrame), Q_ARG(const QStringList&, maskedJoints));
Q_ARG(float, priority), Q_ARG(bool, loop), Q_ARG(bool, hold), Q_ARG(float, firstFrame),
Q_ARG(float, lastFrame), Q_ARG(const QStringList&, maskedJoints));
return;
}
AnimationHandlePointer handle = _skeletonModel.createAnimationHandle();

@@ -453,11 +453,11 @@ void MyAvatar::startAnimation(const QString& url, float fps, float priority,
}

void MyAvatar::startAnimationByRole(const QString& role, const QString& url, float fps, float priority,
bool loop, bool hold, int firstFrame, int lastFrame, const QStringList& maskedJoints) {
bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startAnimationByRole", Q_ARG(const QString&, role), Q_ARG(const QString&, url),
Q_ARG(float, fps), Q_ARG(float, priority), Q_ARG(bool, loop), Q_ARG(bool, hold), Q_ARG(int, firstFrame),
Q_ARG(int, lastFrame), Q_ARG(const QStringList&, maskedJoints));
Q_ARG(float, fps), Q_ARG(float, priority), Q_ARG(bool, loop), Q_ARG(bool, hold), Q_ARG(float, firstFrame),
Q_ARG(float, lastFrame), Q_ARG(const QStringList&, maskedJoints));
return;
}
// check for a configured animation for the role

@@ -627,8 +627,8 @@ void MyAvatar::loadData(QSettings* settings) {
handle->setLoop(settings->value("loop", true).toBool());
handle->setHold(settings->value("hold", false).toBool());
handle->setStartAutomatically(settings->value("startAutomatically", true).toBool());
handle->setFirstFrame(settings->value("firstFrame", 0).toInt());
handle->setLastFrame(settings->value("lastFrame", INT_MAX).toInt());
handle->setFirstFrame(settings->value("firstFrame", 0.0f).toFloat());
handle->setLastFrame(settings->value("lastFrame", INT_MAX).toFloat());
handle->setMaskedJoints(settings->value("maskedJoints").toStringList());
}
settings->endArray();

@@ -724,7 +724,8 @@ void MyAvatar::updateLookAtTargetAvatar() {
Avatar* avatar = static_cast<Avatar*>(avatarPointer.data());
avatar->setIsLookAtTarget(false);
if (!avatar->isMyAvatar()) {
float angleTo = glm::angle(getHead()->getFinalOrientation() * glm::vec3(0.0f, 0.0f, -1.0f),
glm::vec3 DEFAULT_GAZE_IN_HEAD_FRAME = glm::vec3(0.0f, 0.0f, -1.0f);
float angleTo = glm::angle(getHead()->getFinalOrientationInWorldFrame() * DEFAULT_GAZE_IN_HEAD_FRAME,
glm::normalize(avatar->getHead()->getEyePosition() - getHead()->getEyePosition()));
if (angleTo < smallestAngleTo) {
_lookAtTargetAvatar = avatarPointer;
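Switching firstFrame/lastFrame from int to float also forces the Q_ARG types to change, because a queued QMetaObject::invokeMethod marshals each argument by the exact type named in Q_ARG; keeping Q_ARG(int, firstFrame) against a slot that now takes float would fail to resolve at runtime. A small illustration of the pattern with a hypothetical class (not the MyAvatar code):

    #include <QtCore/QObject>
    #include <QtCore/QMetaObject>
    #include <QtCore/QThread>

    class Animator : public QObject {
        Q_OBJECT
    public:
        // frames are floats so callers can request fractional frame positions
        Q_INVOKABLE void start(float firstFrame, float lastFrame) { /* ... */ }

        void startFromAnyThread(float firstFrame, float lastFrame) {
            if (QThread::currentThread() != thread()) {
                // the Q_ARG type must match the declared parameter type exactly
                QMetaObject::invokeMethod(this, "start",
                    Q_ARG(float, firstFrame), Q_ARG(float, lastFrame));
                return;
            }
            start(firstFrame, lastFrame);
        }
    };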
@@ -68,7 +68,7 @@ public:
/// Allows scripts to run animations.
Q_INVOKABLE void startAnimation(const QString& url, float fps = 30.0f, float priority = 1.0f, bool loop = false,
bool hold = false, int firstFrame = 0, int lastFrame = INT_MAX, const QStringList& maskedJoints = QStringList());
bool hold = false, float firstFrame = 0.0f, float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList());

/// Stops an animation as identified by a URL.
Q_INVOKABLE void stopAnimation(const QString& url);

@@ -76,8 +76,8 @@ public:
/// Starts an animation by its role, using the provided URL and parameters if the avatar doesn't have a custom
/// animation for the role.
Q_INVOKABLE void startAnimationByRole(const QString& role, const QString& url = QString(), float fps = 30.0f,
float priority = 1.0f, bool loop = false, bool hold = false, int firstFrame = 0,
int lastFrame = INT_MAX, const QStringList& maskedJoints = QStringList());
float priority = 1.0f, bool loop = false, bool hold = false, float firstFrame = 0.0f,
float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList());

/// Stops an animation identified by its role.
Q_INVOKABLE void stopAnimationByRole(const QString& role);
@@ -21,6 +21,11 @@ SkeletonModel::SkeletonModel(Avatar* owningAvatar) :
_owningAvatar(owningAvatar) {
}

void SkeletonModel::setJointStates(QVector<JointState> states) {
Model::setJointStates(states);
_ragDoll.init(_jointStates);
}

const float PALM_PRIORITY = 3.0f;

void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {

@@ -46,7 +51,7 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
int jointIndex = geometry.humanIKJointIndices.at(humanIKJointIndex);
if (jointIndex != -1) {
JointState& state = _jointStates[jointIndex];
state.setRotation(_rotation * prioVR->getJointRotations().at(i), PALM_PRIORITY);
state.setRotationFromBindFrame(prioVR->getJointRotations().at(i), PALM_PRIORITY);
}
}
return;

@@ -63,7 +68,9 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
if (_owningAvatar->getHandState() == HAND_STATE_NULL) {
restoreRightHandPosition(HAND_RESTORATION_RATE, PALM_PRIORITY);
} else {
applyHandPosition(geometry.rightHandJointIndex, _owningAvatar->getHandPosition());
// transform into model-frame
glm::vec3 handPosition = glm::inverse(_rotation) * (_owningAvatar->getHandPosition() - _translation);
applyHandPosition(geometry.rightHandJointIndex, handPosition);
}
restoreLeftHandPosition(HAND_RESTORATION_RATE, PALM_PRIORITY);

@@ -76,6 +83,21 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
applyPalmData(geometry.leftHandJointIndex, hand->getPalms()[leftPalmIndex]);
applyPalmData(geometry.rightHandJointIndex, hand->getPalms()[rightPalmIndex]);
}

simulateRagDoll(deltaTime);
}

void SkeletonModel::simulateRagDoll(float deltaTime) {
_ragDoll.slaveToSkeleton(_jointStates, 0.5f);

float MIN_CONSTRAINT_ERROR = 0.005f; // 5mm
int MAX_ITERATIONS = 4;
int iterations = 0;
float delta = 0.0f;
do {
delta = _ragDoll.enforceConstraints();
++iterations;
} while (delta > MIN_CONSTRAINT_ERROR && iterations < MAX_ITERATIONS);
}

void SkeletonModel::getHandShapes(int jointIndex, QVector<const Shape*>& shapes) const {
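simulateRagDoll() above is a standard iterative relaxation loop: pull the rag-doll points toward the skeleton, then repeatedly enforce the constraints until the largest correction falls below a tolerance or an iteration cap is hit. A generic sketch of that loop shape, with a hypothetical distance-constraint type rather than the engine's RagDoll class:

    #include <algorithm>
    #include <vector>
    #include <glm/glm.hpp>

    struct DistanceConstraint {
        int a, b;          // indices of the two points
        float restLength;  // distance to maintain
    };

    // Returns the largest correction applied, so the caller can stop early.
    float enforceConstraints(std::vector<glm::vec3>& points,
                             const std::vector<DistanceConstraint>& constraints) {
        float maxMovement = 0.0f;
        for (const DistanceConstraint& c : constraints) {
            glm::vec3 delta = points[c.b] - points[c.a];
            float length = glm::length(delta);
            if (length < 1.0e-6f) {
                continue;
            }
            // move both endpoints half of the error back toward the rest length
            glm::vec3 correction = 0.5f * (length - c.restLength) * (delta / length);
            points[c.a] += correction;
            points[c.b] -= correction;
            maxMovement = std::max(maxMovement, glm::length(correction));
        }
        return maxMovement;
    }

    void relax(std::vector<glm::vec3>& points, const std::vector<DistanceConstraint>& constraints) {
        const float MIN_ERROR = 0.005f; // same 5mm tolerance as above
        const int MAX_ITERATIONS = 4;
        int iterations = 0;
        float delta = 0.0f;
        do {
            delta = enforceConstraints(points, constraints);
            ++iterations;
        } while (delta > MIN_ERROR && iterations < MAX_ITERATIONS);
    }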
@@ -119,6 +141,7 @@ void SkeletonModel::getBodyShapes(QVector<const Shape*>& shapes) const {
void SkeletonModel::renderIKConstraints() {
renderJointConstraints(getRightHandJointIndex());
renderJointConstraints(getLeftHandJointIndex());
renderRagDoll();
}

class IndexValue {

@@ -135,6 +158,7 @@ void SkeletonModel::applyHandPosition(int jointIndex, const glm::vec3& position)
if (jointIndex == -1 || jointIndex >= _jointStates.size()) {
return;
}
// NOTE: 'position' is in model-frame
setJointPosition(jointIndex, position, glm::quat(), false, -1, false, glm::vec3(0.0f, -1.0f, 0.0f), PALM_PRIORITY);

const FBXGeometry& geometry = _geometry->getFBXGeometry();

@@ -147,7 +171,7 @@ void SkeletonModel::applyHandPosition(int jointIndex, const glm::vec3& position)
return;
}
JointState& state = _jointStates[jointIndex];
glm::quat handRotation = state.getJointRotation(true);
glm::quat handRotation = state.getRotation();

// align hand with forearm
float sign = (jointIndex == geometry.rightHandJointIndex) ? 1.0f : -1.0f;
@@ -167,36 +191,41 @@ void SkeletonModel::applyPalmData(int jointIndex, PalmData& palm) {
// rotate palm to align with its normal (normal points out of hand's palm)
glm::quat palmRotation;
glm::quat r0, r1;
if (!Menu::getInstance()->isOptionChecked(MenuOption::AlternateIK) &&
Menu::getInstance()->isOptionChecked(MenuOption::AlignForearmsWithWrists)) {
JointState parentState = _jointStates[parentJointIndex];
palmRotation = parentState.getJointRotation(true);
palmRotation = parentState.getRotationFromBindToModelFrame();
r0 = palmRotation;
} else {
JointState state = _jointStates[jointIndex];
palmRotation = state.getJointRotation(true);
palmRotation = state.getRotationFromBindToModelFrame();
}
palmRotation = rotationBetween(palmRotation * geometry.palmDirection, palm.getNormal()) * palmRotation;
glm::quat inverseRotation = glm::inverse(_rotation);
glm::vec3 palmNormal = inverseRotation * palm.getNormal();
palmRotation = rotationBetween(palmRotation * geometry.palmDirection, palmNormal) * palmRotation;
r1 = palmRotation;

// rotate palm to align with finger direction
glm::vec3 direction = palm.getFingerDirection();
glm::vec3 direction = inverseRotation * palm.getFingerDirection();
palmRotation = rotationBetween(palmRotation * glm::vec3(-sign, 0.0f, 0.0f), direction) * palmRotation;

// set hand position, rotation
glm::vec3 palmPosition = inverseRotation * (palm.getPosition() - _translation);
if (Menu::getInstance()->isOptionChecked(MenuOption::AlternateIK)) {
setHandPosition(jointIndex, palm.getPosition(), palmRotation);
setHandPosition(jointIndex, palmPosition, palmRotation);

} else if (Menu::getInstance()->isOptionChecked(MenuOption::AlignForearmsWithWrists)) {
glm::vec3 forearmVector = palmRotation * glm::vec3(sign, 0.0f, 0.0f);
setJointPosition(parentJointIndex, palm.getPosition() + forearmVector *
setJointPosition(parentJointIndex, palmPosition + forearmVector *
geometry.joints.at(jointIndex).distanceToParent * extractUniformScale(_scale),
glm::quat(), false, -1, false, glm::vec3(0.0f, -1.0f, 0.0f), PALM_PRIORITY);
JointState& parentState = _jointStates[parentJointIndex];
parentState.setRotation(palmRotation, PALM_PRIORITY);
// slam parent-relative rotation to identity
_jointStates[jointIndex]._rotation = glm::quat();
parentState.setRotationFromBindFrame(palmRotation, PALM_PRIORITY);
// lock hand to forearm by slamming its rotation (in parent-frame) to identity
_jointStates[jointIndex]._rotationInParentFrame = glm::quat();
} else {
setJointPosition(jointIndex, palm.getPosition(), palmRotation,
setJointPosition(jointIndex, palmPosition, palmRotation,
true, -1, false, glm::vec3(0.0f, -1.0f, 0.0f), PALM_PRIORITY);
}
}
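The palm code above leans on one recurring pattern: to make a rotation map some local axis onto a measured direction, left-multiply it by rotationBetween(currentDirection, targetDirection), with everything first brought into the model-frame via inverse(_rotation). A self-contained sketch of that alignment step; note that rotationBetween in the engine is a shared utility, approximated here with glm::rotation (which assumes the two directions are not exactly opposite):

    #include <glm/glm.hpp>
    #include <glm/gtx/quaternion.hpp>

    // Shortest-arc rotation taking direction v1 onto direction v2 (both non-zero,
    // not anti-parallel).
    glm::quat rotationBetweenDirections(const glm::vec3& v1, const glm::vec3& v2) {
        return glm::rotation(glm::normalize(v1), glm::normalize(v2));
    }

    // Adjust 'palmRotation' so that it maps 'palmAxis' (a direction in the palm's
    // local frame) onto 'measuredNormal' (a direction already in the model-frame).
    glm::quat alignToNormal(const glm::quat& palmRotation,
                            const glm::vec3& palmAxis,
                            const glm::vec3& measuredNormal) {
        glm::vec3 currentDirection = palmRotation * palmAxis;
        return rotationBetweenDirections(currentDirection, measuredNormal) * palmRotation;
    }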
@@ -221,9 +250,7 @@ void SkeletonModel::updateJointState(int index) {
Model::updateJointState(index);

if (index == _geometry->getFBXGeometry().rootJointIndex) {
state._transform[3][0] = 0.0f;
state._transform[3][1] = 0.0f;
state._transform[3][2] = 0.0f;
state.clearTransformTranslation();
}
}

@@ -232,10 +259,10 @@ void SkeletonModel::maybeUpdateLeanRotation(const JointState& parentState, const
return;
}
// get the rotation axes in joint space and use them to adjust the rotation
glm::mat3 axes = glm::mat3_cast(_rotation);
glm::mat3 inverse = glm::mat3(glm::inverse(parentState._transform * glm::translate(state.getDefaultTranslationInParentFrame()) *
glm::mat3 axes = glm::mat3_cast(glm::quat());
glm::mat3 inverse = glm::mat3(glm::inverse(parentState.getTransform() * glm::translate(state.getDefaultTranslationInParentFrame()) *
joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation)));
state._rotation = glm::angleAxis(- RADIANS_PER_DEGREE * _owningAvatar->getHead()->getFinalLeanSideways(),
state._rotationInParentFrame = glm::angleAxis(- RADIANS_PER_DEGREE * _owningAvatar->getHead()->getFinalLeanSideways(),
glm::normalize(inverse * axes[2])) * glm::angleAxis(- RADIANS_PER_DEGREE * _owningAvatar->getHead()->getFinalLeanForward(),
glm::normalize(inverse * axes[0])) * joint.rotation;
}

@@ -259,11 +286,11 @@ void SkeletonModel::renderJointConstraints(int jointIndex) {
do {
const FBXJoint& joint = geometry.joints.at(jointIndex);
const JointState& jointState = _jointStates.at(jointIndex);
glm::vec3 position = extractTranslation(jointState._transform) + _translation;
glm::vec3 position = _rotation * jointState.getPosition() + _translation;

glPushMatrix();
glTranslatef(position.x, position.y, position.z);
glm::quat parentRotation = (joint.parentIndex == -1) ? _rotation : _jointStates.at(joint.parentIndex)._combinedRotation;
glm::quat parentRotation = (joint.parentIndex == -1) ? _rotation : _rotation * _jointStates.at(joint.parentIndex).getRotation();
glm::vec3 rotationAxis = glm::axis(parentRotation);
glRotatef(glm::degrees(glm::angle(parentRotation)), rotationAxis.x, rotationAxis.y, rotationAxis.z);
float fanScale = directionSize * 0.75f;

@@ -296,7 +323,7 @@ void SkeletonModel::renderJointConstraints(int jointIndex) {
}
glPopMatrix();

renderOrientationDirections(position, jointState._combinedRotation, directionSize);
renderOrientationDirections(position, _rotation * jointState.getRotation(), directionSize);
jointIndex = joint.parentIndex;

} while (jointIndex != -1 && geometry.joints.at(jointIndex).isFree);
@@ -359,21 +386,21 @@ void SkeletonModel::setHandPosition(int jointIndex, const glm::vec3& position, c
glm::quat shoulderRotation = rotationBetween(forwardVector, elbowPosition - shoulderPosition);

JointState& shoulderState = _jointStates[shoulderJointIndex];
shoulderState.setRotation(shoulderRotation, PALM_PRIORITY);
shoulderState.setRotationFromBindFrame(shoulderRotation, PALM_PRIORITY);

JointState& elbowState = _jointStates[elbowJointIndex];
elbowState.setRotation(rotationBetween(shoulderRotation * forwardVector, wristPosition - elbowPosition) * shoulderRotation, PALM_PRIORITY);
elbowState.setRotationFromBindFrame(rotationBetween(shoulderRotation * forwardVector, wristPosition - elbowPosition) * shoulderRotation, PALM_PRIORITY);

JointState& handState = _jointStates[jointIndex];
handState.setRotation(rotation, PALM_PRIORITY);
handState.setRotationFromBindFrame(rotation, PALM_PRIORITY);
}

bool SkeletonModel::getLeftHandPosition(glm::vec3& position) const {
return getJointPosition(getLeftHandJointIndex(), position);
return getJointPositionInWorldFrame(getLeftHandJointIndex(), position);
}

bool SkeletonModel::getRightHandPosition(glm::vec3& position) const {
return getJointPosition(getRightHandJointIndex(), position);
return getJointPositionInWorldFrame(getRightHandJointIndex(), position);
}

bool SkeletonModel::restoreLeftHandPosition(float fraction, float priority) {

@@ -381,7 +408,7 @@ bool SkeletonModel::restoreLeftHandPosition(float fraction, float priority) {
}

bool SkeletonModel::getLeftShoulderPosition(glm::vec3& position) const {
return getJointPosition(getLastFreeJointIndex(getLeftHandJointIndex()), position);
return getJointPositionInWorldFrame(getLastFreeJointIndex(getLeftHandJointIndex()), position);
}

float SkeletonModel::getLeftArmLength() const {

@@ -393,7 +420,7 @@ bool SkeletonModel::restoreRightHandPosition(float fraction, float priority) {
}

bool SkeletonModel::getRightShoulderPosition(glm::vec3& position) const {
return getJointPosition(getLastFreeJointIndex(getRightHandJointIndex()), position);
return getJointPositionInWorldFrame(getLastFreeJointIndex(getRightHandJointIndex()), position);
}

float SkeletonModel::getRightArmLength() const {

@@ -401,11 +428,11 @@ float SkeletonModel::getRightArmLength() const {
}

bool SkeletonModel::getHeadPosition(glm::vec3& headPosition) const {
return isActive() && getJointPosition(_geometry->getFBXGeometry().headJointIndex, headPosition);
return isActive() && getJointPositionInWorldFrame(_geometry->getFBXGeometry().headJointIndex, headPosition);
}

bool SkeletonModel::getNeckPosition(glm::vec3& neckPosition) const {
return isActive() && getJointPosition(_geometry->getFBXGeometry().neckJointIndex, neckPosition);
return isActive() && getJointPositionInWorldFrame(_geometry->getFBXGeometry().neckJointIndex, neckPosition);
}

bool SkeletonModel::getNeckParentRotation(glm::quat& neckParentRotation) const {

@@ -416,7 +443,7 @@ bool SkeletonModel::getNeckParentRotation(glm::quat& neckParentRotation) const {
if (geometry.neckJointIndex == -1) {
return false;
}
return getJointRotation(geometry.joints.at(geometry.neckJointIndex).parentIndex, neckParentRotation);
return getJointRotationInWorldFrame(geometry.joints.at(geometry.neckJointIndex).parentIndex, neckParentRotation);
}

bool SkeletonModel::getEyePositions(glm::vec3& firstEyePosition, glm::vec3& secondEyePosition) const {
@@ -424,18 +451,18 @@ bool SkeletonModel::getEyePositions(glm::vec3& firstEyePosition, glm::vec3& seco
return false;
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
if (getJointPosition(geometry.leftEyeJointIndex, firstEyePosition) &&
getJointPosition(geometry.rightEyeJointIndex, secondEyePosition)) {
if (getJointPositionInWorldFrame(geometry.leftEyeJointIndex, firstEyePosition) &&
getJointPositionInWorldFrame(geometry.rightEyeJointIndex, secondEyePosition)) {
return true;
}
// no eye joints; try to estimate based on head/neck joints
glm::vec3 neckPosition, headPosition;
if (getJointPosition(geometry.neckJointIndex, neckPosition) &&
getJointPosition(geometry.headJointIndex, headPosition)) {
if (getJointPositionInWorldFrame(geometry.neckJointIndex, neckPosition) &&
getJointPositionInWorldFrame(geometry.headJointIndex, headPosition)) {
const float EYE_PROPORTION = 0.6f;
glm::vec3 baseEyePosition = glm::mix(neckPosition, headPosition, EYE_PROPORTION);
glm::quat headRotation;
getJointRotation(geometry.headJointIndex, headRotation);
getJointRotationInWorldFrame(geometry.headJointIndex, headRotation);
const float EYES_FORWARD = 0.25f;
const float EYE_SEPARATION = 0.1f;
float headHeight = glm::distance(neckPosition, headPosition);

@@ -446,3 +473,30 @@ bool SkeletonModel::getEyePositions(glm::vec3& firstEyePosition, glm::vec3& seco
return false;
}

void SkeletonModel::renderRagDoll() {
const int BALL_SUBDIVISIONS = 6;
glDisable(GL_DEPTH_TEST);
glDisable(GL_LIGHTING);
glPushMatrix();

Application::getInstance()->loadTranslatedViewMatrix(_translation);
QVector<glm::vec3> points = _ragDoll.getPoints();
int numPoints = points.size();
float alpha = 0.3f;
float radius1 = 0.008f;
float radius2 = 0.01f;
for (int i = 0; i < numPoints; ++i) {
glPushMatrix();
// draw each point as a yellow hexagon with black border
glm::vec3 position = _rotation * points[i];
glTranslatef(position.x, position.y, position.z);
glColor4f(0.0f, 0.0f, 0.0f, alpha);
glutSolidSphere(radius2, BALL_SUBDIVISIONS, BALL_SUBDIVISIONS);
glColor4f(1.0f, 1.0f, 0.0f, alpha);
glutSolidSphere(radius1, BALL_SUBDIVISIONS, BALL_SUBDIVISIONS);
glPopMatrix();
}
glPopMatrix();
glEnable(GL_DEPTH_TEST);
glEnable(GL_LIGHTING);
}
@@ -13,6 +13,7 @@
#define hifi_SkeletonModel_h

#include "renderer/Model.h"
#include "renderer/RagDoll.h"

class Avatar;

@@ -23,8 +24,11 @@ class SkeletonModel : public Model {
public:

SkeletonModel(Avatar* owningAvatar);

void setJointStates(QVector<JointState> states);

void simulate(float deltaTime, bool fullUpdate = true);
void simulateRagDoll(float deltaTime);

/// \param jointIndex index of hand joint
/// \param shapes[out] list in which is stored pointers to hand shapes

@@ -89,8 +93,11 @@ public:
/// \return whether or not both eye meshes were found
bool getEyePositions(glm::vec3& firstEyePosition, glm::vec3& secondEyePosition) const;

void renderRagDoll();
protected:

/// \param jointIndex index of joint in model
/// \param position position of joint in model-frame
void applyHandPosition(int jointIndex, const glm::vec3& position);

void applyPalmData(int jointIndex, PalmData& palm);

@@ -105,9 +112,14 @@ protected:
private:

void renderJointConstraints(int jointIndex);

/// \param jointIndex index of joint in model
/// \param position position of joint in model-frame
/// \param rotation rotation of joint in model-frame
void setHandPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation);

Avatar* _owningAvatar;
RagDoll _ragDoll;
};

#endif // hifi_SkeletonModel_h
@@ -86,7 +86,7 @@ void OculusManager::display(Camera& whichCamera) {
// We only need to render the overlays to a texture once, then we just render the texture as a quad
// PrioVR will only work if renderOverlay is called, calibration is connected to Application::renderingOverlay()
applicationOverlay.renderOverlay(true);
const bool displayOverlays = false;
const bool displayOverlays = Menu::getInstance()->isOptionChecked(MenuOption::DisplayOculusOverlays);

Application::getInstance()->getGlowEffect()->prepare();
@@ -76,23 +76,24 @@ static void setPalm(float deltaTime, int index) {
}
}

// NOTE: this math is done in the worl-frame with unecessary complexity.
// TODO: transfom this to stay in the model-frame.
glm::vec3 position;
glm::quat rotation;

SkeletonModel* skeletonModel = &Application::getInstance()->getAvatar()->getSkeletonModel();
int jointIndex;
glm::quat inverseRotation = glm::inverse(Application::getInstance()->getAvatar()->getOrientation());
if (index == LEFT_HAND_INDEX) {
jointIndex = skeletonModel->getLeftHandJointIndex();
skeletonModel->getJointRotation(jointIndex, rotation, true);
skeletonModel->getJointRotationInWorldFrame(jointIndex, rotation);
rotation = inverseRotation * rotation * glm::quat(glm::vec3(0.0f, PI_OVER_TWO, 0.0f));

} else {
jointIndex = skeletonModel->getRightHandJointIndex();
skeletonModel->getJointRotation(jointIndex, rotation, true);
skeletonModel->getJointRotationInWorldFrame(jointIndex, rotation);
rotation = inverseRotation * rotation * glm::quat(glm::vec3(0.0f, -PI_OVER_TWO, 0.0f));
}
skeletonModel->getJointPosition(jointIndex, position);
skeletonModel->getJointPositionInWorldFrame(jointIndex, position);
position = inverseRotation * (position - skeletonModel->getTranslation());

palm->setRawRotation(rotation);
interface/src/renderer/JointState.cpp (new file, 101 lines)

@@ -0,0 +1,101 @@
//
// JointState.cpp
// interface/src/renderer
//
// Created by Andrzej Kapolka on 10/18/13.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <glm/gtx/norm.hpp>

//#include <GeometryUtil.h>
#include <SharedUtil.h>

#include "JointState.h"

JointState::JointState() :
_animationPriority(0.0f),
_fbxJoint(NULL) {
}

void JointState::setFBXJoint(const FBXJoint* joint) {
assert(joint != NULL);
_rotationInParentFrame = joint->rotation;
// NOTE: JointState does not own the FBXJoint to which it points.
_fbxJoint = joint;
}

void JointState::copyState(const JointState& state) {
_rotationInParentFrame = state._rotationInParentFrame;
_transform = state._transform;
_rotation = extractRotation(_transform);
_animationPriority = state._animationPriority;
// DO NOT copy _fbxJoint
}

void JointState::computeTransform(const glm::mat4& parentTransform) {
glm::quat modifiedRotation = _fbxJoint->preRotation * _rotationInParentFrame * _fbxJoint->postRotation;
glm::mat4 modifiedTransform = _fbxJoint->preTransform * glm::mat4_cast(modifiedRotation) * _fbxJoint->postTransform;
_transform = parentTransform * glm::translate(_fbxJoint->translation) * modifiedTransform;
_rotation = extractRotation(_transform);
}

glm::quat JointState::getRotationFromBindToModelFrame() const {
return _rotation * _fbxJoint->inverseBindRotation;
}

void JointState::restoreRotation(float fraction, float priority) {
assert(_fbxJoint != NULL);
if (priority == _animationPriority || _animationPriority == 0.0f) {
_rotationInParentFrame = safeMix(_rotationInParentFrame, _fbxJoint->rotation, fraction);
_animationPriority = 0.0f;
}
}

void JointState::setRotationFromBindFrame(const glm::quat& rotation, float priority) {
assert(_fbxJoint != NULL);
if (priority >= _animationPriority) {
// rotation is from bind- to model-frame
_rotationInParentFrame = _rotationInParentFrame * glm::inverse(_rotation) * rotation * glm::inverse(_fbxJoint->inverseBindRotation);
_animationPriority = priority;
}
}

void JointState::clearTransformTranslation() {
_transform[3][0] = 0.0f;
_transform[3][1] = 0.0f;
_transform[3][2] = 0.0f;
}

void JointState::setRotation(const glm::quat& rotation, bool constrain, float priority) {
applyRotationDelta(rotation * glm::inverse(_rotation), true, priority);
}

void JointState::applyRotationDelta(const glm::quat& delta, bool constrain, float priority) {
// NOTE: delta is in jointParent-frame
assert(_fbxJoint != NULL);
if (priority < _animationPriority) {
return;
}
_animationPriority = priority;
if (!constrain || (_fbxJoint->rotationMin == glm::vec3(-PI, -PI, -PI) &&
_fbxJoint->rotationMax == glm::vec3(PI, PI, PI))) {
// no constraints
_rotationInParentFrame = _rotationInParentFrame * glm::inverse(_rotation) * delta * _rotation;
_rotation = delta * _rotation;
return;
}
glm::quat targetRotation = delta * _rotation;
glm::vec3 eulers = safeEulerAngles(_rotationInParentFrame * glm::inverse(_rotation) * targetRotation);
glm::quat newRotation = glm::quat(glm::clamp(eulers, _fbxJoint->rotationMin, _fbxJoint->rotationMax));
_rotation = _rotation * glm::inverse(_rotationInParentFrame) * newRotation;
_rotationInParentFrame = newRotation;
}

const glm::vec3& JointState::getDefaultTranslationInParentFrame() const {
assert(_fbxJoint != NULL);
return _fbxJoint->translation;
}
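JointState keeps two related rotations: _rotationInParentFrame (the authored joint rotation, relative to the parent joint) and _rotation (the joint's orientation in the model-frame, extracted from _transform). Ignoring the preTransform/postTransform matrices, the composition performed by computeTransform() and getRotationFromBindToModelFrame() reduces to the quaternion chain sketched below; these are hypothetical free functions written only to illustrate the frame relationships, not the class itself:

    #include <glm/glm.hpp>
    #include <glm/gtx/quaternion.hpp>

    // Simplified view of the rotation part of computeTransform(): the joint's
    // model-frame rotation is the parent's model-frame rotation composed with
    // the joint's pre/post rotations and its parent-frame rotation.
    glm::quat modelFrameRotation(const glm::quat& parentModelRotation,
                                 const glm::quat& preRotation,
                                 const glm::quat& rotationInParentFrame,
                                 const glm::quat& postRotation) {
        return parentModelRotation * preRotation * rotationInParentFrame * postRotation;
    }

    // getRotationFromBindToModelFrame(): re-express a rotation given in the
    // joint's bind pose in the model-frame.
    glm::quat bindToModelFrame(const glm::quat& modelRotation,
                               const glm::quat& inverseBindRotation) {
        return modelRotation * inverseBindRotation;
    }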
interface/src/renderer/JointState.h (new file, 66 lines)

@@ -0,0 +1,66 @@
//
// JointState.h
// interface/src/renderer
//
// Created by Andrzej Kapolka on 10/18/13.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_JointState_h
#define hifi_JointState_h

#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/transform.hpp>

#include <FBXReader.h>

class JointState {
public:
JointState();

void setFBXJoint(const FBXJoint* joint);
const FBXJoint& getFBXJoint() const { return *_fbxJoint; }

void copyState(const JointState& state);

void computeTransform(const glm::mat4& parentTransform);
const glm::mat4& getTransform() const { return _transform; }

glm::quat getRotation() const { return _rotation; }
glm::vec3 getPosition() const { return extractTranslation(_transform); }

/// \return rotation from bind to model frame
glm::quat getRotationFromBindToModelFrame() const;

/// \param rotation rotation of joint in model-frame
void setRotation(const glm::quat& rotation, bool constrain, float priority);

/// \param delta is in the jointParent-frame
void applyRotationDelta(const glm::quat& delta, bool constrain = true, float priority = 1.0f);

const glm::vec3& getDefaultTranslationInParentFrame() const;

void restoreRotation(float fraction, float priority);

/// \param rotation is from bind- to model-frame
/// computes and sets new _rotationInParentFrame
/// NOTE: the JointState's model-frame transform/rotation are NOT updated!
void setRotationFromBindFrame(const glm::quat& rotation, float priority);

void clearTransformTranslation();

glm::quat _rotationInParentFrame; // joint- to parentJoint-frame
float _animationPriority; // the priority of the animation affecting this joint

private:
glm::mat4 _transform; // joint- to model-frame
glm::quat _rotation; // joint- to model-frame

const FBXJoint* _fbxJoint; // JointState does NOT own its FBXJoint
};

#endif // hifi_JointState_h
@@ -166,38 +166,21 @@ QVector<JointState> Model::createJointStates(const FBXGeometry& geometry) {
jointStates.append(state);
}

// compute transforms
// Unfortunately, the joints are not neccessarily in order from parents to children,
// so we must iterate over the list multiple times until all are set correctly.
QVector<bool> jointIsSet;
// compute model transforms
int numJoints = jointStates.size();
jointIsSet.fill(false, numJoints);
int numJointsSet = 0;
int lastNumJointsSet = -1;
while (numJointsSet < numJoints && numJointsSet != lastNumJointsSet) {
lastNumJointsSet = numJointsSet;
for (int i = 0; i < numJoints; ++i) {
if (jointIsSet[i]) {
continue;
}
JointState& state = jointStates[i];
const FBXJoint& joint = state.getFBXJoint();
int parentIndex = joint.parentIndex;
if (parentIndex == -1) {
_rootIndex = i;
glm::mat4 baseTransform = glm::mat4_cast(_rotation) * glm::scale(_scale) * glm::translate(_offset) * geometry.offset;
state.computeTransforms(baseTransform, _rotation);
++numJointsSet;
jointIsSet[i] = true;
} else if (jointIsSet[parentIndex]) {
const JointState& parentState = jointStates.at(parentIndex);
state.computeTransforms(parentState._transform, parentState._combinedRotation);
++numJointsSet;
jointIsSet[i] = true;
}
for (int i = 0; i < numJoints; ++i) {
JointState& state = jointStates[i];
const FBXJoint& joint = state.getFBXJoint();
int parentIndex = joint.parentIndex;
if (parentIndex == -1) {
_rootIndex = i;
glm::mat4 parentTransform = glm::scale(_scale) * glm::translate(_offset) * geometry.offset;
state.computeTransform(parentTransform);
} else {
const JointState& parentState = jointStates.at(parentIndex);
state.computeTransform(parentState.getTransform());
}
}

return jointStates;
}
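The rewritten loop drops the multi-pass "keep trying until every parent has been set" scheme in favor of a single pass in joint order, which is only correct if every joint's parent appears earlier in the list than the joint itself; presumably the joint list produced by the FBX reader now guarantees that ordering. The pattern in isolation, using a hypothetical Joint struct rather than the engine's types:

    #include <cassert>
    #include <vector>
    #include <glm/glm.hpp>

    struct Joint {
        int parentIndex;          // -1 for the root
        glm::mat4 localTransform; // transform relative to the parent
    };

    // Compute model-frame transforms in one pass, relying on parents
    // preceding their children in the array.
    std::vector<glm::mat4> computeModelTransforms(const std::vector<Joint>& joints,
                                                  const glm::mat4& rootParentTransform) {
        std::vector<glm::mat4> transforms(joints.size());
        for (size_t i = 0; i < joints.size(); ++i) {
            int parentIndex = joints[i].parentIndex;
            if (parentIndex == -1) {
                transforms[i] = rootParentTransform * joints[i].localTransform;
            } else {
                // the parent's transform must already have been computed
                assert(static_cast<size_t>(parentIndex) < i);
                transforms[i] = transforms[parentIndex] * joints[i].localTransform;
            }
        }
        return transforms;
    }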
@@ -476,7 +459,7 @@ void Model::reset() {
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
for (int i = 0; i < _jointStates.size(); i++) {
_jointStates[i]._rotation = geometry.joints.at(i).rotation;
_jointStates[i]._rotationInParentFrame = geometry.joints.at(i).rotation;
}
}

@@ -527,12 +510,12 @@ bool Model::updateGeometry() {
deleteGeometry();
_dilatedTextures.clear();
_geometry = geometry;
_jointStates = newJointStates;
setJointStates(newJointStates);
needToRebuild = true;
} else if (_jointStates.isEmpty()) {
const FBXGeometry& fbxGeometry = geometry->getFBXGeometry();
if (fbxGeometry.joints.size() > 0) {
_jointStates = createJointStates(fbxGeometry);
setJointStates(createJointStates(fbxGeometry));
needToRebuild = true;
}
} else if (!geometry->isLoaded()) {

@@ -574,6 +557,11 @@ bool Model::updateGeometry() {
return needFullUpdate;
}

// virtual
void Model::setJointStates(QVector<JointState> states) {
_jointStates = states;
}

bool Model::render(float alpha, RenderMode mode, bool receiveShadows) {
// render the attachments
foreach (Model* attachment, _attachments) {
@@ -686,7 +674,7 @@ bool Model::getJointState(int index, glm::quat& rotation) const {
if (index == -1 || index >= _jointStates.size()) {
return false;
}
rotation = _jointStates.at(index)._rotation;
rotation = _jointStates.at(index)._rotationInParentFrame;
const glm::quat& defaultRotation = _geometry->getFBXGeometry().joints.at(index).rotation;
return glm::abs(rotation.x - defaultRotation.x) >= EPSILON ||
glm::abs(rotation.y - defaultRotation.y) >= EPSILON ||

@@ -699,7 +687,7 @@ void Model::setJointState(int index, bool valid, const glm::quat& rotation, floa
JointState& state = _jointStates[index];
if (priority >= state._animationPriority) {
if (valid) {
state._rotation = rotation;
state._rotationInParentFrame = rotation;
state._animationPriority = priority;
} else {
state.restoreRotation(1.0f, priority);

@@ -731,19 +719,29 @@ void Model::setURL(const QUrl& url, const QUrl& fallback, bool retainCurrent, bo
}
}

bool Model::getJointPositionInWorldFrame(int jointIndex, glm::vec3& position) const {
if (jointIndex == -1 || jointIndex >= _jointStates.size()) {
return false;
}
// position is in world-frame
position = _translation + _rotation * _jointStates[jointIndex].getPosition();
return true;
}

bool Model::getJointPosition(int jointIndex, glm::vec3& position) const {
if (jointIndex == -1 || jointIndex >= _jointStates.size()) {
return false;
}
position = _translation + extractTranslation(_jointStates[jointIndex]._transform);
// position is in model-frame
position = extractTranslation(_jointStates[jointIndex].getTransform());
return true;
}

bool Model::getJointRotation(int jointIndex, glm::quat& rotation, bool fromBind) const {
bool Model::getJointRotationInWorldFrame(int jointIndex, glm::quat& rotation) const {
if (jointIndex == -1 || jointIndex >= _jointStates.size()) {
return false;
}
rotation = _jointStates[jointIndex].getJointRotation(fromBind);
rotation = _rotation * _jointStates[jointIndex].getRotation();
return true;
}
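The renamed getters make the frame of the returned value explicit: a model-frame position comes straight out of the joint transform, while the world-frame variant applies the model's own rotation and translation on top; the inverse mapping is what the hand and collision code above uses to go the other way. A minimal sketch of the two conversions in plain glm, with hypothetical function names:

    #include <glm/glm.hpp>
    #include <glm/gtx/quaternion.hpp>

    // model-frame -> world-frame, as in getJointPositionInWorldFrame()
    glm::vec3 modelToWorld(const glm::vec3& positionInModelFrame,
                           const glm::quat& modelRotation, const glm::vec3& modelTranslation) {
        return modelTranslation + modelRotation * positionInModelFrame;
    }

    // world-frame -> model-frame, as in the "transform into model-frame" call sites
    glm::vec3 worldToModel(const glm::vec3& positionInWorldFrame,
                           const glm::quat& modelRotation, const glm::vec3& modelTranslation) {
        return glm::inverse(modelRotation) * (positionInWorldFrame - modelTranslation);
    }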
@@ -751,7 +749,7 @@ bool Model::getJointCombinedRotation(int jointIndex, glm::quat& rotation) const
if (jointIndex == -1 || jointIndex >= _jointStates.size()) {
return false;
}
rotation = _jointStates[jointIndex]._combinedRotation;
rotation = _rotation * _jointStates[jointIndex].getRotation();
return true;
}

@@ -963,15 +961,16 @@ void Model::updateShapePositions() {
glm::vec3 rootPosition(0.0f);
_boundingRadius = 0.0f;
float uniformScale = extractUniformScale(_scale);
const FBXGeometry& geometry = _geometry->getFBXGeometry();
for (int i = 0; i < _jointStates.size(); i++) {
const FBXJoint& joint = geometry.joints[i];
const JointState& state = _jointStates[i];
const FBXJoint& joint = state.getFBXJoint();
// shape position and rotation need to be in world-frame
glm::vec3 jointToShapeOffset = uniformScale * (_jointStates[i]._combinedRotation * joint.shapePosition);
glm::vec3 worldPosition = extractTranslation(_jointStates[i]._transform) + jointToShapeOffset + _translation;
glm::quat stateRotation = state.getRotation();
glm::vec3 shapeOffset = uniformScale * (stateRotation * joint.shapePosition);
glm::vec3 worldPosition = _translation + _rotation * (state.getPosition() + shapeOffset);
Shape* shape = _jointShapes[i];
shape->setPosition(worldPosition);
shape->setRotation(_jointStates[i]._combinedRotation * joint.shapeRotation);
shape->setRotation(_rotation * stateRotation * joint.shapeRotation);
float distance = glm::distance(worldPosition, _translation) + shape->getBoundingRadius();
if (distance > _boundingRadius) {
_boundingRadius = distance;
@@ -993,12 +992,12 @@ bool Model::findRayIntersection(const glm::vec3& origin, const glm::vec3& direct
float radiusScale = extractUniformScale(_scale);
for (int i = 0; i < _jointStates.size(); i++) {
const FBXJoint& joint = geometry.joints[i];
glm::vec3 end = extractTranslation(_jointStates[i]._transform);
glm::vec3 end = _translation + _rotation * _jointStates[i].getPosition();
float endRadius = joint.boneRadius * radiusScale;
glm::vec3 start = end;
float startRadius = joint.boneRadius * radiusScale;
if (joint.parentIndex != -1) {
start = extractTranslation(_jointStates[joint.parentIndex]._transform);
start = _translation + _rotation * _jointStates[joint.parentIndex].getPosition();
startRadius = geometry.joints[joint.parentIndex].boneRadius * radiusScale;
}
// for now, use average of start and end radii

@@ -1208,8 +1207,8 @@ void Model::simulateInternal(float deltaTime) {
glm::vec3 jointTranslation = _translation;
glm::quat jointRotation = _rotation;
getJointPosition(attachment.jointIndex, jointTranslation);
getJointRotation(attachment.jointIndex, jointRotation);
getJointPositionInWorldFrame(attachment.jointIndex, jointTranslation);
getJointRotationInWorldFrame(attachment.jointIndex, jointRotation);

model->setTranslation(jointTranslation + jointRotation * attachment.translation * _scale);
model->setRotation(jointRotation * attachment.rotation);

@@ -1220,12 +1219,13 @@ void Model::simulateInternal(float deltaTime) {
}
}

glm::mat4 modelToWorld = glm::mat4_cast(_rotation);
for (int i = 0; i < _meshStates.size(); i++) {
MeshState& state = _meshStates[i];
const FBXMesh& mesh = geometry.meshes.at(i);
for (int j = 0; j < mesh.clusters.size(); j++) {
const FBXCluster& cluster = mesh.clusters.at(j);
state.clusterMatrices[j] = _jointStates[cluster.jointIndex]._transform * cluster.inverseBindMatrix;
state.clusterMatrices[j] = modelToWorld * _jointStates[cluster.jointIndex].getTransform() * cluster.inverseBindMatrix;
}
}
@@ -1239,22 +1239,23 @@ void Model::updateJointState(int index) {
JointState& state = _jointStates[index];
const FBXJoint& joint = state.getFBXJoint();

if (joint.parentIndex == -1) {
// compute model transforms
int parentIndex = joint.parentIndex;
if (parentIndex == -1) {
const FBXGeometry& geometry = _geometry->getFBXGeometry();
glm::mat4 baseTransform = glm::mat4_cast(_rotation) * glm::scale(_scale) * glm::translate(_offset) * geometry.offset;
state.computeTransforms(baseTransform, _rotation);
glm::mat4 parentTransform = glm::scale(_scale) * glm::translate(_offset) * geometry.offset;
state.computeTransform(parentTransform);
} else {
const JointState& parentState = _jointStates.at(joint.parentIndex);
state.computeTransforms(parentState._transform, parentState._combinedRotation);
const JointState& parentState = _jointStates.at(parentIndex);
state.computeTransform(parentState.getTransform());
}
}

bool Model::setJointPosition(int jointIndex, const glm::vec3& translation, const glm::quat& rotation, bool useRotation,
bool Model::setJointPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation, bool useRotation,
int lastFreeIndex, bool allIntermediatesFree, const glm::vec3& alignment, float priority) {
if (jointIndex == -1 || _jointStates.isEmpty()) {
return false;
}
glm::vec3 relativePosition = translation - _translation;
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const QVector<int>& freeLineage = geometry.joints.at(jointIndex).freeLineage;
if (freeLineage.isEmpty()) {

@@ -1267,21 +1268,19 @@ bool Model::setJointPosition(int jointIndex, const glm::vec3& translation, const
// this is a cyclic coordinate descent algorithm: see
// http://www.ryanjuckett.com/programming/animation/21-cyclic-coordinate-descent-in-2d
const int ITERATION_COUNT = 1;
glm::vec3 worldAlignment = _rotation * alignment;
glm::vec3 worldAlignment = alignment;
for (int i = 0; i < ITERATION_COUNT; i++) {
// first, try to rotate the end effector as close as possible to the target rotation, if any
glm::quat endRotation;
if (useRotation) {
JointState& state = _jointStates[jointIndex];

// TODO: figure out what this is trying to do and combine it into one JointState method
endRotation = state.getJointRotation(true);
state.applyRotationDelta(rotation * glm::inverse(endRotation), true, priority);
endRotation = state.getJointRotation(true);
state.setRotation(rotation, true, priority);
endRotation = state.getRotation();
}

// then, we go from the joint upwards, rotating the end as close as possible to the target
glm::vec3 endPosition = extractTranslation(_jointStates[jointIndex]._transform);
glm::vec3 endPosition = extractTranslation(_jointStates[jointIndex].getTransform());
for (int j = 1; freeLineage.at(j - 1) != lastFreeIndex; j++) {
int index = freeLineage.at(j);
JointState& state = _jointStates[index];
@@ -1289,18 +1288,18 @@ bool Model::setJointPosition(int jointIndex, const glm::vec3& translation, const
if (!(joint.isFree || allIntermediatesFree)) {
continue;
}
glm::vec3 jointPosition = extractTranslation(state._transform);
glm::vec3 jointPosition = extractTranslation(state.getTransform());
glm::vec3 jointVector = endPosition - jointPosition;
glm::quat oldCombinedRotation = state._combinedRotation;
glm::quat oldCombinedRotation = state.getRotation();
glm::quat combinedDelta;
float combinedWeight;
if (useRotation) {
combinedDelta = safeMix(rotation * glm::inverse(endRotation),
rotationBetween(jointVector, relativePosition - jointPosition), 0.5f);
rotationBetween(jointVector, position - jointPosition), 0.5f);
combinedWeight = 2.0f;

} else {
combinedDelta = rotationBetween(jointVector, relativePosition - jointPosition);
combinedDelta = rotationBetween(jointVector, position - jointPosition);
combinedWeight = 1.0f;
}
if (alignment != glm::vec3() && j > 1) {

@@ -1309,7 +1308,7 @@ bool Model::setJointPosition(int jointIndex, const glm::vec3& translation, const
for (int k = j - 1; k > 0; k--) {
int index = freeLineage.at(k);
updateJointState(index);
positionSum += extractTranslation(_jointStates.at(index)._transform);
positionSum += extractTranslation(_jointStates.at(index).getTransform());
}
glm::vec3 projectedCenterOfMass = glm::cross(jointVector,
glm::cross(positionSum / (j - 1.0f) - jointPosition, jointVector));

@@ -1321,7 +1320,7 @@ bool Model::setJointPosition(int jointIndex, const glm::vec3& translation, const
}
}
state.applyRotationDelta(combinedDelta, true, priority);
glm::quat actualDelta = state._combinedRotation * glm::inverse(oldCombinedRotation);
glm::quat actualDelta = state.getRotation() * glm::inverse(oldCombinedRotation);
endPosition = actualDelta * jointVector + jointPosition;
if (useRotation) {
endRotation = actualDelta * endRotation;

@@ -1344,7 +1343,7 @@ bool Model::restoreJointPosition(int jointIndex, float fraction, float priority)
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const QVector<int>& freeLineage = geometry.joints.at(jointIndex).freeLineage;

foreach (int index, freeLineage) {
JointState& state = _jointStates[index];
state.restoreRotation(fraction, priority);
@@ -1470,12 +1469,12 @@ void Model::applyCollision(CollisionInfo& collision) {
glm::vec3 jointPosition(0.0f);
int jointIndex = collision._intData;
if (getJointPosition(jointIndex, jointPosition)) {
if (getJointPositionInWorldFrame(jointIndex, jointPosition)) {
const FBXJoint& joint = _geometry->getFBXGeometry().joints[jointIndex];
if (joint.parentIndex != -1) {
// compute the approximate distance (travel) that the joint needs to move
glm::vec3 start;
getJointPosition(joint.parentIndex, start);
getJointPositionInWorldFrame(joint.parentIndex, start);
glm::vec3 contactPoint = collision._contactPoint - start;
glm::vec3 penetrationEnd = contactPoint + collision._penetration;
glm::vec3 axis = glm::cross(contactPoint, penetrationEnd);

@@ -1486,8 +1485,9 @@ void Model::applyCollision(CollisionInfo& collision) {
float angle = asinf(travel / (glm::length(contactPoint) * glm::length(penetrationEnd)));
axis = glm::normalize(axis);
glm::vec3 end;
getJointPosition(jointIndex, end);
glm::vec3 newEnd = start + glm::angleAxis(angle, axis) * (end - start);
getJointPositionInWorldFrame(jointIndex, end);
// transform into model-frame
glm::vec3 newEnd = glm::inverse(_rotation) * (start + glm::angleAxis(angle, axis) * (end - start) - _translation);
// try to move it
setJointPosition(jointIndex, newEnd, glm::quat(), false, -1, true);
}

@@ -1895,8 +1895,8 @@ AnimationHandle::AnimationHandle(Model* model) :
_loop(false),
_hold(false),
_startAutomatically(false),
_firstFrame(0),
_lastFrame(INT_MAX),
_firstFrame(0.0f),
_lastFrame(FLT_MAX),
_running(false) {
}
@@ -1927,41 +1927,40 @@ void AnimationHandle::simulate(float deltaTime) {
stop();
return;
}
int lastFrameIndex = qMin(_lastFrame, animationGeometry.animationFrames.size() - 1);
int firstFrameIndex = qMin(_firstFrame, lastFrameIndex);
if ((!_loop && _frameIndex >= lastFrameIndex) || firstFrameIndex == lastFrameIndex) {
float endFrameIndex = qMin(_lastFrame, animationGeometry.animationFrames.size() - (_loop ? 0.0f : 1.0f));
float startFrameIndex = qMin(_firstFrame, endFrameIndex);
if ((!_loop && (_frameIndex < startFrameIndex || _frameIndex > endFrameIndex)) || startFrameIndex == endFrameIndex) {
// passed the end; apply the last frame
const FBXAnimationFrame& frame = animationGeometry.animationFrames.at(lastFrameIndex);
for (int i = 0; i < _jointMappings.size(); i++) {
int mapping = _jointMappings.at(i);
if (mapping != -1) {
JointState& state = _model->_jointStates[mapping];
if (_priority >= state._animationPriority) {
state._rotation = frame.rotations.at(i);
state._animationPriority = _priority;
}
}
}
applyFrame(glm::clamp(_frameIndex, startFrameIndex, endFrameIndex));
if (!_hold) {
stop();
}
return;
}
int frameCount = lastFrameIndex - firstFrameIndex + 1;
_frameIndex = firstFrameIndex + glm::mod(qMax(_frameIndex - firstFrameIndex, 0.0f), (float)frameCount);
// wrap within the the desired range
if (_frameIndex < startFrameIndex) {
_frameIndex = endFrameIndex - glm::mod(endFrameIndex - _frameIndex, endFrameIndex - startFrameIndex);

} else if (_frameIndex > endFrameIndex) {
_frameIndex = startFrameIndex + glm::mod(_frameIndex - startFrameIndex, endFrameIndex - startFrameIndex);
}

// blend between the closest two frames
const FBXAnimationFrame& ceilFrame = animationGeometry.animationFrames.at(
firstFrameIndex + ((int)glm::ceil(_frameIndex) - firstFrameIndex) % frameCount);
const FBXAnimationFrame& floorFrame = animationGeometry.animationFrames.at(
firstFrameIndex + ((int)glm::floor(_frameIndex) - firstFrameIndex) % frameCount);
float frameFraction = glm::fract(_frameIndex);
applyFrame(_frameIndex);
}

void AnimationHandle::applyFrame(float frameIndex) {
const FBXGeometry& animationGeometry = _animation->getGeometry();
int frameCount = animationGeometry.animationFrames.size();
const FBXAnimationFrame& floorFrame = animationGeometry.animationFrames.at((int)glm::floor(frameIndex) % frameCount);
const FBXAnimationFrame& ceilFrame = animationGeometry.animationFrames.at((int)glm::ceil(frameIndex) % frameCount);
float frameFraction = glm::fract(frameIndex);
for (int i = 0; i < _jointMappings.size(); i++) {
int mapping = _jointMappings.at(i);
if (mapping != -1) {
JointState& state = _model->_jointStates[mapping];
if (_priority >= state._animationPriority) {
state._rotation = safeMix(floorFrame.rotations.at(i), ceilFrame.rotations.at(i), frameFraction);
state._rotationInParentFrame = safeMix(floorFrame.rotations.at(i), ceilFrame.rotations.at(i), frameFraction);
state._animationPriority = _priority;
}
}
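With float frame indices the handle can sit between two integer frames, so simulate() first wraps _frameIndex back into [startFrameIndex, endFrameIndex] and applyFrame() then blends the two surrounding integer frames with safeMix. The wrapping-and-blending logic reduced to a standalone sketch; safeMix is approximated with glm::slerp here, and the frame storage is a plain vector of quaternions rather than FBXAnimationFrame:

    #include <vector>
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Bring an out-of-range index back into [startFrame, endFrame].
    float wrapFrameIndex(float frameIndex, float startFrame, float endFrame) {
        if (frameIndex < startFrame) {
            return endFrame - glm::mod(endFrame - frameIndex, endFrame - startFrame);
        }
        if (frameIndex > endFrame) {
            return startFrame + glm::mod(frameIndex - startFrame, endFrame - startFrame);
        }
        return frameIndex;
    }

    // Sample a rotation track at a fractional frame index by blending the
    // two closest integer frames.
    glm::quat sampleRotation(const std::vector<glm::quat>& frames, float frameIndex) {
        int frameCount = static_cast<int>(frames.size());
        const glm::quat& floorFrame = frames[static_cast<int>(glm::floor(frameIndex)) % frameCount];
        const glm::quat& ceilFrame = frames[static_cast<int>(glm::ceil(frameIndex)) % frameCount];
        return glm::slerp(floorFrame, ceilFrame, glm::fract(frameIndex));
    }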
@ -1980,79 +1979,3 @@ void AnimationHandle::replaceMatchingPriorities(float newPriority) {
    }
}

// ----------------------------------------------------------------------------
// JointState  TODO: move this class to its own files
// ----------------------------------------------------------------------------
JointState::JointState() :
    _animationPriority(0.0f),
    _fbxJoint(NULL) {
}

void JointState::setFBXJoint(const FBXJoint* joint) {
    assert(joint != NULL);
    _rotation = joint->rotation;
    // NOTE: JointState does not own the FBXJoint to which it points.
    _fbxJoint = joint;
}

void JointState::copyState(const JointState& state) {
    _rotation = state._rotation;
    _transform = state._transform;
    _combinedRotation = state._combinedRotation;
    _animationPriority = state._animationPriority;
    // DO NOT copy _fbxJoint
}

void JointState::computeTransforms(const glm::mat4& baseTransform, const glm::quat& baseRotation) {
    assert(_fbxJoint != NULL);
    glm::quat combinedRotation = _fbxJoint->preRotation * _rotation * _fbxJoint->postRotation;
    _transform = baseTransform * glm::translate(_fbxJoint->translation) * _fbxJoint->preTransform
        * glm::mat4_cast(combinedRotation) * _fbxJoint->postTransform;
    _combinedRotation = baseRotation * combinedRotation;
}

glm::quat JointState::getJointRotation(bool fromBind) const {
    assert(_fbxJoint != NULL);
    return _combinedRotation * (fromBind ? _fbxJoint->inverseBindRotation : _fbxJoint->inverseDefaultRotation);
}

void JointState::restoreRotation(float fraction, float priority) {
    assert(_fbxJoint != NULL);
    if (priority == _animationPriority) {
        _rotation = safeMix(_rotation, _fbxJoint->rotation, fraction);
        _animationPriority = 0.0f;
    }
}

void JointState::setRotation(const glm::quat& rotation, float priority) {
    assert(_fbxJoint != NULL);
    if (priority >= _animationPriority) {
        _rotation = _rotation * glm::inverse(_combinedRotation) * rotation * glm::inverse(_fbxJoint->inverseBindRotation);
        _animationPriority = priority;
    }
}

void JointState::applyRotationDelta(const glm::quat& delta, bool constrain, float priority) {
    assert(_fbxJoint != NULL);
    if (priority < _animationPriority) {
        return;
    }
    _animationPriority = priority;
    if (!constrain || (_fbxJoint->rotationMin == glm::vec3(-PI, -PI, -PI) &&
            _fbxJoint->rotationMax == glm::vec3(PI, PI, PI))) {
        // no constraints
        _rotation = _rotation * glm::inverse(_combinedRotation) * delta * _combinedRotation;
        _combinedRotation = delta * _combinedRotation;
        return;
    }
    glm::quat targetRotation = delta * _combinedRotation;
    glm::vec3 eulers = safeEulerAngles(_rotation * glm::inverse(_combinedRotation) * targetRotation);
    glm::quat newRotation = glm::quat(glm::clamp(eulers, _fbxJoint->rotationMin, _fbxJoint->rotationMax));
    _combinedRotation = _combinedRotation * glm::inverse(_rotation) * newRotation;
    _rotation = newRotation;
}

const glm::vec3& JointState::getDefaultTranslationInParentFrame() const {
    assert(_fbxJoint != NULL);
    return _fbxJoint->translation;
}

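A minimal sketch, given the declarations above and assuming joints are stored parents-before-children (as RagDoll::init below also asserts), of how a skeleton update might drive computeTransforms() so each child composes onto its parent's accumulated transform; the helper name and the parentIndices argument are illustrative, not project code:

void updateJointChain(QVector<JointState>& states, const QVector<int>& parentIndices,
        const glm::mat4& rootTransform, const glm::quat& rootRotation) {
    for (int i = 0; i < states.size(); i++) {
        int parentIndex = parentIndices.at(i);
        if (parentIndex == -1) {
            // the root joint starts from the model's own transform and rotation
            states[i].computeTransforms(rootTransform, rootRotation);
        } else {
            // children reuse the parent's transform and combined rotation computed earlier in the loop
            states[i].computeTransforms(states[parentIndex]._transform, states[parentIndex]._combinedRotation);
        }
    }
}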
@ -22,6 +22,7 @@
|
|||
|
||||
#include "GeometryCache.h"
|
||||
#include "InterfaceConfig.h"
|
||||
#include "JointState.h"
|
||||
#include "ProgramObject.h"
|
||||
#include "TextureCache.h"
|
||||
|
||||
|
@ -30,40 +31,6 @@ class Shape;
|
|||
|
||||
typedef QSharedPointer<AnimationHandle> AnimationHandlePointer;
|
||||
typedef QWeakPointer<AnimationHandle> WeakAnimationHandlePointer;
|
||||
|
||||
class JointState {
|
||||
public:
|
||||
JointState();
|
||||
|
||||
void setFBXJoint(const FBXJoint* joint);
|
||||
const FBXJoint& getFBXJoint() const { return *_fbxJoint; }
|
||||
|
||||
void copyState(const JointState& state);
|
||||
|
||||
/// computes new _transform and _combinedRotation
|
||||
void computeTransforms(const glm::mat4& baseTransform, const glm::quat& baseRotation);
|
||||
|
||||
/// \return rotation from the joint's default (or bind) frame to world frame
|
||||
glm::quat getJointRotation(bool fromBind = false) const;
|
||||
|
||||
void applyRotationDelta(const glm::quat& delta, bool constrain = true, float priority = 1.0f);
|
||||
|
||||
const glm::vec3& getDefaultTranslationInParentFrame() const;
|
||||
|
||||
void restoreRotation(float fraction, float priority);
|
||||
|
||||
/// \param rotation is from bind- to world-frame
|
||||
/// computes parent relative _rotation and sets that
|
||||
void setRotation(const glm::quat& rotation, float priority);
|
||||
|
||||
glm::quat _rotation; // rotation relative to parent
|
||||
glm::mat4 _transform; // rotation to world frame + translation in model frame
|
||||
glm::quat _combinedRotation; // rotation from joint local to world frame
|
||||
float _animationPriority; // the priority of the animation affecting this joint
|
||||
|
||||
private:
|
||||
const FBXJoint* _fbxJoint; // JointState does NOT own its FBXJoint
|
||||
};
|
||||
|
||||
/// A generic 3D model displaying geometry loaded from a URL.
|
||||
class Model : public QObject {
|
||||
|
@ -155,10 +122,15 @@ public:
|
|||
/// Returns the index of the last free ancestor of the indexed joint, or -1 if not found.
|
||||
int getLastFreeJointIndex(int jointIndex) const;
|
||||
|
||||
bool getJointPosition(int jointIndex, glm::vec3& position) const;
|
||||
bool getJointRotation(int jointIndex, glm::quat& rotation, bool fromBind = false) const;
|
||||
bool getJointPositionInWorldFrame(int jointIndex, glm::vec3& position) const;
|
||||
bool getJointRotationInWorldFrame(int jointIndex, glm::quat& rotation) const;
|
||||
bool getJointCombinedRotation(int jointIndex, glm::quat& rotation) const;
|
||||
|
||||
/// \param jointIndex index of joint in model structure
|
||||
/// \param position[out] position of joint in model-frame
|
||||
/// \return true if joint exists
|
||||
bool getJointPosition(int jointIndex, glm::vec3& position) const;
|
||||
|
||||
QStringList getJointNames() const;
|
||||
|
||||
AnimationHandlePointer createAnimationHandle();
|
||||
|
@ -234,6 +206,8 @@ protected:
|
|||
|
||||
// returns 'true' if needs fullUpdate after geometry change
|
||||
bool updateGeometry();
|
||||
|
||||
virtual void setJointStates(QVector<JointState> states);
|
||||
|
||||
void setScaleInternal(const glm::vec3& scale);
|
||||
void scaleToFit();
|
||||
|
@ -244,7 +218,15 @@ protected:
|
|||
/// Updates the state of the joint at the specified index.
|
||||
virtual void updateJointState(int index);
|
||||
|
||||
bool setJointPosition(int jointIndex, const glm::vec3& translation, const glm::quat& rotation = glm::quat(),
|
||||
/// \param jointIndex index of joint in model structure
|
||||
/// \param position position of joint in model-frame
|
||||
/// \param rotation rotation of joint in model-frame
|
||||
/// \param useRotation false if rotation should be ignored
|
||||
/// \param lastFreeIndex
|
||||
/// \param allIntermediatesFree
|
||||
/// \param alignment
|
||||
/// \return true if joint exists
|
||||
bool setJointPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation = glm::quat(),
|
||||
bool useRotation = false, int lastFreeIndex = -1, bool allIntermediatesFree = false,
|
||||
const glm::vec3& alignment = glm::vec3(0.0f, -1.0f, 0.0f), float priority = 1.0f);
|
||||
|
||||
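A hypothetical call from inside a Model subclass (setJointPosition is protected); the subclass, joint index, and target values are made-up example data, not taken from the engine:

class MyModel : public Model {                              // assumed subclass for illustration
public:
    void pointHandAt(const glm::vec3& target) {
        const int HAND_JOINT_INDEX = 5;                     // example joint index
        // reposition the hand joint, letting every intermediate joint move, at priority 2
        setJointPosition(HAND_JOINT_INDEX, target, glm::quat(), false, -1, true,
                         glm::vec3(0.0f, -1.0f, 0.0f), 2.0f);
    }
};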
|
@ -395,11 +377,11 @@ public:
|
|||
void setStartAutomatically(bool startAutomatically);
|
||||
bool getStartAutomatically() const { return _startAutomatically; }
|
||||
|
||||
void setFirstFrame(int firstFrame) { _firstFrame = firstFrame; }
|
||||
int getFirstFrame() const { return _firstFrame; }
|
||||
void setFirstFrame(float firstFrame) { _firstFrame = firstFrame; }
|
||||
float getFirstFrame() const { return _firstFrame; }
|
||||
|
||||
void setLastFrame(int lastFrame) { _lastFrame = lastFrame; }
|
||||
int getLastFrame() const { return _lastFrame; }
|
||||
void setLastFrame(float lastFrame) { _lastFrame = lastFrame; }
|
||||
float getLastFrame() const { return _lastFrame; }
|
||||
|
||||
void setMaskedJoints(const QStringList& maskedJoints);
|
||||
const QStringList& getMaskedJoints() const { return _maskedJoints; }
|
||||
|
@ -423,6 +405,7 @@ private:
|
|||
AnimationHandle(Model* model);
|
||||
|
||||
void simulate(float deltaTime);
|
||||
void applyFrame(float frameIndex);
|
||||
void replaceMatchingPriorities(float newPriority);
|
||||
|
||||
Model* _model;
|
||||
|
@ -435,8 +418,8 @@ private:
|
|||
bool _loop;
|
||||
bool _hold;
|
||||
bool _startAutomatically;
|
||||
int _firstFrame;
|
||||
int _lastFrame;
|
||||
float _firstFrame;
|
||||
float _lastFrame;
|
||||
QStringList _maskedJoints;
|
||||
bool _running;
|
||||
QVector<int> _jointMappings;
|
||||
|
|
131
interface/src/renderer/RagDoll.cpp
Normal file
@ -0,0 +1,131 @@
//
//  RagDoll.cpp
//  interface/src/avatar
//
//  Created by Andrew Meadows 2014.05.30
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/transform.hpp>

#include <CollisionInfo.h>
#include <SharedUtil.h>

#include "RagDoll.h"

// ----------------------------------------------------------------------------
// FixedConstraint
// ----------------------------------------------------------------------------
FixedConstraint::FixedConstraint() : _point(NULL), _anchor(0.0f, 0.0f, 0.0f) {
}

float FixedConstraint::enforce() {
    assert(_point != NULL);
    float distance = glm::distance(_anchor, *_point);
    *_point = _anchor;
    return distance;
}

void FixedConstraint::setPoint(glm::vec3* point) {
    _point = point;
}

void FixedConstraint::setAnchor(const glm::vec3& anchor) {
    _anchor = anchor;
}

// ----------------------------------------------------------------------------
// DistanceConstraint
// ----------------------------------------------------------------------------
DistanceConstraint::DistanceConstraint(glm::vec3* pointA, glm::vec3* pointB) : _distance(-1.0f) {
    _points[0] = pointA;
    _points[1] = pointB;
    _distance = glm::distance(*(_points[0]), *(_points[1]));
}

DistanceConstraint::DistanceConstraint(const DistanceConstraint& other) {
    _distance = other._distance;
    _points[0] = other._points[0];
    _points[1] = other._points[1];
}

void DistanceConstraint::setDistance(float distance) {
    _distance = fabsf(distance);
}

float DistanceConstraint::enforce() {
    float newDistance = glm::distance(*(_points[0]), *(_points[1]));
    glm::vec3 direction(0.0f, 1.0f, 0.0f);
    if (newDistance > EPSILON) {
        direction = (*(_points[0]) - *(_points[1])) / newDistance;
    }
    glm::vec3 center = 0.5f * (*(_points[0]) + *(_points[1]));
    *(_points[0]) = center + (0.5f * _distance) * direction;
    *(_points[1]) = center - (0.5f * _distance) * direction;
    return glm::abs(newDistance - _distance);
}

// ----------------------------------------------------------------------------
// RagDoll
// ----------------------------------------------------------------------------

RagDoll::RagDoll() {
}

RagDoll::~RagDoll() {
    clear();
}

void RagDoll::init(const QVector<JointState>& states) {
    clear();
    const int numStates = states.size();
    _points.reserve(numStates);
    for (int i = 0; i < numStates; ++i) {
        const JointState& state = states[i];
        _points.push_back(state.getPosition());
        int parentIndex = state.getFBXJoint().parentIndex;
        assert(parentIndex < i);
        if (parentIndex != -1) {
            DistanceConstraint* stick = new DistanceConstraint(&(_points[i]), &(_points[parentIndex]));
            _constraints.push_back(stick);
        }
    }
}

/// Delete all data.
void RagDoll::clear() {
    int numConstraints = _constraints.size();
    for (int i = 0; i < numConstraints; ++i) {
        delete _constraints[i];
    }
    _constraints.clear();
    _points.clear();
}

float RagDoll::slaveToSkeleton(const QVector<JointState>& states, float fraction) {
    const int numStates = states.size();
    assert(numStates == _points.size());
    fraction = glm::clamp(fraction, 0.0f, 1.0f);
    float maxDistance = 0.0f;
    for (int i = 0; i < numStates; ++i) {
        glm::vec3 oldPoint = _points[i];
        _points[i] = (1.0f - fraction) * _points[i] + fraction * states[i].getPosition();
        maxDistance = glm::max(maxDistance, glm::distance(oldPoint, _points[i]));
    }
    return maxDistance;
}

float RagDoll::enforceConstraints() {
    float maxDistance = 0.0f;
    const int numConstraints = _constraints.size();
    for (int i = 0; i < numConstraints; ++i) {
        DistanceConstraint* c = static_cast<DistanceConstraint*>(_constraints[i]);
        //maxDistance = glm::max(maxDistance, _constraints[i]->enforce());
        maxDistance = glm::max(maxDistance, c->enforce());
    }
    return maxDistance;
}
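A sketch of how a caller might combine the methods above into one simulation step: pull the points partway toward the skeleton, then relax the distance constraints until the residual movement is small. The iteration cap, blend fraction, and tolerance are illustrative assumptions, not values taken from the engine:

void stepRagDoll(RagDoll& doll, const QVector<JointState>& states) {
    const int MAX_ITERATIONS = 4;
    const float TOLERANCE = 1.0e-4f;                 // acceptable residual point movement
    doll.slaveToSkeleton(states, 0.5f);              // pull points half-way toward the joint positions
    for (int i = 0; i < MAX_ITERATIONS; i++) {
        if (doll.enforceConstraints() < TOLERANCE) {
            break;                                   // constraints have converged
        }
    }
}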
78
interface/src/renderer/RagDoll.h
Normal file
@ -0,0 +1,78 @@
//
//  RagDoll.h
//  interface/src/avatar
//
//  Created by Andrew Meadows 2014.05.30
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_RagDoll_h
#define hifi_RagDoll_h

#include "renderer/Model.h"

class Constraint {
public:
    Constraint() {}
    virtual ~Constraint() {}

    /// Enforce constraint by moving relevant points.
    /// \return max distance of point movement
    virtual float enforce() = 0;
};

class FixedConstraint : public Constraint {
public:
    FixedConstraint();
    float enforce();
    void setPoint(glm::vec3* point);
    void setAnchor(const glm::vec3& anchor);
private:
    glm::vec3* _point;
    glm::vec3 _anchor;
};

class DistanceConstraint : public Constraint {
public:
    DistanceConstraint(glm::vec3* pointA, glm::vec3* pointB);
    DistanceConstraint(const DistanceConstraint& other);
    float enforce();
    void setDistance(float distance);
private:
    float _distance;
    glm::vec3* _points[2];
};

class RagDoll {
public:

    RagDoll();
    virtual ~RagDoll();

    /// Create points and constraints based on topology of collection of joints
    /// \param states list of connected joint states
    void init(const QVector<JointState>& states);

    /// Delete all data.
    void clear();

    /// \param states list of joint states
    /// \param fraction range from 0.0 (no movement) to 1.0 (use joint locations)
    /// \return max distance of point movement
    float slaveToSkeleton(const QVector<JointState>& states, float fraction);

    /// Enforce constraints.
    /// \return max distance of point movement
    float enforceConstraints();

    const QVector<glm::vec3>& getPoints() const { return _points; }

private:
    QVector<Constraint*> _constraints;
    QVector<glm::vec3> _points;
};

#endif // hifi_RagDoll_h
@ -98,6 +98,7 @@ AnimationPanel::AnimationPanel(AnimationsDialog* dialog, const AnimationHandlePo
|
|||
|
||||
layout->addRow("FPS:", _fps = new QDoubleSpinBox());
|
||||
_fps->setSingleStep(0.01);
|
||||
_fps->setMinimum(-FLT_MAX);
|
||||
_fps->setMaximum(FLT_MAX);
|
||||
_fps->setValue(handle->getFPS());
|
||||
connect(_fps, SIGNAL(valueChanged(double)), SLOT(updateHandle()));
|
||||
|
@ -128,15 +129,17 @@ AnimationPanel::AnimationPanel(AnimationsDialog* dialog, const AnimationHandlePo
|
|||
_startAutomatically->setChecked(handle->getStartAutomatically());
|
||||
connect(_startAutomatically, SIGNAL(toggled(bool)), SLOT(updateHandle()));
|
||||
|
||||
layout->addRow("First Frame:", _firstFrame = new QSpinBox());
|
||||
layout->addRow("First Frame:", _firstFrame = new QDoubleSpinBox());
|
||||
_firstFrame->setSingleStep(0.01);
|
||||
_firstFrame->setMaximum(INT_MAX);
|
||||
_firstFrame->setValue(handle->getFirstFrame());
|
||||
connect(_firstFrame, SIGNAL(valueChanged(int)), SLOT(updateHandle()));
|
||||
connect(_firstFrame, SIGNAL(valueChanged(double)), SLOT(updateHandle()));
|
||||
|
||||
layout->addRow("Last Frame:", _lastFrame = new QSpinBox());
|
||||
layout->addRow("Last Frame:", _lastFrame = new QDoubleSpinBox());
|
||||
_lastFrame->setSingleStep(0.01);
|
||||
_lastFrame->setMaximum(INT_MAX);
|
||||
_lastFrame->setValue(handle->getLastFrame());
|
||||
connect(_lastFrame, SIGNAL(valueChanged(int)), SLOT(updateHandle()));
|
||||
connect(_lastFrame, SIGNAL(valueChanged(double)), SLOT(updateHandle()));
|
||||
|
||||
QHBoxLayout* buttons = new QHBoxLayout();
|
||||
layout->addRow(buttons);
|
||||
|
|
|
@ -22,7 +22,6 @@ class QComboBox;
|
|||
class QDoubleSpinner;
|
||||
class QLineEdit;
|
||||
class QPushButton;
|
||||
class QSpinBox;
|
||||
class QVBoxLayout;
|
||||
|
||||
/// Allows users to edit the avatar animations.
|
||||
|
@ -71,8 +70,8 @@ private:
|
|||
QCheckBox* _loop;
|
||||
QCheckBox* _hold;
|
||||
QCheckBox* _startAutomatically;
|
||||
QSpinBox* _firstFrame;
|
||||
QSpinBox* _lastFrame;
|
||||
QDoubleSpinBox* _firstFrame;
|
||||
QDoubleSpinBox* _lastFrame;
|
||||
QLineEdit* _maskedJoints;
|
||||
QPushButton* _chooseMaskedJoints;
|
||||
QPushButton* _start;
|
||||
|
|
|
@ -19,7 +19,11 @@
|
|||
|
||||
#include "ui/Stats.h"
|
||||
|
||||
ApplicationOverlay::ApplicationOverlay() : _framebufferObject(NULL) {
|
||||
ApplicationOverlay::ApplicationOverlay() :
|
||||
_framebufferObject(NULL),
|
||||
_oculusAngle(65.0f * RADIANS_PER_DEGREE),
|
||||
_distance(0.5f),
|
||||
_uiType(HEMISPHERE) {
|
||||
|
||||
}
|
||||
|
||||
|
@ -45,6 +49,10 @@ void ApplicationOverlay::renderOverlay(bool renderToTexture) {
|
|||
BandwidthMeter* bandwidthMeter = application->getBandwidthMeter();
|
||||
NodeBounds& nodeBoundsDisplay = application->getNodeBoundsDisplay();
|
||||
|
||||
int mouseX = application->getMouseX();
|
||||
int mouseY = application->getMouseY();
|
||||
bool renderPointer = renderToTexture;
|
||||
|
||||
if (renderToTexture) {
|
||||
getFramebufferObject()->bind();
|
||||
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
|
||||
|
@ -220,6 +228,34 @@ void ApplicationOverlay::renderOverlay(bool renderToTexture) {
|
|||
|
||||
overlays.render2D();
|
||||
|
||||
// Render a crosshair over the pointer when in Oculus
|
||||
if (renderPointer) {
|
||||
const float pointerWidth = 10;
|
||||
const float pointerHeight = 10;
|
||||
const float crossPad = 4;
|
||||
|
||||
mouseX -= pointerWidth / 2.0f;
|
||||
mouseY += pointerHeight / 2.0f;
|
||||
|
||||
glBegin(GL_QUADS);
|
||||
|
||||
glColor3f(1, 0, 0);
|
||||
|
||||
//Horizontal crosshair
|
||||
glVertex2i(mouseX, mouseY - crossPad);
|
||||
glVertex2i(mouseX + pointerWidth, mouseY - crossPad);
|
||||
glVertex2i(mouseX + pointerWidth, mouseY - pointerHeight + crossPad);
|
||||
glVertex2i(mouseX, mouseY - pointerHeight + crossPad);
|
||||
|
||||
//Vertical crosshair
|
||||
glVertex2i(mouseX + crossPad, mouseY);
|
||||
glVertex2i(mouseX + pointerWidth - crossPad, mouseY);
|
||||
glVertex2i(mouseX + pointerWidth - crossPad, mouseY - pointerHeight);
|
||||
glVertex2i(mouseX + crossPad, mouseY - pointerHeight);
|
||||
|
||||
glEnd();
|
||||
}
|
||||
|
||||
glPopMatrix();
|
||||
|
||||
glMatrixMode(GL_MODELVIEW);
|
||||
|
@ -227,7 +263,6 @@ void ApplicationOverlay::renderOverlay(bool renderToTexture) {
|
|||
glEnable(GL_LIGHTING);
|
||||
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_CONSTANT_ALPHA, GL_ONE);
|
||||
|
||||
|
||||
if (renderToTexture) {
|
||||
getFramebufferObject()->release();
|
||||
}
|
||||
|
@ -262,6 +297,17 @@ void ApplicationOverlay::displayOverlayTexture(Camera& whichCamera) {
    glDisable(GL_TEXTURE_2D);
}

// Fast helper functions
inline float max(float a, float b) {
    return (a > b) ? a : b;
}

inline float min(float a, float b) {
    return (a < b) ? a : b;
}

const float textureFov = PI / 2.5f;

// Draws the FBO texture for Oculus rift.  TODO: Draw a curved texture instead of plane.
void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {

@ -271,23 +317,40 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
|
|||
MyAvatar* myAvatar = application->getAvatar();
|
||||
const glm::vec3& viewMatrixTranslation = application->getViewMatrixTranslation();
|
||||
|
||||
// Calculates the world space width and height of the texture based on a desired FOV
|
||||
const float overlayFov = whichCamera.getFieldOfView() * PI / 180.0f;
|
||||
const float overlayDistance = 1;
|
||||
int mouseX = application->getMouseX();
|
||||
int mouseY = application->getMouseY();
|
||||
const int widgetWidth = glWidget->width();
|
||||
const int widgetHeight = glWidget->height();
|
||||
float magnifyWidth = 80.0f;
|
||||
float magnifyHeight = 60.0f;
|
||||
const float magnification = 4.0f;
|
||||
|
||||
// Get vertical FoV of the displayed overlay texture
|
||||
const float halfVerticalAngle = _oculusAngle / 2.0f;
|
||||
const float overlayAspectRatio = glWidget->width() / (float)glWidget->height();
|
||||
const float overlayHeight = overlayDistance * tan(overlayFov);
|
||||
const float overlayWidth = overlayHeight * overlayAspectRatio;
|
||||
const float halfOverlayWidth = overlayWidth / 2;
|
||||
const float halfOverlayHeight = overlayHeight / 2;
|
||||
const float halfOverlayHeight = _distance * tan(halfVerticalAngle);
|
||||
const float overlayHeight = halfOverlayHeight * 2.0f;
|
||||
|
||||
// The more vertices, the better the curve
|
||||
const int numHorizontalVertices = 20;
|
||||
const int numVerticalVertices = 20;
|
||||
// U texture coordinate width at each quad
|
||||
const float quadTexWidth = 1.0f / (numHorizontalVertices - 1);
|
||||
const float quadTexHeight = 1.0f / (numVerticalVertices - 1);
|
||||
|
||||
// Get horizontal angle and angle increment from vertical angle and aspect ratio
|
||||
const float horizontalAngle = halfVerticalAngle * 2.0f * overlayAspectRatio;
|
||||
const float angleIncrement = horizontalAngle / (numHorizontalVertices - 1);
|
||||
const float halfHorizontalAngle = horizontalAngle / 2;
|
||||
|
||||
const float verticalAngleIncrement = _oculusAngle / (numVerticalVertices - 1);
|
||||
|
||||
glActiveTexture(GL_TEXTURE0);
|
||||
|
||||
glDepthMask(GL_FALSE);
|
||||
|
||||
glEnable(GL_BLEND);
|
||||
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
|
||||
|
||||
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_CONSTANT_ALPHA, GL_ONE);
|
||||
glBindTexture(GL_TEXTURE_2D, getFramebufferObject()->texture());
|
||||
glDisable(GL_DEPTH_TEST);
|
||||
glEnable(GL_DEPTH_TEST);
|
||||
glDisable(GL_LIGHTING);
|
||||
glEnable(GL_TEXTURE_2D);
|
||||
|
||||
|
@ -305,21 +368,166 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
|
|||
glm::vec3 pos = whichCamera.getPosition();
|
||||
glm::quat rot = myAvatar->getOrientation();
|
||||
glm::vec3 axis = glm::axis(rot);
|
||||
pos += rot * glm::vec3(0.0, 0.0, -overlayDistance);
|
||||
|
||||
|
||||
glTranslatef(pos.x, pos.y, pos.z);
|
||||
glRotatef(glm::degrees(glm::angle(rot)), axis.x, axis.y, axis.z);
|
||||
|
||||
glBegin(GL_QUADS);
|
||||
glTexCoord2f(1, 0); glVertex3f(-halfOverlayWidth, halfOverlayHeight, 0);
|
||||
glTexCoord2f(0, 0); glVertex3f(halfOverlayWidth, halfOverlayHeight, 0);
|
||||
glTexCoord2f(0, 1); glVertex3f(halfOverlayWidth, -halfOverlayHeight, 0);
|
||||
glTexCoord2f(1, 1); glVertex3f(-halfOverlayWidth, -halfOverlayHeight, 0);
|
||||
glEnd();
|
||||
glColor3f(1.0f, 1.0f, 1.0f);
|
||||
|
||||
glDepthMask(GL_TRUE);
|
||||
|
||||
glEnable(GL_ALPHA_TEST);
|
||||
glAlphaFunc(GL_GREATER, 0.01f);
|
||||
|
||||
//Draw the magnifying glass
|
||||
|
||||
mouseX -= magnifyWidth / 2;
|
||||
mouseY -= magnifyHeight / 2;
|
||||
|
||||
//clamp the magnification
|
||||
if (mouseX < 0) {
|
||||
magnifyWidth += mouseX;
|
||||
mouseX = 0;
|
||||
} else if (mouseX + magnifyWidth > widgetWidth) {
|
||||
magnifyWidth = widgetWidth - mouseX;
|
||||
}
|
||||
if (mouseY < 0) {
|
||||
magnifyHeight += mouseY;
|
||||
mouseY = 0;
|
||||
} else if (mouseY + magnifyHeight > widgetHeight) {
|
||||
magnifyHeight = widgetHeight - mouseY;
|
||||
}
|
||||
|
||||
const float halfMagnifyHeight = magnifyHeight / 2.0f;
|
||||
|
||||
float newWidth = magnifyWidth * magnification;
|
||||
float newHeight = magnifyHeight * magnification;
|
||||
|
||||
// Magnification Texture Coordinates
|
||||
float magnifyULeft = mouseX / (float)widgetWidth;
|
||||
float magnifyURight = (mouseX + magnifyWidth) / (float)widgetWidth;
|
||||
float magnifyVBottom = 1.0f - mouseY / (float)widgetHeight;
|
||||
float magnifyVTop = 1.0f - (mouseY + magnifyHeight) / (float)widgetHeight;
|
||||
|
||||
// Coordinates of magnification overlay
|
||||
float newMouseX = (mouseX + magnifyWidth / 2) - newWidth / 2.0f;
|
||||
float newMouseY = (mouseY + magnifyHeight / 2) + newHeight / 2.0f;
|
||||
|
||||
// Get angle on the UI
|
||||
float leftAngle = (newMouseX / (float)widgetWidth) * horizontalAngle - halfHorizontalAngle;
|
||||
float rightAngle = ((newMouseX + newWidth) / (float)widgetWidth) * horizontalAngle - halfHorizontalAngle;
|
||||
|
||||
float bottomAngle = (newMouseY / (float)widgetHeight) * _oculusAngle - halfVerticalAngle;
|
||||
float topAngle = ((newMouseY - newHeight) / (float)widgetHeight) * _oculusAngle - halfVerticalAngle;
|
||||
|
||||
float leftX, rightX, leftZ, rightZ, topZ, bottomZ;
|
||||
|
||||
// Get position on hemisphere using angle
|
||||
if (_uiType == HEMISPHERE) {
|
||||
|
||||
//Get new UV coordinates from our magnification window
|
||||
float newULeft = newMouseX / widgetWidth;
|
||||
float newURight = (newMouseX + newWidth) / widgetWidth;
|
||||
float newVBottom = 1.0 - newMouseY / widgetHeight;
|
||||
float newVTop = 1.0 - (newMouseY - newHeight) / widgetHeight;
|
||||
|
||||
// Project our position onto the hemisphere using the UV coordinates
|
||||
float lX = sin((newULeft - 0.5f) * textureFov);
|
||||
float rX = sin((newURight - 0.5f) * textureFov);
|
||||
float bY = sin((newVBottom - 0.5f) * textureFov);
|
||||
float tY = sin((newVTop - 0.5f) * textureFov);
|
||||
|
||||
float dist;
|
||||
//Bottom Left
|
||||
dist = sqrt(lX * lX + bY * bY);
|
||||
float blZ = sqrt(1.0f - dist * dist);
|
||||
//Top Left
|
||||
dist = sqrt(lX * lX + tY * tY);
|
||||
float tlZ = sqrt(1.0f - dist * dist);
|
||||
//Bottom Right
|
||||
dist = sqrt(rX * rX + bY * bY);
|
||||
float brZ = sqrt(1.0f - dist * dist);
|
||||
//Top Right
|
||||
dist = sqrt(rX * rX + tY * tY);
|
||||
float trZ = sqrt(1.0f - dist * dist);
|
||||
|
||||
glBegin(GL_QUADS);
|
||||
|
||||
glTexCoord2f(magnifyULeft, magnifyVBottom); glVertex3f(lX, tY, -tlZ);
|
||||
glTexCoord2f(magnifyURight, magnifyVBottom); glVertex3f(rX, tY, -trZ);
|
||||
glTexCoord2f(magnifyURight, magnifyVTop); glVertex3f(rX, bY, -brZ);
|
||||
glTexCoord2f(magnifyULeft, magnifyVTop); glVertex3f(lX, bY, -blZ);
|
||||
|
||||
glEnd();
|
||||
|
||||
} else {
|
||||
leftX = sin(leftAngle) * _distance;
|
||||
rightX = sin(rightAngle) * _distance;
|
||||
leftZ = -cos(leftAngle) * _distance;
|
||||
rightZ = -cos(rightAngle) * _distance;
|
||||
if (_uiType == CURVED_SEMICIRCLE) {
|
||||
topZ = -cos(topAngle * overlayAspectRatio) * _distance;
|
||||
bottomZ = -cos(bottomAngle * overlayAspectRatio) * _distance;
|
||||
} else {
|
||||
// Dont want to use topZ or bottomZ for SEMICIRCLE
|
||||
topZ = -99999;
|
||||
bottomZ = -99999;
|
||||
}
|
||||
|
||||
float bottomY = (1.0 - newMouseY / (float)widgetHeight) * halfOverlayHeight * 2.0f - halfOverlayHeight;
|
||||
float topY = bottomY + (newHeight / widgetHeight) * halfOverlayHeight * 2;
|
||||
|
||||
//TODO: Remove immediate mode in favor of VBO
|
||||
glBegin(GL_QUADS);
|
||||
|
||||
glTexCoord2f(magnifyULeft, magnifyVBottom); glVertex3f(leftX, topY, max(topZ, leftZ));
|
||||
glTexCoord2f(magnifyURight, magnifyVBottom); glVertex3f(rightX, topY, max(topZ, rightZ));
|
||||
glTexCoord2f(magnifyURight, magnifyVTop); glVertex3f(rightX, bottomY, max(bottomZ, rightZ));
|
||||
glTexCoord2f(magnifyULeft, magnifyVTop); glVertex3f(leftX, bottomY, max(bottomZ, leftZ));
|
||||
|
||||
glEnd();
|
||||
}
|
||||
glDepthMask(GL_FALSE);
|
||||
glDisable(GL_ALPHA_TEST);
|
||||
|
||||
//TODO: Remove immediate mode in favor of VBO
|
||||
if (_uiType == HEMISPHERE) {
|
||||
renderTexturedHemisphere();
|
||||
} else{
|
||||
glBegin(GL_QUADS);
|
||||
// Place the vertices in a semicircle curve around the camera
|
||||
for (int i = 0; i < numHorizontalVertices - 1; i++) {
|
||||
for (int j = 0; j < numVerticalVertices - 1; j++) {
|
||||
|
||||
// Calculate the X and Z coordinates from the angles and radius from camera
|
||||
leftX = sin(angleIncrement * i - halfHorizontalAngle) * _distance;
|
||||
rightX = sin(angleIncrement * (i + 1) - halfHorizontalAngle) * _distance;
|
||||
leftZ = -cos(angleIncrement * i - halfHorizontalAngle) * _distance;
|
||||
rightZ = -cos(angleIncrement * (i + 1) - halfHorizontalAngle) * _distance;
|
||||
if (_uiType == 2) {
|
||||
topZ = -cos((verticalAngleIncrement * (j + 1) - halfVerticalAngle) * overlayAspectRatio) * _distance;
|
||||
bottomZ = -cos((verticalAngleIncrement * j - halfVerticalAngle) * overlayAspectRatio) * _distance;
|
||||
} else {
|
||||
topZ = -99999;
|
||||
bottomZ = -99999;
|
||||
}
|
||||
|
||||
glTexCoord2f(quadTexWidth * i, (j + 1) * quadTexHeight);
|
||||
glVertex3f(leftX, (j + 1) * quadTexHeight * overlayHeight - halfOverlayHeight, max(topZ, leftZ));
|
||||
glTexCoord2f(quadTexWidth * (i + 1), (j + 1) * quadTexHeight);
|
||||
glVertex3f(rightX, (j + 1) * quadTexHeight * overlayHeight - halfOverlayHeight, max(topZ, rightZ));
|
||||
glTexCoord2f(quadTexWidth * (i + 1), j * quadTexHeight);
|
||||
glVertex3f(rightX, j * quadTexHeight * overlayHeight - halfOverlayHeight, max(bottomZ, rightZ));
|
||||
glTexCoord2f(quadTexWidth * i, j * quadTexHeight);
|
||||
glVertex3f(leftX, j * quadTexHeight * overlayHeight - halfOverlayHeight, max(bottomZ, leftZ));
|
||||
}
|
||||
}
|
||||
|
||||
glEnd();
|
||||
}
|
||||
|
||||
glPopMatrix();
|
||||
|
||||
glEnable(GL_DEPTH_TEST);
|
||||
glDepthMask(GL_TRUE);
|
||||
glBindTexture(GL_TEXTURE_2D, 0);
|
||||
glDisable(GL_TEXTURE_2D);
|
||||
|
@ -329,13 +537,106 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
|
|||
|
||||
}
|
||||
|
||||
void ApplicationOverlay::renderTexturedHemisphere() {
|
||||
const int slices = 80;
|
||||
const int stacks = 80;
|
||||
|
||||
static VerticesIndices vbo(0, 0);
|
||||
int vertices = slices * (stacks - 1) + 1;
|
||||
int indices = slices * 2 * 3 * (stacks - 2) + slices * 3;
|
||||
if (vbo.first == 0) {
|
||||
TextureVertex* vertexData = new TextureVertex[vertices];
|
||||
TextureVertex* vertex = vertexData;
|
||||
for (int i = 0; i < stacks - 1; i++) {
|
||||
float phi = PI_OVER_TWO * (float)i / (float)(stacks - 1);
|
||||
float z = -sinf(phi), radius = cosf(phi);
|
||||
|
||||
for (int j = 0; j < slices; j++) {
|
||||
float theta = TWO_PI * (float)j / (float)slices;
|
||||
|
||||
vertex->position.x = sinf(theta) * radius;
|
||||
vertex->position.y = cosf(theta) * radius;
|
||||
vertex->position.z = z;
|
||||
vertex->uv.x = asin(vertex->position.x) / (textureFov) + 0.5f;
|
||||
vertex->uv.y = asin(vertex->position.y) / (textureFov) + 0.5f;
|
||||
vertex++;
|
||||
}
|
||||
}
|
||||
vertex->position.x = 0.0f;
|
||||
vertex->position.y = 0.0f;
|
||||
vertex->position.z = -1.0f;
|
||||
vertex->uv.x = 0.5f;
|
||||
vertex->uv.y = 0.5f;
|
||||
vertex++;
|
||||
|
||||
glGenBuffers(1, &vbo.first);
|
||||
glBindBuffer(GL_ARRAY_BUFFER, vbo.first);
|
||||
const int BYTES_PER_VERTEX = sizeof(TextureVertex);
|
||||
glBufferData(GL_ARRAY_BUFFER, vertices * BYTES_PER_VERTEX, vertexData, GL_STATIC_DRAW);
|
||||
delete[] vertexData;
|
||||
|
||||
GLushort* indexData = new GLushort[indices];
|
||||
GLushort* index = indexData;
|
||||
for (int i = 0; i < stacks - 2; i++) {
|
||||
GLushort bottom = i * slices;
|
||||
GLushort top = bottom + slices;
|
||||
for (int j = 0; j < slices; j++) {
|
||||
int next = (j + 1) % slices;
|
||||
|
||||
*(index++) = bottom + j;
|
||||
*(index++) = top + next;
|
||||
*(index++) = top + j;
|
||||
|
||||
*(index++) = bottom + j;
|
||||
*(index++) = bottom + next;
|
||||
*(index++) = top + next;
|
||||
}
|
||||
}
|
||||
GLushort bottom = (stacks - 2) * slices;
|
||||
GLushort top = bottom + slices;
|
||||
for (int i = 0; i < slices; i++) {
|
||||
*(index++) = bottom + i;
|
||||
*(index++) = bottom + (i + 1) % slices;
|
||||
*(index++) = top;
|
||||
}
|
||||
|
||||
glGenBuffers(1, &vbo.second);
|
||||
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo.second);
|
||||
const int BYTES_PER_INDEX = sizeof(GLushort);
|
||||
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices * BYTES_PER_INDEX, indexData, GL_STATIC_DRAW);
|
||||
delete[] indexData;
|
||||
|
||||
} else {
|
||||
glBindBuffer(GL_ARRAY_BUFFER, vbo.first);
|
||||
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo.second);
|
||||
}
|
||||
|
||||
glEnableClientState(GL_VERTEX_ARRAY);
|
||||
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
|
||||
|
||||
glVertexPointer(3, GL_FLOAT, sizeof(TextureVertex), (void*)0);
|
||||
glTexCoordPointer(2, GL_FLOAT, sizeof(TextureVertex), (void*)12);
|
||||
|
||||
glDrawRangeElements(GL_TRIANGLES, 0, vertices - 1, indices, GL_UNSIGNED_SHORT, 0);
|
||||
|
||||
glDisableClientState(GL_VERTEX_ARRAY);
|
||||
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
|
||||
|
||||
glBindBuffer(GL_ARRAY_BUFFER, 0);
|
||||
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
|
||||
|
||||
}
|
||||
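The hemisphere above assigns texture coordinates with uv = asin(position) / textureFov + 0.5, so the inverse mapping (a texture coordinate back to a point on the unit hemisphere) is what the magnifying-glass path computes with sin() and sqrt(). A self-contained sketch of that inverse; the helper name is illustrative only:

#include <cmath>
#include <glm/glm.hpp>

static glm::vec3 overlayUVToHemisphere(float u, float v, float textureFov) {
    float x = sinf((u - 0.5f) * textureFov);
    float y = sinf((v - 0.5f) * textureFov);
    float dist = sqrtf(x * x + y * y);
    float z = sqrtf(1.0f - dist * dist);   // point on the unit hemisphere, camera looking down -z
    return glm::vec3(x, y, -z);
}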
|
||||
QOpenGLFramebufferObject* ApplicationOverlay::getFramebufferObject() {
|
||||
if (!_framebufferObject) {
|
||||
_framebufferObject = new QOpenGLFramebufferObject(Application::getInstance()->getGLWidget()->size());
|
||||
|
||||
glBindTexture(GL_TEXTURE_2D, _framebufferObject->texture());
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
GLfloat borderColor[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
|
||||
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
|
||||
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
|
||||
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
|
||||
glBindTexture(GL_TEXTURE_2D, 0);
|
||||
}
|
||||
return _framebufferObject;
|
||||
|
|
|
@ -19,6 +19,8 @@ class QOpenGLFramebufferObject;
|
|||
class ApplicationOverlay {
|
||||
public:
|
||||
|
||||
enum UIType { HEMISPHERE, SEMICIRCLE, CURVED_SEMICIRCLE };
|
||||
|
||||
ApplicationOverlay();
|
||||
~ApplicationOverlay();
|
||||
|
||||
|
@ -28,11 +30,28 @@ public:
|
|||
|
||||
// Getters
|
||||
QOpenGLFramebufferObject* getFramebufferObject();
|
||||
float getOculusAngle() const { return _oculusAngle; }
|
||||
|
||||
// Setters
|
||||
void setOculusAngle(float oculusAngle) { _oculusAngle = oculusAngle; }
|
||||
void setUIType(UIType uiType) { _uiType = uiType; }
|
||||
|
||||
private:
|
||||
// Interleaved vertex data
|
||||
struct TextureVertex {
|
||||
glm::vec3 position;
|
||||
glm::vec2 uv;
|
||||
};
|
||||
|
||||
typedef QPair<GLuint, GLuint> VerticesIndices;
|
||||
|
||||
void renderTexturedHemisphere();
|
||||
|
||||
QOpenGLFramebufferObject* _framebufferObject;
|
||||
float _trailingAudioLoudness;
|
||||
float _oculusAngle;
|
||||
float _distance;
|
||||
UIType _uiType;
|
||||
};
|
||||
|
||||
#endif // hifi_ApplicationOverlay_h
|
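A hypothetical configuration snippet using the setters declared above; the angle and UI style are arbitrary example values, not defaults taken from the application:

ApplicationOverlay overlay;
overlay.setOculusAngle(90.0f * RADIANS_PER_DEGREE);       // widen the overlay's vertical field of view
overlay.setUIType(ApplicationOverlay::CURVED_SEMICIRCLE); // curve the overlay both horizontally and vertically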
|
@ -20,14 +20,15 @@

#include "PositionalAudioRingBuffer.h"

PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type) :
    AudioRingBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo) :
    AudioRingBuffer(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
    _type(type),
    _position(0.0f, 0.0f, 0.0f),
    _orientation(0.0f, 0.0f, 0.0f, 0.0f),
    _willBeAddedToMix(false),
    _shouldLoopbackForNode(false),
    _shouldOutputStarveDebug(true)
    _shouldOutputStarveDebug(true),
    _isStereo(isStereo)
{

}
@ -40,6 +41,9 @@ int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
|
|||
// skip the packet header (includes the source UUID)
|
||||
int readBytes = numBytesForPacketHeader(packet);
|
||||
|
||||
// hop over the channel flag that has already been read in AudioMixerClientData
|
||||
readBytes += sizeof(quint8);
|
||||
// read the positional data
|
||||
readBytes += parsePositionalData(packet.mid(readBytes));
|
||||
|
||||
if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) {
|
||||
|
|
|
@ -24,7 +24,7 @@ public:
|
|||
Injector
|
||||
};
|
||||
|
||||
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type);
|
||||
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false);
|
||||
~PositionalAudioRingBuffer();
|
||||
|
||||
int parseData(const QByteArray& packet);
|
||||
|
@ -41,6 +41,8 @@ public:
|
|||
|
||||
bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
|
||||
|
||||
bool isStereo() const { return _isStereo; }
|
||||
|
||||
PositionalAudioRingBuffer::Type getType() const { return _type; }
|
||||
const glm::vec3& getPosition() const { return _position; }
|
||||
const glm::quat& getOrientation() const { return _orientation; }
|
||||
|
@ -56,6 +58,7 @@ protected:
|
|||
bool _willBeAddedToMix;
|
||||
bool _shouldLoopbackForNode;
|
||||
bool _shouldOutputStarveDebug;
|
||||
bool _isStereo;
|
||||
|
||||
float _nextOutputTrailingLoudness;
|
||||
};
|
||||
|
|
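A sketch of constructing a ring buffer with the new stereo flag and using isStereo() to pick the per-frame sample count; the Injector type is taken from the enum above, while treating this as caller-side code is an assumption for illustration:

PositionalAudioRingBuffer injectorBuffer(PositionalAudioRingBuffer::Injector, true);  // stereo injector
int samplesPerFrame = injectorBuffer.isStereo()
        ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO
        : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;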
|
@ -47,11 +47,19 @@ IDStreamer::IDStreamer(Bitstream& stream) :
    _bits(1) {
}

void IDStreamer::setBitsFromValue(int value) {
    _bits = 1;
    while (value >= (1 << _bits) - 1) {
        _bits++;
static int getBitsForHighestValue(int highestValue) {
    // if this turns out to be a bottleneck, there are fancier ways to do it (get the position of the highest set bit):
    // http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogObvious
    int bits = 0;
    while (highestValue != 0) {
        bits++;
        highestValue >>= 1;
    }
    return bits;
}

void IDStreamer::setBitsFromValue(int value) {
    _bits = getBitsForHighestValue(value + 1);
}

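A small, assumed test harness (not project code) that mirrors the helper above and illustrates its behaviour; note that setBitsFromValue(value) therefore sizes the ID field for value + 1:

#include <cassert>

static int getBitsForHighestValue(int highestValue) {   // copy of the helper above, for a standalone check
    int bits = 0;
    while (highestValue != 0) { bits++; highestValue >>= 1; }
    return bits;
}

int main() {
    assert(getBitsForHighestValue(1) == 1);     // values {0, 1} need 1 bit
    assert(getBitsForHighestValue(5) == 3);     // 5 = 0b101 -> 3 bits
    assert(getBitsForHighestValue(255) == 8);   // 255 = 0b11111111 -> 8 bits
    assert(getBitsForHighestValue(256) == 9);
    return 0;
}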
IDStreamer& IDStreamer::operator<<(int value) {
|
||||
|
@ -71,6 +79,10 @@ IDStreamer& IDStreamer::operator>>(int& value) {
|
|||
return *this;
|
||||
}
|
||||
|
||||
static QByteArray getEnumName(const QMetaEnum& metaEnum) {
|
||||
return QByteArray(metaEnum.scope()) + "::" + metaEnum.name();
|
||||
}
|
||||
|
||||
int Bitstream::registerMetaObject(const char* className, const QMetaObject* metaObject) {
|
||||
getMetaObjects().insert(className, metaObject);
|
||||
|
||||
|
@ -78,6 +90,17 @@ int Bitstream::registerMetaObject(const char* className, const QMetaObject* meta
|
|||
for (const QMetaObject* superClass = metaObject; superClass; superClass = superClass->superClass()) {
|
||||
getMetaObjectSubClasses().insert(superClass, metaObject);
|
||||
}
|
||||
|
||||
// register the streamers for all enumerators
|
||||
// temporarily disabled: crashes on Windows
|
||||
//for (int i = 0; i < metaObject->enumeratorCount(); i++) {
|
||||
// QMetaEnum metaEnum = metaObject->enumerator(i);
|
||||
// const TypeStreamer*& streamer = getEnumStreamers()[QPair<QByteArray, QByteArray>(metaEnum.scope(), metaEnum.name())];
|
||||
// if (!streamer) {
|
||||
// getEnumStreamersByName().insert(getEnumName(metaEnum), streamer = new EnumTypeStreamer(metaEnum));
|
||||
// }
|
||||
//}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -120,6 +143,14 @@ void Bitstream::addTypeSubstitution(const QByteArray& typeName, int type) {
|
|||
_typeStreamerSubstitutions.insert(typeName, getTypeStreamers().value(type));
|
||||
}
|
||||
|
||||
void Bitstream::addTypeSubstitution(const QByteArray& typeName, const char* replacementTypeName) {
|
||||
const TypeStreamer* streamer = getTypeStreamers().value(QMetaType::type(replacementTypeName));
|
||||
if (!streamer) {
|
||||
streamer = getEnumStreamersByName().value(replacementTypeName);
|
||||
}
|
||||
_typeStreamerSubstitutions.insert(typeName, streamer);
|
||||
}
|
||||
|
||||
const int LAST_BIT_POSITION = BITS_IN_BYTE - 1;
|
||||
|
||||
Bitstream& Bitstream::write(const void* data, int bits, int offset) {
|
||||
|
@ -193,7 +224,7 @@ void Bitstream::persistWriteMappings(const WriteMappings& mappings) {
|
|||
continue;
|
||||
}
|
||||
connect(it.key().data(), SIGNAL(destroyed(QObject*)), SLOT(clearSharedObject(QObject*)));
|
||||
QPointer<SharedObject>& reference = _sharedObjectReferences[it.key()->getID()];
|
||||
QPointer<SharedObject>& reference = _sharedObjectReferences[it.key()->getOriginID()];
|
||||
if (reference) {
|
||||
_sharedObjectStreamer.removePersistentID(reference);
|
||||
reference->disconnect(this);
|
||||
|
@ -227,7 +258,7 @@ void Bitstream::persistReadMappings(const ReadMappings& mappings) {
|
|||
if (!it.value()) {
|
||||
continue;
|
||||
}
|
||||
QPointer<SharedObject>& reference = _sharedObjectReferences[it.value()->getRemoteID()];
|
||||
QPointer<SharedObject>& reference = _sharedObjectReferences[it.value()->getRemoteOriginID()];
|
||||
if (reference) {
|
||||
_sharedObjectStreamer.removePersistentValue(reference.data());
|
||||
}
|
||||
|
@ -280,16 +311,8 @@ void Bitstream::writeRawDelta(const QObject* value, const QObject* reference) {
|
|||
}
|
||||
const QMetaObject* metaObject = value->metaObject();
|
||||
_metaObjectStreamer << metaObject;
|
||||
for (int i = 0; i < metaObject->propertyCount(); i++) {
|
||||
QMetaProperty property = metaObject->property(i);
|
||||
if (!property.isStored(value)) {
|
||||
continue;
|
||||
}
|
||||
const TypeStreamer* streamer = getTypeStreamers().value(property.userType());
|
||||
if (streamer) {
|
||||
streamer->writeDelta(*this, property.read(value), reference && metaObject == reference->metaObject() ?
|
||||
property.read(reference) : QVariant());
|
||||
}
|
||||
foreach (const PropertyWriter& propertyWriter, getPropertyWriters(metaObject)) {
|
||||
propertyWriter.writeDelta(*this, value, reference);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -411,6 +434,10 @@ Bitstream& Bitstream::operator>>(QUrl& url) {
|
|||
}
|
||||
|
||||
Bitstream& Bitstream::operator<<(const QVariant& value) {
|
||||
if (!value.isValid()) {
|
||||
_typeStreamerStreamer << NULL;
|
||||
return *this;
|
||||
}
|
||||
const TypeStreamer* streamer = getTypeStreamers().value(value.userType());
|
||||
if (streamer) {
|
||||
_typeStreamerStreamer << streamer;
|
||||
|
@ -424,7 +451,11 @@ Bitstream& Bitstream::operator<<(const QVariant& value) {
|
|||
Bitstream& Bitstream::operator>>(QVariant& value) {
|
||||
TypeReader reader;
|
||||
_typeStreamerStreamer >> reader;
|
||||
value = reader.read(*this);
|
||||
if (reader.getTypeName().isEmpty()) {
|
||||
value = QVariant();
|
||||
} else {
|
||||
value = reader.read(*this);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
@ -458,15 +489,8 @@ Bitstream& Bitstream::operator<<(const QObject* object) {
|
|||
}
|
||||
const QMetaObject* metaObject = object->metaObject();
|
||||
_metaObjectStreamer << metaObject;
|
||||
for (int i = 0; i < metaObject->propertyCount(); i++) {
|
||||
QMetaProperty property = metaObject->property(i);
|
||||
if (!property.isStored(object)) {
|
||||
continue;
|
||||
}
|
||||
const TypeStreamer* streamer = getTypeStreamers().value(property.userType());
|
||||
if (streamer) {
|
||||
streamer->write(*this, property.read(object));
|
||||
}
|
||||
foreach (const PropertyWriter& propertyWriter, getPropertyWriters(metaObject)) {
|
||||
propertyWriter.write(*this, object);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
@ -550,25 +574,12 @@ Bitstream& Bitstream::operator<(const QMetaObject* metaObject) {
|
|||
if (_metadataType == NO_METADATA) {
|
||||
return *this;
|
||||
}
|
||||
int storedPropertyCount = 0;
|
||||
for (int i = 0; i < metaObject->propertyCount(); i++) {
|
||||
QMetaProperty property = metaObject->property(i);
|
||||
if (property.isStored() && getTypeStreamers().contains(property.userType())) {
|
||||
storedPropertyCount++;
|
||||
}
|
||||
}
|
||||
*this << storedPropertyCount;
|
||||
const QVector<PropertyWriter>& propertyWriters = getPropertyWriters(metaObject);
|
||||
*this << propertyWriters.size();
|
||||
QCryptographicHash hash(QCryptographicHash::Md5);
|
||||
for (int i = 0; i < metaObject->propertyCount(); i++) {
|
||||
QMetaProperty property = metaObject->property(i);
|
||||
if (!property.isStored()) {
|
||||
continue;
|
||||
}
|
||||
const TypeStreamer* typeStreamer = getTypeStreamers().value(property.userType());
|
||||
if (!typeStreamer) {
|
||||
continue;
|
||||
}
|
||||
_typeStreamerStreamer << typeStreamer;
|
||||
foreach (const PropertyWriter& propertyWriter, propertyWriters) {
|
||||
_typeStreamerStreamer << propertyWriter.getStreamer();
|
||||
const QMetaProperty& property = propertyWriter.getProperty();
|
||||
if (_metadataType == FULL_METADATA) {
|
||||
*this << QByteArray::fromRawData(property.name(), strlen(property.name()));
|
||||
} else {
|
||||
|
@ -621,25 +632,18 @@ Bitstream& Bitstream::operator>(ObjectReader& objectReader) {
|
|||
QCryptographicHash hash(QCryptographicHash::Md5);
|
||||
bool matches = true;
|
||||
if (metaObject) {
|
||||
int propertyIndex = 0;
|
||||
for (int i = 0; i < metaObject->propertyCount(); i++) {
|
||||
QMetaProperty property = metaObject->property(i);
|
||||
if (!property.isStored()) {
|
||||
continue;
|
||||
const QVector<PropertyWriter>& propertyWriters = getPropertyWriters(metaObject);
|
||||
if (propertyWriters.size() == properties.size()) {
|
||||
for (int i = 0; i < propertyWriters.size(); i++) {
|
||||
const PropertyWriter& propertyWriter = propertyWriters.at(i);
|
||||
if (!properties.at(i).getReader().matchesExactly(propertyWriter.getStreamer())) {
|
||||
matches = false;
|
||||
break;
|
||||
}
|
||||
const QMetaProperty& property = propertyWriter.getProperty();
|
||||
hash.addData(property.name(), strlen(property.name()) + 1);
|
||||
}
|
||||
const TypeStreamer* typeStreamer = getTypeStreamers().value(property.userType());
|
||||
if (!typeStreamer) {
|
||||
continue;
|
||||
}
|
||||
if (propertyIndex >= properties.size() ||
|
||||
!properties.at(propertyIndex).getReader().matchesExactly(typeStreamer)) {
|
||||
matches = false;
|
||||
break;
|
||||
}
|
||||
hash.addData(property.name(), strlen(property.name()) + 1);
|
||||
propertyIndex++;
|
||||
}
|
||||
if (propertyIndex != properties.size()) {
|
||||
} else {
|
||||
matches = false;
|
||||
}
|
||||
}
|
||||
|
@ -656,7 +660,11 @@ Bitstream& Bitstream::operator>(ObjectReader& objectReader) {
|
|||
}
|
||||
|
||||
Bitstream& Bitstream::operator<(const TypeStreamer* streamer) {
|
||||
const char* typeName = QMetaType::typeName(streamer->getType());
|
||||
if (!streamer) {
|
||||
*this << QByteArray();
|
||||
return *this;
|
||||
}
|
||||
const char* typeName = streamer->getName();
|
||||
*this << QByteArray::fromRawData(typeName, strlen(typeName));
|
||||
if (_metadataType == NO_METADATA) {
|
||||
return *this;
|
||||
|
@ -667,6 +675,27 @@ Bitstream& Bitstream::operator<(const TypeStreamer* streamer) {
|
|||
case TypeReader::SIMPLE_TYPE:
|
||||
return *this;
|
||||
|
||||
case TypeReader::ENUM_TYPE: {
|
||||
QMetaEnum metaEnum = streamer->getMetaEnum();
|
||||
if (_metadataType == FULL_METADATA) {
|
||||
*this << metaEnum.keyCount();
|
||||
for (int i = 0; i < metaEnum.keyCount(); i++) {
|
||||
*this << QByteArray::fromRawData(metaEnum.key(i), strlen(metaEnum.key(i)));
|
||||
*this << metaEnum.value(i);
|
||||
}
|
||||
} else {
|
||||
*this << streamer->getBits();
|
||||
QCryptographicHash hash(QCryptographicHash::Md5);
|
||||
for (int i = 0; i < metaEnum.keyCount(); i++) {
|
||||
hash.addData(metaEnum.key(i), strlen(metaEnum.key(i)) + 1);
|
||||
qint32 value = metaEnum.value(i);
|
||||
hash.addData((const char*)&value, sizeof(qint32));
|
||||
}
|
||||
QByteArray hashResult = hash.result();
|
||||
write(hashResult.constData(), hashResult.size() * BITS_IN_BYTE);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
case TypeReader::LIST_TYPE:
|
||||
case TypeReader::SET_TYPE:
|
||||
return *this << streamer->getValueStreamer();
|
||||
|
@ -702,9 +731,16 @@ Bitstream& Bitstream::operator<(const TypeStreamer* streamer) {
|
|||
Bitstream& Bitstream::operator>(TypeReader& reader) {
|
||||
QByteArray typeName;
|
||||
*this >> typeName;
|
||||
if (typeName.isEmpty()) {
|
||||
reader = TypeReader();
|
||||
return *this;
|
||||
}
|
||||
const TypeStreamer* streamer = _typeStreamerSubstitutions.value(typeName);
|
||||
if (!streamer) {
|
||||
streamer = getTypeStreamers().value(QMetaType::type(typeName.constData()));
|
||||
if (!streamer) {
|
||||
streamer = getEnumStreamersByName().value(typeName);
|
||||
}
|
||||
}
|
||||
if (!streamer) {
|
||||
qWarning() << "Unknown type name: " << typeName << "\n";
|
||||
|
@ -719,7 +755,55 @@ Bitstream& Bitstream::operator>(TypeReader& reader) {
|
|||
case TypeReader::SIMPLE_TYPE:
|
||||
reader = TypeReader(typeName, streamer);
|
||||
return *this;
|
||||
|
||||
|
||||
case TypeReader::ENUM_TYPE: {
|
||||
if (_metadataType == FULL_METADATA) {
|
||||
int keyCount;
|
||||
*this >> keyCount;
|
||||
QMetaEnum metaEnum = (streamer && streamer->getReaderType() == TypeReader::ENUM_TYPE) ?
|
||||
streamer->getMetaEnum() : QMetaEnum();
|
||||
QHash<int, int> mappings;
|
||||
bool matches = (keyCount == metaEnum.keyCount());
|
||||
int highestValue = 0;
|
||||
for (int i = 0; i < keyCount; i++) {
|
||||
QByteArray key;
|
||||
int value;
|
||||
*this >> key >> value;
|
||||
highestValue = qMax(value, highestValue);
|
||||
int localValue = metaEnum.keyToValue(key);
|
||||
if (localValue != -1) {
|
||||
mappings.insert(value, localValue);
|
||||
}
|
||||
matches &= (value == localValue);
|
||||
}
|
||||
if (matches) {
|
||||
reader = TypeReader(typeName, streamer);
|
||||
} else {
|
||||
reader = TypeReader(typeName, streamer, getBitsForHighestValue(highestValue), mappings);
|
||||
}
|
||||
} else {
|
||||
int bits;
|
||||
*this >> bits;
|
||||
QCryptographicHash hash(QCryptographicHash::Md5);
|
||||
if (streamer && streamer->getReaderType() == TypeReader::ENUM_TYPE) {
|
||||
QMetaEnum metaEnum = streamer->getMetaEnum();
|
||||
for (int i = 0; i < metaEnum.keyCount(); i++) {
|
||||
hash.addData(metaEnum.key(i), strlen(metaEnum.key(i)) + 1);
|
||||
qint32 value = metaEnum.value(i);
|
||||
hash.addData((const char*)&value, sizeof(qint32));
|
||||
}
|
||||
}
|
||||
QByteArray localHashResult = hash.result();
|
||||
QByteArray remoteHashResult(localHashResult.size(), 0);
|
||||
read(remoteHashResult.data(), remoteHashResult.size() * BITS_IN_BYTE);
|
||||
if (localHashResult == remoteHashResult) {
|
||||
reader = TypeReader(typeName, streamer);
|
||||
} else {
|
||||
reader = TypeReader(typeName, streamer, bits, QHash<int, int>());
|
||||
}
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
case TypeReader::LIST_TYPE:
|
||||
case TypeReader::SET_TYPE: {
|
||||
TypeReader valueReader;
|
||||
|
@ -728,7 +812,7 @@ Bitstream& Bitstream::operator>(TypeReader& reader) {
|
|||
valueReader.matchesExactly(streamer->getValueStreamer())) {
|
||||
reader = TypeReader(typeName, streamer);
|
||||
} else {
|
||||
reader = TypeReader(typeName, streamer, false, (TypeReader::Type)type, TypeReaderPointer(),
|
||||
reader = TypeReader(typeName, streamer, (TypeReader::Type)type,
|
||||
TypeReaderPointer(new TypeReader(valueReader)));
|
||||
}
|
||||
return *this;
|
||||
|
@ -741,8 +825,8 @@ Bitstream& Bitstream::operator>(TypeReader& reader) {
|
|||
valueReader.matchesExactly(streamer->getValueStreamer())) {
|
||||
reader = TypeReader(typeName, streamer);
|
||||
} else {
|
||||
reader = TypeReader(typeName, streamer, false, TypeReader::MAP_TYPE,
|
||||
TypeReaderPointer(new TypeReader(keyReader)), TypeReaderPointer(new TypeReader(valueReader)));
|
||||
reader = TypeReader(typeName, streamer, TypeReaderPointer(new TypeReader(keyReader)),
|
||||
TypeReaderPointer(new TypeReader(valueReader)));
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
@ -800,23 +884,20 @@ Bitstream& Bitstream::operator>(TypeReader& reader) {
|
|||
// if all fields are the same type and in the right order, we can use the (more efficient) default streamer
|
||||
const QVector<MetaField>& localFields = streamer->getMetaFields();
|
||||
if (fieldCount != localFields.size()) {
|
||||
reader = TypeReader(typeName, streamer, false, TypeReader::STREAMABLE_TYPE,
|
||||
TypeReaderPointer(), TypeReaderPointer(), fields);
|
||||
reader = TypeReader(typeName, streamer, fields);
|
||||
return *this;
|
||||
}
|
||||
for (int i = 0; i < fieldCount; i++) {
|
||||
const FieldReader& fieldReader = fields.at(i);
|
||||
if (!fieldReader.getReader().matchesExactly(localFields.at(i).getStreamer()) || fieldReader.getIndex() != i) {
|
||||
reader = TypeReader(typeName, streamer, false, TypeReader::STREAMABLE_TYPE,
|
||||
TypeReaderPointer(), TypeReaderPointer(), fields);
|
||||
reader = TypeReader(typeName, streamer, fields);
|
||||
return *this;
|
||||
}
|
||||
}
|
||||
reader = TypeReader(typeName, streamer);
|
||||
return *this;
|
||||
}
|
||||
reader = TypeReader(typeName, streamer, false, TypeReader::STREAMABLE_TYPE,
|
||||
TypeReaderPointer(), TypeReaderPointer(), fields);
|
||||
reader = TypeReader(typeName, streamer, fields);
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
@ -847,9 +928,10 @@ Bitstream& Bitstream::operator<(const SharedObjectPointer& object) {
|
|||
return *this << (int)0;
|
||||
}
|
||||
*this << object->getID();
|
||||
QPointer<SharedObject> reference = _sharedObjectReferences.value(object->getID());
|
||||
*this << object->getOriginID();
|
||||
QPointer<SharedObject> reference = _sharedObjectReferences.value(object->getOriginID());
|
||||
if (reference) {
|
||||
writeRawDelta((QObject*)object.data(), (QObject*)reference.data());
|
||||
writeRawDelta((const QObject*)object.data(), (const QObject*)reference.data());
|
||||
} else {
|
||||
*this << (QObject*)object.data();
|
||||
}
|
||||
|
@ -863,7 +945,9 @@ Bitstream& Bitstream::operator>(SharedObjectPointer& object) {
|
|||
object = SharedObjectPointer();
|
||||
return *this;
|
||||
}
|
||||
QPointer<SharedObject> reference = _sharedObjectReferences.value(id);
|
||||
int originID;
|
||||
*this >> originID;
|
||||
QPointer<SharedObject> reference = _sharedObjectReferences.value(originID);
|
||||
QPointer<SharedObject>& pointer = _weakSharedObjectHash[id];
|
||||
if (pointer) {
|
||||
ObjectReader objectReader;
|
||||
|
@ -876,15 +960,19 @@ Bitstream& Bitstream::operator>(SharedObjectPointer& object) {
|
|||
} else {
|
||||
QObject* rawObject;
|
||||
if (reference) {
|
||||
readRawDelta(rawObject, (QObject*)reference.data());
|
||||
readRawDelta(rawObject, (const QObject*)reference.data());
|
||||
} else {
|
||||
*this >> rawObject;
|
||||
}
|
||||
pointer = static_cast<SharedObject*>(rawObject);
|
||||
if (pointer) {
|
||||
if (reference) {
|
||||
pointer->setOriginID(reference->getOriginID());
|
||||
}
|
||||
pointer->setRemoteID(id);
|
||||
pointer->setRemoteOriginID(originID);
|
||||
} else {
|
||||
qDebug() << "Null object" << pointer << reference;
|
||||
qDebug() << "Null object" << pointer << reference << id;
|
||||
}
|
||||
}
|
||||
object = static_cast<SharedObject*>(pointer.data());
|
||||
|
@ -893,13 +981,38 @@ Bitstream& Bitstream::operator>(SharedObjectPointer& object) {
|
|||
|
||||
void Bitstream::clearSharedObject(QObject* object) {
|
||||
SharedObject* sharedObject = static_cast<SharedObject*>(object);
|
||||
_sharedObjectReferences.remove(sharedObject->getID());
|
||||
_sharedObjectReferences.remove(sharedObject->getOriginID());
|
||||
int id = _sharedObjectStreamer.takePersistentID(sharedObject);
|
||||
if (id != 0) {
|
||||
emit sharedObjectCleared(id);
|
||||
}
|
||||
}
|
||||
|
||||
const QVector<PropertyWriter>& Bitstream::getPropertyWriters(const QMetaObject* metaObject) {
|
||||
QVector<PropertyWriter>& propertyWriters = _propertyWriters[metaObject];
|
||||
if (propertyWriters.isEmpty()) {
|
||||
for (int i = 0; i < metaObject->propertyCount(); i++) {
|
||||
QMetaProperty property = metaObject->property(i);
|
||||
if (!property.isStored()) {
|
||||
continue;
|
||||
}
|
||||
const TypeStreamer* streamer;
|
||||
if (property.isEnumType()) {
|
||||
QMetaEnum metaEnum = property.enumerator();
|
||||
streamer = getEnumStreamers().value(QPair<QByteArray, QByteArray>(
|
||||
QByteArray::fromRawData(metaEnum.scope(), strlen(metaEnum.scope())),
|
||||
QByteArray::fromRawData(metaEnum.name(), strlen(metaEnum.name()))));
|
||||
} else {
|
||||
streamer = getTypeStreamers().value(property.userType());
|
||||
}
|
||||
if (streamer) {
|
||||
propertyWriters.append(PropertyWriter(property, streamer));
|
||||
}
|
||||
}
|
||||
}
|
||||
return propertyWriters;
|
||||
}
|
||||
|
||||
QHash<QByteArray, const QMetaObject*>& Bitstream::getMetaObjects() {
|
||||
static QHash<QByteArray, const QMetaObject*> metaObjects;
|
||||
return metaObjects;
|
||||
|
@ -915,6 +1028,16 @@ QHash<int, const TypeStreamer*>& Bitstream::getTypeStreamers() {
|
|||
return typeStreamers;
|
||||
}
|
||||
|
||||
QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*>& Bitstream::getEnumStreamers() {
|
||||
static QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*> enumStreamers;
|
||||
return enumStreamers;
|
||||
}
|
||||
|
||||
QHash<QByteArray, const TypeStreamer*>& Bitstream::getEnumStreamersByName() {
|
||||
static QHash<QByteArray, const TypeStreamer*> enumStreamersByName;
|
||||
return enumStreamersByName;
|
||||
}
|
||||
|
||||
QVector<PropertyReader> Bitstream::getPropertyReaders(const QMetaObject* metaObject) {
|
||||
QVector<PropertyReader> propertyReaders;
|
||||
if (!metaObject) {
|
||||
|
@ -925,31 +1048,78 @@ QVector<PropertyReader> Bitstream::getPropertyReaders(const QMetaObject* metaObj
|
|||
if (!property.isStored()) {
|
||||
continue;
|
||||
}
|
||||
const TypeStreamer* typeStreamer = getTypeStreamers().value(property.userType());
|
||||
if (typeStreamer) {
|
||||
propertyReaders.append(PropertyReader(TypeReader(QByteArray(), typeStreamer), property));
|
||||
const TypeStreamer* streamer;
|
||||
if (property.isEnumType()) {
|
||||
QMetaEnum metaEnum = property.enumerator();
|
||||
streamer = getEnumStreamers().value(QPair<QByteArray, QByteArray>(
|
||||
QByteArray::fromRawData(metaEnum.scope(), strlen(metaEnum.scope())),
|
||||
QByteArray::fromRawData(metaEnum.name(), strlen(metaEnum.name()))));
|
||||
} else {
|
||||
streamer = getTypeStreamers().value(property.userType());
|
||||
}
|
||||
if (streamer) {
|
||||
propertyReaders.append(PropertyReader(TypeReader(QByteArray(), streamer), property));
|
||||
}
|
||||
}
|
||||
return propertyReaders;
|
||||
}
|
||||
|
||||
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, bool exactMatch, Type type,
|
||||
const TypeReaderPointer& keyReader, const TypeReaderPointer& valueReader, const QVector<FieldReader>& fields) :
|
||||
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer) :
|
||||
_typeName(typeName),
|
||||
_streamer(streamer),
|
||||
_exactMatch(exactMatch),
|
||||
_type(type),
|
||||
_keyReader(keyReader),
|
||||
_valueReader(valueReader),
|
||||
_exactMatch(true) {
|
||||
}
|
||||
|
||||
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, int bits, const QHash<int, int>& mappings) :
|
||||
_typeName(typeName),
|
||||
_streamer(streamer),
|
||||
_exactMatch(false),
|
||||
_type(ENUM_TYPE),
|
||||
_bits(bits),
|
||||
_mappings(mappings) {
|
||||
}
|
||||
|
||||
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, const QVector<FieldReader>& fields) :
|
||||
_typeName(typeName),
|
||||
_streamer(streamer),
|
||||
_exactMatch(false),
|
||||
_type(STREAMABLE_TYPE),
|
||||
_fields(fields) {
|
||||
}
|
||||
|
||||
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer,
|
||||
Type type, const TypeReaderPointer& valueReader) :
|
||||
_typeName(typeName),
|
||||
_streamer(streamer),
|
||||
_exactMatch(false),
|
||||
_type(type),
|
||||
_valueReader(valueReader) {
|
||||
}
|
||||
|
||||
TypeReader::TypeReader(const QByteArray& typeName, const TypeStreamer* streamer,
|
||||
const TypeReaderPointer& keyReader, const TypeReaderPointer& valueReader) :
|
||||
_typeName(typeName),
|
||||
_streamer(streamer),
|
||||
_exactMatch(false),
|
||||
_type(MAP_TYPE),
|
||||
_keyReader(keyReader),
|
||||
_valueReader(valueReader) {
|
||||
}
|
||||
|
||||
QVariant TypeReader::read(Bitstream& in) const {
|
||||
if (_exactMatch) {
|
||||
return _streamer->read(in);
|
||||
}
|
||||
QVariant object = _streamer ? QVariant(_streamer->getType(), 0) : QVariant();
|
||||
switch (_type) {
|
||||
case ENUM_TYPE: {
|
||||
int value = 0;
|
||||
in.read(&value, _bits);
|
||||
if (_streamer) {
|
||||
_streamer->setEnumValue(object, value, _mappings);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case STREAMABLE_TYPE: {
|
||||
foreach (const FieldReader& field, _fields) {
|
||||
field.read(in, _streamer, object);
|
||||
|
@ -1006,6 +1176,14 @@ void TypeReader::readRawDelta(Bitstream& in, QVariant& object, const QVariant& r
|
|||
return;
|
||||
}
|
||||
switch (_type) {
|
||||
case ENUM_TYPE: {
|
||||
int value = 0;
|
||||
in.read(&value, _bits);
|
||||
if (_streamer) {
|
||||
_streamer->setEnumValue(object, value, _mappings);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case STREAMABLE_TYPE: {
|
||||
foreach (const FieldReader& field, _fields) {
|
||||
field.readDelta(in, _streamer, object, reference);
|
||||
|
@ -1099,6 +1277,10 @@ uint qHash(const TypeReader& typeReader, uint seed) {
|
|||
return qHash(typeReader.getTypeName(), seed);
|
||||
}
|
||||
|
||||
QDebug& operator<<(QDebug& debug, const TypeReader& typeReader) {
|
||||
return debug << typeReader.getTypeName();
|
||||
}
|
||||
|
||||
FieldReader::FieldReader(const TypeReader& reader, int index) :
|
||||
_reader(reader),
|
||||
_index(index) {
|
||||
|
@ -1152,6 +1334,10 @@ uint qHash(const ObjectReader& objectReader, uint seed) {
|
|||
return qHash(objectReader.getClassName(), seed);
|
||||
}
|
||||
|
||||
QDebug& operator<<(QDebug& debug, const ObjectReader& objectReader) {
|
||||
return debug << objectReader.getClassName();
|
||||
}
|
||||
|
||||
PropertyReader::PropertyReader(const TypeReader& reader, const QMetaProperty& property) :
|
||||
_reader(reader),
|
||||
_property(property) {
|
||||
|
@ -1172,6 +1358,20 @@ void PropertyReader::readDelta(Bitstream& in, QObject* object, const QObject* re
|
|||
}
|
||||
}
|
||||
|
||||
PropertyWriter::PropertyWriter(const QMetaProperty& property, const TypeStreamer* streamer) :
|
||||
_property(property),
|
||||
_streamer(streamer) {
|
||||
}
|
||||
|
||||
void PropertyWriter::write(Bitstream& out, const QObject* object) const {
|
||||
_streamer->write(out, _property.read(object));
|
||||
}
|
||||
|
||||
void PropertyWriter::writeDelta(Bitstream& out, const QObject* object, const QObject* reference) const {
|
||||
_streamer->writeDelta(out, _property.read(object), reference && object->metaObject() == reference->metaObject() ?
|
||||
_property.read(reference) : QVariant());
|
||||
}
|
||||
|
||||
MetaField::MetaField(const QByteArray& name, const TypeStreamer* streamer) :
|
||||
_name(name),
|
||||
_streamer(streamer) {
|
||||
|
@ -1180,6 +1380,14 @@ MetaField::MetaField(const QByteArray& name, const TypeStreamer* streamer) :
|
|||
TypeStreamer::~TypeStreamer() {
|
||||
}
|
||||
|
||||
const char* TypeStreamer::getName() const {
|
||||
return QMetaType::typeName(_type);
|
||||
}
|
||||
|
||||
void TypeStreamer::setEnumValue(QVariant& object, int value, const QHash<int, int>& mappings) const {
|
||||
// nothing by default
|
||||
}
|
||||
|
||||
const QVector<MetaField>& TypeStreamer::getMetaFields() const {
|
||||
static QVector<MetaField> emptyMetaFields;
|
||||
return emptyMetaFields;
|
||||
|
@ -1201,6 +1409,14 @@ TypeReader::Type TypeStreamer::getReaderType() const {
|
|||
return TypeReader::SIMPLE_TYPE;
|
||||
}
|
||||
|
||||
int TypeStreamer::getBits() const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
QMetaEnum TypeStreamer::getMetaEnum() const {
|
||||
return QMetaEnum();
|
||||
}
|
||||
|
||||
const TypeStreamer* TypeStreamer::getKeyStreamer() const {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -1236,3 +1452,104 @@ QVariant TypeStreamer::getValue(const QVariant& object, int index) const {
|
|||
void TypeStreamer::setValue(QVariant& object, int index, const QVariant& value) const {
|
||||
// nothing by default
|
||||
}
|
||||
|
||||
QDebug& operator<<(QDebug& debug, const TypeStreamer* typeStreamer) {
|
||||
return debug << (typeStreamer ? QMetaType::typeName(typeStreamer->getType()) : "null");
|
||||
}
|
||||
|
||||
QDebug& operator<<(QDebug& debug, const QMetaObject* metaObject) {
|
||||
return debug << (metaObject ? metaObject->className() : "null");
|
||||
}
|
||||
|
||||
EnumTypeStreamer::EnumTypeStreamer(const QMetaEnum& metaEnum) :
|
||||
_metaEnum(metaEnum),
|
||||
_name(getEnumName(metaEnum)) {
|
||||
|
||||
setType(QMetaType::Int);
|
||||
|
||||
int highestValue = 0;
|
||||
for (int j = 0; j < metaEnum.keyCount(); j++) {
|
||||
highestValue = qMax(highestValue, metaEnum.value(j));
|
||||
}
|
||||
_bits = getBitsForHighestValue(highestValue);
|
||||
}
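getBitsForHighestValue() is not shown in this excerpt; the helper below is only an assumption about its intent (the width, in bits, needed to represent the largest enumerator), not the actual implementation from this change.

    // hypothetical helper, sketched for illustration only
    static int getBitsForHighestValue(int highestValue) {
        int bits = 1;                  // even a single-value enum needs one bit
        while (highestValue >>= 1) {
            bits++;
        }
        return bits;
    }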
|
||||
|
||||
const char* EnumTypeStreamer::getName() const {
|
||||
return _name.constData();
|
||||
}
|
||||
|
||||
TypeReader::Type EnumTypeStreamer::getReaderType() const {
|
||||
return TypeReader::ENUM_TYPE;
|
||||
}
|
||||
|
||||
int EnumTypeStreamer::getBits() const {
|
||||
return _bits;
|
||||
}
|
||||
|
||||
QMetaEnum EnumTypeStreamer::getMetaEnum() const {
|
||||
return _metaEnum;
|
||||
}
|
||||
|
||||
bool EnumTypeStreamer::equal(const QVariant& first, const QVariant& second) const {
|
||||
return first.toInt() == second.toInt();
|
||||
}
|
||||
|
||||
void EnumTypeStreamer::write(Bitstream& out, const QVariant& value) const {
|
||||
int intValue = value.toInt();
|
||||
out.write(&intValue, _bits);
|
||||
}
|
||||
|
||||
QVariant EnumTypeStreamer::read(Bitstream& in) const {
|
||||
int intValue = 0;
|
||||
in.read(&intValue, _bits);
|
||||
return intValue;
|
||||
}
|
||||
|
||||
void EnumTypeStreamer::writeDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const {
|
||||
int intValue = value.toInt(), intReference = reference.toInt();
|
||||
if (intValue == intReference) {
|
||||
out << false;
|
||||
} else {
|
||||
out << true;
|
||||
out.write(&intValue, _bits);
|
||||
}
|
||||
}
|
||||
|
||||
void EnumTypeStreamer::readDelta(Bitstream& in, QVariant& value, const QVariant& reference) const {
|
||||
bool changed;
|
||||
in >> changed;
|
||||
if (changed) {
|
||||
int intValue = 0;
|
||||
in.read(&intValue, _bits);
|
||||
value = intValue;
|
||||
} else {
|
||||
value = reference;
|
||||
}
|
||||
}
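A hedged round trip of the delta format above, assuming a streamer built for a 3-bit enum and an existing Bitstream out/in pair: an unchanged value costs one false bit, a changed value costs one true bit plus _bits of payload.

    streamer->writeDelta(out, QVariant(2), QVariant(2)); // equal to reference: a single false bit
    streamer->writeDelta(out, QVariant(5), QVariant(2)); // changed: true bit, then 5 in 3 bits

    QVariant value;
    streamer->readDelta(in, value, QVariant(2));         // value == 2, copied from the reference
    streamer->readDelta(in, value, QVariant(2));         // value == 5, read from the 3-bit payload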
|
||||
|
||||
void EnumTypeStreamer::writeRawDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const {
|
||||
int intValue = value.toInt();
|
||||
out.write(&intValue, _bits);
|
||||
}
|
||||
|
||||
void EnumTypeStreamer::readRawDelta(Bitstream& in, QVariant& value, const QVariant& reference) const {
|
||||
int intValue = 0;
|
||||
in.read(&intValue, _bits);
|
||||
value = intValue;
|
||||
}
|
||||
|
||||
void EnumTypeStreamer::setEnumValue(QVariant& object, int value, const QHash<int, int>& mappings) const {
|
||||
if (_metaEnum.isFlag()) {
|
||||
int combined = 0;
|
||||
for (QHash<int, int>::const_iterator it = mappings.constBegin(); it != mappings.constEnd(); it++) {
|
||||
if (value & it.key()) {
|
||||
combined |= it.value();
|
||||
}
|
||||
}
|
||||
object = combined;
|
||||
|
||||
} else {
|
||||
object = mappings.value(value);
|
||||
}
|
||||
}
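A small worked example of the flag branch above, with made-up mapping values (flagStreamer is assumed to be an EnumTypeStreamer built for a flag-style QMetaEnum): mappings translates each remote bit to its local equivalent, and the remapped bits are OR'd back together.

    QHash<int, int> mappings;
    mappings.insert(0x01, 0x02); // hypothetical: remote bit 0x01 is local bit 0x02
    mappings.insert(0x04, 0x01); // hypothetical: remote bit 0x04 is local bit 0x01

    QVariant object;
    flagStreamer->setEnumValue(object, 0x01 | 0x04, mappings); // object becomes 0x02 | 0x01 == 0x03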
|
||||
|
||||
|
|
|
@ -38,6 +38,7 @@ class FieldReader;
|
|||
class ObjectReader;
|
||||
class OwnedAttributeValue;
|
||||
class PropertyReader;
|
||||
class PropertyWriter;
|
||||
class TypeReader;
|
||||
class TypeStreamer;
|
||||
|
||||
|
@ -235,6 +236,9 @@ public:
|
|||
/// Substitutes the supplied type for the given type name's default mapping.
|
||||
void addTypeSubstitution(const QByteArray& typeName, int type);
|
||||
|
||||
/// Substitutes the named type for the given type name's default mapping.
|
||||
void addTypeSubstitution(const QByteArray& typeName, const char* replacementTypeName);
|
||||
|
||||
/// Writes a set of bits to the underlying stream.
|
||||
/// \param bits the number of bits to write
|
||||
/// \param offset the offset of the first bit
|
||||
|
@ -294,6 +298,9 @@ public:
|
|||
template<class T> void writeRawDelta(const QList<T>& value, const QList<T>& reference);
|
||||
template<class T> void readRawDelta(QList<T>& value, const QList<T>& reference);
|
||||
|
||||
template<class T> void writeRawDelta(const QVector<T>& value, const QVector<T>& reference);
|
||||
template<class T> void readRawDelta(QVector<T>& value, const QVector<T>& reference);
|
||||
|
||||
template<class T> void writeRawDelta(const QSet<T>& value, const QSet<T>& reference);
|
||||
template<class T> void readRawDelta(QSet<T>& value, const QSet<T>& reference);
|
||||
|
||||
|
@ -339,6 +346,9 @@ public:
|
|||
template<class T> Bitstream& operator<<(const QList<T>& list);
|
||||
template<class T> Bitstream& operator>>(QList<T>& list);
|
||||
|
||||
template<class T> Bitstream& operator<<(const QVector<T>& list);
|
||||
template<class T> Bitstream& operator>>(QVector<T>& list);
|
||||
|
||||
template<class T> Bitstream& operator<<(const QSet<T>& set);
|
||||
template<class T> Bitstream& operator>>(QSet<T>& set);
|
||||
|
||||
|
@ -390,6 +400,8 @@ private slots:
|
|||
|
||||
private:
|
||||
|
||||
const QVector<PropertyWriter>& getPropertyWriters(const QMetaObject* metaObject);
|
||||
|
||||
QDataStream& _underlying;
|
||||
quint8 _byte;
|
||||
int _position;
|
||||
|
@ -409,9 +421,13 @@ private:
|
|||
QHash<QByteArray, const QMetaObject*> _metaObjectSubstitutions;
|
||||
QHash<QByteArray, const TypeStreamer*> _typeStreamerSubstitutions;
|
||||
|
||||
QHash<const QMetaObject*, QVector<PropertyWriter> > _propertyWriters;
|
||||
|
||||
static QHash<QByteArray, const QMetaObject*>& getMetaObjects();
|
||||
static QMultiHash<const QMetaObject*, const QMetaObject*>& getMetaObjectSubClasses();
|
||||
static QHash<int, const TypeStreamer*>& getTypeStreamers();
|
||||
static QHash<QPair<QByteArray, QByteArray>, const TypeStreamer*>& getEnumStreamers();
|
||||
static QHash<QByteArray, const TypeStreamer*>& getEnumStreamersByName();
|
||||
static QVector<PropertyReader> getPropertyReaders(const QMetaObject* metaObject);
|
||||
};
|
||||
|
||||
|
@ -472,6 +488,36 @@ template<class T> inline void Bitstream::readRawDelta(QList<T>& value, const QLi
|
|||
}
|
||||
}
|
||||
|
||||
template<class T> inline void Bitstream::writeRawDelta(const QVector<T>& value, const QVector<T>& reference) {
|
||||
*this << value.size();
|
||||
*this << reference.size();
|
||||
for (int i = 0; i < value.size(); i++) {
|
||||
if (i < reference.size()) {
|
||||
writeDelta(value.at(i), reference.at(i));
|
||||
} else {
|
||||
*this << value.at(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template<class T> inline void Bitstream::readRawDelta(QVector<T>& value, const QVector<T>& reference) {
|
||||
value = reference;
|
||||
int size, referenceSize;
|
||||
*this >> size >> referenceSize;
|
||||
if (size < value.size()) {
|
||||
value.erase(value.begin() + size, value.end());
|
||||
}
|
||||
for (int i = 0; i < size; i++) {
|
||||
if (i < referenceSize) {
|
||||
readDelta(value[i], reference.at(i));
|
||||
} else {
|
||||
T element;
|
||||
*this >> element;
|
||||
value.append(element);
|
||||
}
|
||||
}
|
||||
}
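A brief usage sketch of the vector delta helpers above (out and in are assumed to be a matching Bitstream pair): elements covered by the reference are delta-coded, trailing elements are written in full.

    QVector<int> reference;
    reference << 1 << 2 << 3;

    QVector<int> value = reference;
    value[1] = 20;
    value.append(4);

    out.writeRawDelta(value, reference);  // deltas for indices 0..2, full write of the appended 4

    QVector<int> restored;
    in.readRawDelta(restored, reference); // restored ends up as (1, 20, 3, 4)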
|
||||
|
||||
template<class T> inline void Bitstream::writeRawDelta(const QSet<T>& value, const QSet<T>& reference) {
|
||||
int addedOrRemoved = 0;
|
||||
foreach (const T& element, value) {
|
||||
|
@ -600,6 +646,27 @@ template<class T> inline Bitstream& Bitstream::operator>>(QList<T>& list) {
|
|||
return *this;
|
||||
}
|
||||
|
||||
template<class T> inline Bitstream& Bitstream::operator<<(const QVector<T>& vector) {
|
||||
*this << vector.size();
|
||||
foreach (const T& entry, vector) {
|
||||
*this << entry;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<class T> inline Bitstream& Bitstream::operator>>(QVector<T>& vector) {
|
||||
int size;
|
||||
*this >> size;
|
||||
vector.clear();
|
||||
vector.reserve(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
T entry;
|
||||
*this >> entry;
|
||||
vector.append(entry);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<class T> inline Bitstream& Bitstream::operator<<(const QSet<T>& set) {
|
||||
*this << set.size();
|
||||
foreach (const T& entry, set) {
|
||||
|
@ -651,13 +718,20 @@ typedef QSharedPointer<TypeReader> TypeReaderPointer;
|
|||
class TypeReader {
|
||||
public:
|
||||
|
||||
enum Type { SIMPLE_TYPE, STREAMABLE_TYPE, LIST_TYPE, SET_TYPE, MAP_TYPE };
|
||||
enum Type { SIMPLE_TYPE, ENUM_TYPE, STREAMABLE_TYPE, LIST_TYPE, SET_TYPE, MAP_TYPE };
|
||||
|
||||
TypeReader(const QByteArray& typeName = QByteArray(), const TypeStreamer* streamer = NULL, bool exactMatch = true,
|
||||
Type type = SIMPLE_TYPE, const TypeReaderPointer& keyReader = TypeReaderPointer(),
|
||||
const TypeReaderPointer& valueReader = TypeReaderPointer(),
|
||||
const QVector<FieldReader>& fields = QVector<FieldReader>());
|
||||
TypeReader(const QByteArray& typeName = QByteArray(), const TypeStreamer* streamer = NULL);
|
||||
|
||||
TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, int bits, const QHash<int, int>& mappings);
|
||||
|
||||
TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, const QVector<FieldReader>& fields);
|
||||
|
||||
TypeReader(const QByteArray& typeName, const TypeStreamer* streamer, Type type,
|
||||
const TypeReaderPointer& valueReader);
|
||||
|
||||
TypeReader(const QByteArray& typeName, const TypeStreamer* streamer,
|
||||
const TypeReaderPointer& keyReader, const TypeReaderPointer& valueReader);
|
||||
|
||||
const QByteArray& getTypeName() const { return _typeName; }
|
||||
const TypeStreamer* getStreamer() const { return _streamer; }
|
||||
|
||||
|
@ -676,6 +750,8 @@ private:
|
|||
const TypeStreamer* _streamer;
|
||||
bool _exactMatch;
|
||||
Type _type;
|
||||
int _bits;
|
||||
QHash<int, int> _mappings;
|
||||
TypeReaderPointer _keyReader;
|
||||
TypeReaderPointer _valueReader;
|
||||
QVector<FieldReader> _fields;
|
||||
|
@ -683,6 +759,8 @@ private:
|
|||
|
||||
uint qHash(const TypeReader& typeReader, uint seed = 0);
|
||||
|
||||
QDebug& operator<<(QDebug& debug, const TypeReader& typeReader);
|
||||
|
||||
/// Contains the information required to read a metatype field from the stream and apply it.
|
||||
class FieldReader {
|
||||
public:
|
||||
|
@ -726,6 +804,8 @@ private:
|
|||
|
||||
uint qHash(const ObjectReader& objectReader, uint seed = 0);
|
||||
|
||||
QDebug& operator<<(QDebug& debug, const ObjectReader& objectReader);
|
||||
|
||||
/// Contains the information required to read an object property from the stream and apply it.
|
||||
class PropertyReader {
|
||||
public:
|
||||
|
@ -743,6 +823,24 @@ private:
|
|||
QMetaProperty _property;
|
||||
};
|
||||
|
||||
/// Contains the information required to obtain an object property and write it to the stream.
|
||||
class PropertyWriter {
|
||||
public:
|
||||
|
||||
PropertyWriter(const QMetaProperty& property = QMetaProperty(), const TypeStreamer* streamer = NULL);
|
||||
|
||||
const QMetaProperty& getProperty() const { return _property; }
|
||||
const TypeStreamer* getStreamer() const { return _streamer; }
|
||||
|
||||
void write(Bitstream& out, const QObject* object) const;
|
||||
void writeDelta(Bitstream& out, const QObject* object, const QObject* reference) const;
|
||||
|
||||
private:
|
||||
|
||||
QMetaProperty _property;
|
||||
const TypeStreamer* _streamer;
|
||||
};
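A hedged sketch of driving a PropertyWriter by hand; in practice the Bitstream builds these via getPropertyWriters(), but the calls show the intent of the two write paths (object, reference, and propertyIndex are assumptions of the example).

    QMetaProperty property = object->metaObject()->property(propertyIndex);
    PropertyWriter writer(property, Bitstream::getTypeStreamer(property.userType()));

    writer.write(out, object);                 // full value of the property
    writer.writeDelta(out, object, reference); // value encoded relative to the reference object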
|
||||
|
||||
/// Describes a metatype field.
|
||||
class MetaField {
|
||||
public:
|
||||
|
@ -772,6 +870,8 @@ public:
|
|||
void setType(int type) { _type = type; }
|
||||
int getType() const { return _type; }
|
||||
|
||||
virtual const char* getName() const;
|
||||
|
||||
virtual bool equal(const QVariant& first, const QVariant& second) const = 0;
|
||||
|
||||
virtual void write(Bitstream& out, const QVariant& value) const = 0;
|
||||
|
@ -783,6 +883,8 @@ public:
|
|||
virtual void writeRawDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const = 0;
|
||||
virtual void readRawDelta(Bitstream& in, QVariant& value, const QVariant& reference) const = 0;
|
||||
|
||||
virtual void setEnumValue(QVariant& object, int value, const QHash<int, int>& mappings) const;
|
||||
|
||||
virtual const QVector<MetaField>& getMetaFields() const;
|
||||
virtual int getFieldIndex(const QByteArray& name) const;
|
||||
virtual void setField(QVariant& object, int index, const QVariant& value) const;
|
||||
|
@ -790,6 +892,9 @@ public:
|
|||
|
||||
virtual TypeReader::Type getReaderType() const;
|
||||
|
||||
virtual int getBits() const;
|
||||
virtual QMetaEnum getMetaEnum() const;
|
||||
|
||||
virtual const TypeStreamer* getKeyStreamer() const;
|
||||
virtual const TypeStreamer* getValueStreamer() const;
|
||||
|
||||
|
@ -808,6 +913,10 @@ private:
|
|||
int _type;
|
||||
};
|
||||
|
||||
QDebug& operator<<(QDebug& debug, const TypeStreamer* typeStreamer);
|
||||
|
||||
QDebug& operator<<(QDebug& debug, const QMetaObject* metaObject);
|
||||
|
||||
/// A streamer that works with Bitstream's operators.
|
||||
template<class T> class SimpleTypeStreamer : public TypeStreamer {
|
||||
public:
|
||||
|
@ -818,11 +927,37 @@ public:
|
|||
virtual void writeDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const {
|
||||
out.writeDelta(value.value<T>(), reference.value<T>()); }
|
||||
virtual void readDelta(Bitstream& in, QVariant& value, const QVariant& reference) const {
|
||||
in.readDelta(*static_cast<T*>(value.data()), reference.value<T>()); }
|
||||
T rawValue; in.readDelta(rawValue, reference.value<T>()); value = QVariant::fromValue(rawValue); }
|
||||
virtual void writeRawDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const {
|
||||
out.writeRawDelta(value.value<T>(), reference.value<T>()); }
|
||||
virtual void readRawDelta(Bitstream& in, QVariant& value, const QVariant& reference) const {
|
||||
in.readRawDelta(*static_cast<T*>(value.data()), reference.value<T>()); }
|
||||
T rawValue; in.readRawDelta(rawValue, reference.value<T>()); value = QVariant::fromValue(rawValue); }
|
||||
};
|
||||
|
||||
/// A streamer class for enumerated types.
|
||||
class EnumTypeStreamer : public TypeStreamer {
|
||||
public:
|
||||
|
||||
EnumTypeStreamer(const QMetaEnum& metaEnum);
|
||||
|
||||
virtual const char* getName() const;
|
||||
virtual TypeReader::Type getReaderType() const;
|
||||
virtual int getBits() const;
|
||||
virtual QMetaEnum getMetaEnum() const;
|
||||
virtual bool equal(const QVariant& first, const QVariant& second) const;
|
||||
virtual void write(Bitstream& out, const QVariant& value) const;
|
||||
virtual QVariant read(Bitstream& in) const;
|
||||
virtual void writeDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const;
|
||||
virtual void readDelta(Bitstream& in, QVariant& value, const QVariant& reference) const;
|
||||
virtual void writeRawDelta(Bitstream& out, const QVariant& value, const QVariant& reference) const;
|
||||
virtual void readRawDelta(Bitstream& in, QVariant& value, const QVariant& reference) const;
|
||||
virtual void setEnumValue(QVariant& object, int value, const QHash<int, int>& mappings) const;
|
||||
|
||||
private:
|
||||
|
||||
QMetaEnum _metaEnum;
|
||||
QByteArray _name;
|
||||
int _bits;
|
||||
};
|
||||
|
||||
/// A streamer for types compiled by mtc.
|
||||
|
@ -858,6 +993,22 @@ public:
|
|||
static_cast<QList<T>*>(object.data())->replace(index, value.value<T>()); }
|
||||
};
|
||||
|
||||
/// A streamer for vector types.
|
||||
template<class T> class CollectionTypeStreamer<QVector<T> > : public SimpleTypeStreamer<QVector<T> > {
|
||||
public:
|
||||
|
||||
virtual TypeReader::Type getReaderType() const { return TypeReader::LIST_TYPE; }
|
||||
virtual const TypeStreamer* getValueStreamer() const { return Bitstream::getTypeStreamer(qMetaTypeId<T>()); }
|
||||
virtual void insert(QVariant& object, const QVariant& value) const {
|
||||
static_cast<QVector<T>*>(object.data())->append(value.value<T>()); }
|
||||
virtual void prune(QVariant& object, int size) const {
|
||||
QVector<T>* list = static_cast<QVector<T>*>(object.data()); list->erase(list->begin() + size, list->end()); }
|
||||
virtual QVariant getValue(const QVariant& object, int index) const {
|
||||
return QVariant::fromValue(static_cast<const QVector<T>*>(object.constData())->at(index)); }
|
||||
virtual void setValue(QVariant& object, int index, const QVariant& value) const {
|
||||
static_cast<QVector<T>*>(object.data())->replace(index, value.value<T>()); }
|
||||
};
|
||||
|
||||
/// A streamer for set types.
|
||||
template<class T> class CollectionTypeStreamer<QSet<T> > : public SimpleTypeStreamer<QSet<T> > {
|
||||
public:
|
||||
|
@ -940,6 +1091,13 @@ template<class T> int registerStreamableMetaType() {
|
|||
return type;
|
||||
}
|
||||
|
||||
/// Registers a collection type and its streamer.
|
||||
template<class T> int registerCollectionMetaType() {
|
||||
int type = qRegisterMetaType<T>();
|
||||
Bitstream::registerTypeStreamer(type, new CollectionTypeStreamer<T>());
|
||||
return type;
|
||||
}
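For example, a vector of floats could be registered once at startup; the call site below is hypothetical and shown only to illustrate the helper above.

    // installs a CollectionTypeStreamer<QVector<float> > alongside the Qt meta-type registration
    static int vectorOfFloatType = registerCollectionMetaType<QVector<float> >();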
|
||||
|
||||
/// Flags a class as streamable (use as you would Q_OBJECT).
|
||||
#define STREAMABLE public: \
|
||||
static const int Type; \
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
#include <QSet>
|
||||
#include <QVector>
|
||||
|
||||
#include "Bitstream.h"
|
||||
#include "AttributeRegistry.h"
|
||||
|
||||
class ReliableChannel;
|
||||
|
||||
|
|
|
@ -23,7 +23,9 @@ REGISTER_META_OBJECT(SharedObject)
|
|||
|
||||
SharedObject::SharedObject() :
|
||||
_id(++_lastID),
|
||||
_remoteID(0) {
|
||||
_originID(_id),
|
||||
_remoteID(0),
|
||||
_remoteOriginID(0) {
|
||||
|
||||
_weakHash.insert(_id, this);
|
||||
}
|
||||
|
@ -39,26 +41,33 @@ void SharedObject::decrementReferenceCount() {
|
|||
}
|
||||
}
|
||||
|
||||
SharedObject* SharedObject::clone(bool withID) const {
|
||||
SharedObject* SharedObject::clone(bool withID, SharedObject* target) const {
|
||||
// default behavior is to make a copy using the no-arg constructor and copy the stored properties
|
||||
const QMetaObject* metaObject = this->metaObject();
|
||||
SharedObject* newObject = static_cast<SharedObject*>(metaObject->newInstance());
|
||||
if (!target) {
|
||||
target = static_cast<SharedObject*>(metaObject->newInstance());
|
||||
}
|
||||
for (int i = 0; i < metaObject->propertyCount(); i++) {
|
||||
QMetaProperty property = metaObject->property(i);
|
||||
if (property.isStored()) {
|
||||
property.write(newObject, property.read(this));
|
||||
if (property.userType() == qMetaTypeId<SharedObjectPointer>()) {
|
||||
SharedObject* value = property.read(this).value<SharedObjectPointer>().data();
|
||||
property.write(target, QVariant::fromValue(value ? value->clone(withID) : value));
|
||||
} else {
|
||||
property.write(target, property.read(this));
|
||||
}
|
||||
}
|
||||
}
|
||||
foreach (const QByteArray& propertyName, dynamicPropertyNames()) {
|
||||
newObject->setProperty(propertyName, property(propertyName));
|
||||
target->setProperty(propertyName, property(propertyName));
|
||||
}
|
||||
if (withID) {
|
||||
newObject->setID(_id);
|
||||
target->setOriginID(_originID);
|
||||
}
|
||||
return newObject;
|
||||
return target;
|
||||
}
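A short usage sketch of the revised clone() (object names are illustrative): withID now carries the origin ID, and a non-NULL target is populated instead of allocating a fresh instance.

    SharedObject* copy = original->clone();          // new instance, new IDs
    SharedObject* tracked = original->clone(true);   // new instance, same origin ID as original
    original->clone(true, existingInstance);         // copy stored properties into existingInstance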
|
||||
|
||||
bool SharedObject::equals(const SharedObject* other) const {
|
||||
bool SharedObject::equals(const SharedObject* other, bool sharedAncestry) const {
|
||||
if (!other) {
|
||||
return false;
|
||||
}
|
||||
|
@ -67,7 +76,7 @@ bool SharedObject::equals(const SharedObject* other) const {
|
|||
}
|
||||
// default behavior is to compare the properties
|
||||
const QMetaObject* metaObject = this->metaObject();
|
||||
if (metaObject != other->metaObject()) {
|
||||
if (metaObject != other->metaObject() && !sharedAncestry) {
|
||||
return false;
|
||||
}
|
||||
for (int i = 0; i < metaObject->propertyCount(); i++) {
|
||||
|
@ -92,13 +101,15 @@ void SharedObject::dump(QDebug debug) const {
|
|||
debug << this;
|
||||
const QMetaObject* metaObject = this->metaObject();
|
||||
for (int i = 0; i < metaObject->propertyCount(); i++) {
|
||||
debug << metaObject->property(i).name() << metaObject->property(i).read(this);
|
||||
QMetaProperty property = metaObject->property(i);
|
||||
if (property.isStored()) {
|
||||
debug << property.name() << property.read(this);
|
||||
}
|
||||
}
|
||||
QList<QByteArray> dynamicPropertyNames = this->dynamicPropertyNames();
|
||||
foreach (const QByteArray& propertyName, dynamicPropertyNames) {
|
||||
debug << propertyName << property(propertyName);
|
||||
}
|
||||
}
|
||||
|
||||
void SharedObject::setID(int id) {
|
||||
_weakHash.remove(_id);
|
||||
_weakHash.insert(_id = id, this);
|
||||
}
|
||||
|
||||
int SharedObject::_lastID = 0;
|
||||
|
|
|
@ -41,31 +41,44 @@ public:
|
|||
/// Returns the unique local ID for this object.
|
||||
int getID() const { return _id; }
|
||||
|
||||
/// Returns the local origin ID for this object.
|
||||
int getOriginID() const { return _originID; }
|
||||
|
||||
void setOriginID(int originID) { _originID = originID; }
|
||||
|
||||
/// Returns the unique remote ID for this object, or zero if this is a local object.
|
||||
int getRemoteID() const { return _remoteID; }
|
||||
|
||||
void setRemoteID(int remoteID) { _remoteID = remoteID; }
|
||||
|
||||
/// Returns the remote origin ID for this object, or zero if this is a local object.
|
||||
int getRemoteOriginID() const { return _remoteOriginID; }
|
||||
|
||||
void setRemoteOriginID(int remoteOriginID) { _remoteOriginID = remoteOriginID; }
|
||||
|
||||
int getReferenceCount() const { return _referenceCount.load(); }
|
||||
void incrementReferenceCount();
|
||||
void decrementReferenceCount();
|
||||
|
||||
/// Creates a new clone of this object.
|
||||
/// \param withID if true, give the clone the same ID as this object
|
||||
virtual SharedObject* clone(bool withID = false) const;
|
||||
/// \param withID if true, give the clone the same origin ID as this object
|
||||
/// \param target if non-NULL, a target object to populate (as opposed to creating a new instance of this object's class)
|
||||
virtual SharedObject* clone(bool withID = false, SharedObject* target = NULL) const;
|
||||
|
||||
/// Tests this object for equality with another.
|
||||
virtual bool equals(const SharedObject* other) const;
|
||||
/// \param sharedAncestry if true and the classes of the objects differ, compare their shared ancestry (assuming that
|
||||
/// this is an instance of a superclass of the other object's class) rather than simply returning false.
|
||||
virtual bool equals(const SharedObject* other, bool sharedAncestry = false) const;
|
||||
|
||||
// Dumps the contents of this object to the debug output.
|
||||
virtual void dump(QDebug debug = QDebug(QtDebugMsg)) const;
|
||||
|
||||
private:
|
||||
|
||||
void setID(int id);
|
||||
|
||||
int _id;
|
||||
int _originID;
|
||||
int _remoteID;
|
||||
int _remoteOriginID;
|
||||
QAtomicInt _referenceCount;
|
||||
|
||||
static int _lastID;
|
||||
|
|
|
@ -50,11 +50,16 @@ ModelTreeElement* ModelTreeElement::addChildAtIndex(int index) {
|
|||
}
|
||||
|
||||
|
||||
// TODO: This will attempt to store as many models as will fit in the packetData; if an individual model won't
// fit but some models did, the element outputs what it can. Once the general Octree::encodeXXX()
// process supports partial encoding of an octree element, this will need to be updated to handle spanning its
// contents across multiple packets.
|
||||
bool ModelTreeElement::appendElementData(OctreePacketData* packetData, EncodeBitstreamParams& params) const {
|
||||
bool success = true; // assume the best...
|
||||
|
||||
// write our models out... first determine which of the models are in view based on our params
|
||||
uint16_t numberOfModels = 0;
|
||||
uint16_t actualNumberOfModels = 0;
|
||||
QVector<uint16_t> indexesOfModelsToInclude;
|
||||
|
||||
for (uint16_t i = 0; i < _modelItems->size(); i++) {
|
||||
|
@ -72,17 +77,33 @@ bool ModelTreeElement::appendElementData(OctreePacketData* packetData, EncodeBit
|
|||
}
|
||||
}
|
||||
|
||||
int numberOfModelsOffset = packetData->getUncompressedByteOffset();
|
||||
success = packetData->appendValue(numberOfModels);
|
||||
|
||||
if (success) {
|
||||
foreach (uint16_t i, indexesOfModelsToInclude) {
|
||||
const ModelItem& model = (*_modelItems)[i];
|
||||
|
||||
LevelDetails modelLevel = packetData->startLevel();
|
||||
|
||||
success = model.appendModelData(packetData);
|
||||
|
||||
if (success) {
|
||||
packetData->endLevel(modelLevel);
|
||||
actualNumberOfModels++;
|
||||
}
|
||||
if (!success) {
|
||||
packetData->discardLevel(modelLevel);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!success) {
|
||||
success = packetData->updatePriorBytes(numberOfModelsOffset,
|
||||
(const unsigned char*)&actualNumberOfModels, sizeof(actualNumberOfModels));
|
||||
}
|
||||
|
||||
return success;
|
||||
}
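The startLevel/endLevel/discardLevel bracketing used above is the general pattern for appending optional payload to an OctreePacketData; a hedged sketch follows (appendToPacket is a hypothetical append call, not part of this change).

    LevelDetails level = packetData->startLevel();
    if (somePayload.appendToPacket(packetData)) {
        packetData->endLevel(level);     // keep everything written since startLevel()
    } else {
        packetData->discardLevel(level); // roll the packet back to where the level began
    }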
|
||||
|
||||
|
@ -433,6 +454,7 @@ int ModelTreeElement::readElementDataFromBuffer(const unsigned char* data, int b
|
|||
if (bytesLeftToRead >= (int)sizeof(numberOfModels)) {
|
||||
// read our models in....
|
||||
numberOfModels = *(uint16_t*)dataAt;
|
||||
|
||||
dataAt += sizeof(numberOfModels);
|
||||
bytesLeftToRead -= (int)sizeof(numberOfModels);
|
||||
bytesRead += sizeof(numberOfModels);
|
||||
|
|
|
@ -47,6 +47,10 @@ int packArithmeticallyCodedValue(int value, char* destination) {
|
|||
|
||||
PacketVersion versionForPacketType(PacketType type) {
|
||||
switch (type) {
|
||||
case PacketTypeMicrophoneAudioNoEcho:
|
||||
case PacketTypeMicrophoneAudioWithEcho:
|
||||
case PacketTypeSilentAudioFrame:
|
||||
return 1;
|
||||
case PacketTypeAvatarData:
|
||||
return 3;
|
||||
case PacketTypeAvatarIdentity:
|
||||
|
|
|
@ -602,7 +602,6 @@ public:
|
|||
|
||||
bool findRayIntersectionOp(OctreeElement* element, void* extraData) {
|
||||
RayArgs* args = static_cast<RayArgs*>(extraData);
|
||||
|
||||
bool keepSearching = true;
|
||||
if (element->findRayIntersection(args->origin, args->direction, keepSearching,
|
||||
args->element, args->distance, args->face, args->intersectedObject)) {
|
||||
|
@ -1336,14 +1335,23 @@ int Octree::encodeTreeBitstreamRecursion(OctreeElement* element,
|
|||
}
|
||||
}
|
||||
|
||||
// write the color data...
|
||||
// write the child element data...
|
||||
if (continueThisLevel && params.includeColor) {
|
||||
for (int i = 0; i < NUMBER_OF_CHILDREN; i++) {
|
||||
if (oneAtBit(childrenColoredBits, i)) {
|
||||
OctreeElement* childElement = element->getChildAtIndex(i);
|
||||
if (childElement) {
|
||||
|
||||
int bytesBeforeChild = packetData->getUncompressedSize();
|
||||
|
||||
// TODO: we want to support the ability for a childElement to "partially" write its data.
|
||||
// for example, consider the case of the model server where the entire contents of the
|
||||
// element may be larger than can fit in a single MTU/packetData. In this case, we want
|
||||
// to allow the appendElementData() to respond that it produced partial data, which should be
|
||||
// written, but that the childElement needs to be reprocessed in an additional pass or passes
|
||||
// to be completed. In the case that an element was partially written, we need to
|
||||
continueThisLevel = childElement->appendElementData(packetData, params);
|
||||
|
||||
int bytesAfterChild = packetData->getUncompressedSize();
|
||||
|
||||
if (!continueThisLevel) {
|
||||
|
|
418
libraries/shared/src/PropertyFlags.h
Normal file
|
@ -0,0 +1,418 @@
|
|||
//
|
||||
// PropertyFlags.h
|
||||
// libraries/shared/src
|
||||
//
|
||||
// Created by Brad Hefta-Gaub on 6/3/14.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
//
|
||||
// TODO:
|
||||
// * consider adding iterator to enumerate the properties that have been set?
|
||||
|
||||
#ifndef hifi_PropertyFlags_h
|
||||
#define hifi_PropertyFlags_h
|
||||
|
||||
#include <algorithm>
|
||||
#include <climits>
|
||||
|
||||
#include <QBitArray>
|
||||
#include <QByteArray>
|
||||
|
||||
#include <SharedUtil.h>
|
||||
|
||||
template<typename Enum>class PropertyFlags {
|
||||
public:
|
||||
typedef Enum enum_type;
|
||||
inline PropertyFlags() :
|
||||
_maxFlag(INT_MIN), _minFlag(INT_MAX), _trailingFlipped(false) { };
|
||||
inline PropertyFlags(const PropertyFlags& other) :
|
||||
_flags(other._flags), _maxFlag(other._maxFlag), _minFlag(other._minFlag),
|
||||
_trailingFlipped(other._trailingFlipped) {}
|
||||
inline PropertyFlags(Enum flag) :
|
||||
_maxFlag(INT_MIN), _minFlag(INT_MAX), _trailingFlipped(false) { setHasProperty(flag); }
|
||||
|
||||
void clear() { _flags.clear(); _maxFlag = INT_MIN; _minFlag = INT_MAX; _trailingFlipped = false; }
|
||||
|
||||
Enum firstFlag() const { return (Enum)_minFlag; }
|
||||
Enum lastFlag() const { return (Enum)_maxFlag; }
|
||||
|
||||
void setHasProperty(Enum flag, bool value = true);
|
||||
bool getHasProperty(Enum flag);
|
||||
QByteArray encode();
|
||||
void decode(const QByteArray& fromEncoded);
|
||||
|
||||
|
||||
bool operator==(const PropertyFlags& other) const { return _flags == other._flags; }
|
||||
bool operator!=(const PropertyFlags& other) const { return _flags != other._flags; }
|
||||
bool operator!() const { return _flags.size() == 0; }
|
||||
|
||||
PropertyFlags& operator=(const PropertyFlags& other);
|
||||
|
||||
PropertyFlags& operator|=(PropertyFlags other);
|
||||
PropertyFlags& operator|=(Enum flag);
|
||||
|
||||
PropertyFlags& operator&=(PropertyFlags other);
|
||||
PropertyFlags& operator&=(Enum flag);
|
||||
|
||||
PropertyFlags& operator+=(PropertyFlags other);
|
||||
PropertyFlags& operator+=(Enum flag);
|
||||
|
||||
PropertyFlags& operator-=(PropertyFlags other);
|
||||
PropertyFlags& operator-=(Enum flag);
|
||||
|
||||
PropertyFlags& operator<<=(PropertyFlags other);
|
||||
PropertyFlags& operator<<=(Enum flag);
|
||||
|
||||
PropertyFlags operator|(PropertyFlags other) const;
|
||||
PropertyFlags operator|(Enum flag) const;
|
||||
|
||||
PropertyFlags operator&(PropertyFlags other) const;
|
||||
PropertyFlags operator&(Enum flag) const;
|
||||
|
||||
PropertyFlags operator+(PropertyFlags other) const;
|
||||
PropertyFlags operator+(Enum flag) const;
|
||||
|
||||
PropertyFlags operator-(PropertyFlags other) const;
|
||||
PropertyFlags operator-(Enum flag) const;
|
||||
|
||||
PropertyFlags operator<<(PropertyFlags other) const;
|
||||
PropertyFlags operator<<(Enum flag) const;
|
||||
|
||||
// NOTE: due to the nature of the compact storage of these property flags, and the fact that the upper bound of the
|
||||
// enum is not known, these operators will only perform their bitwise operations on the set of properties that have
|
||||
// been previously set
|
||||
PropertyFlags& operator^=(PropertyFlags other);
|
||||
PropertyFlags& operator^=(Enum flag);
|
||||
PropertyFlags operator^(PropertyFlags other) const;
|
||||
PropertyFlags operator^(Enum flag) const;
|
||||
PropertyFlags operator~() const;
|
||||
|
||||
void debugDumpBits();
|
||||
|
||||
|
||||
private:
|
||||
void shinkIfNeeded();
|
||||
|
||||
QBitArray _flags;
|
||||
int _maxFlag;
|
||||
int _minFlag;
|
||||
bool _trailingFlipped; /// are the trailing properties flipping in their state (e.g. assumed true, instead of false)
|
||||
};
|
||||
|
||||
template<typename Enum> PropertyFlags<Enum>& operator<<(PropertyFlags<Enum>& out, const PropertyFlags<Enum>& other) {
|
||||
return out <<= other;
|
||||
}
|
||||
|
||||
template<typename Enum> PropertyFlags<Enum>& operator<<(PropertyFlags<Enum>& out, Enum flag) {
|
||||
return out <<= flag;
|
||||
}
|
||||
|
||||
|
||||
template<typename Enum> inline void PropertyFlags<Enum>::setHasProperty(Enum flag, bool value) {
|
||||
// keep track of our min flag
|
||||
if (flag < _minFlag) {
|
||||
if (value) {
|
||||
_minFlag = flag;
|
||||
}
|
||||
}
|
||||
if (flag > _maxFlag) {
|
||||
if (value) {
|
||||
_maxFlag = flag;
|
||||
_flags.resize(_maxFlag + 1);
|
||||
} else {
|
||||
return; // bail early, we're setting a flag outside of our current _maxFlag to false, which is already the default
|
||||
}
|
||||
}
|
||||
_flags.setBit(flag, value);
|
||||
|
||||
if (flag == _maxFlag && !value) {
|
||||
shinkIfNeeded();
|
||||
}
|
||||
}
|
||||
|
||||
template<typename Enum> inline bool PropertyFlags<Enum>::getHasProperty(Enum flag) {
|
||||
if (flag > _maxFlag) {
|
||||
return _trailingFlipped; // usually false
|
||||
}
|
||||
return _flags.testBit(flag);
|
||||
}
|
||||
|
||||
const int BITS_PER_BYTE = 8;
|
||||
|
||||
template<typename Enum> inline QByteArray PropertyFlags<Enum>::encode() {
|
||||
QByteArray output;
|
||||
|
||||
if (_maxFlag < _minFlag) {
|
||||
output.fill(0, 1);
|
||||
return output; // no flags... nothing to encode
|
||||
}
|
||||
|
||||
// we should size the array to the correct size.
|
||||
int lengthInBytes = (_maxFlag / (BITS_PER_BYTE - 1)) + 1;
|
||||
|
||||
output.fill(0, lengthInBytes);
|
||||
|
||||
// next pack the header bits: the first N-1 are set to 1 and the last to 0, a unary encoding of the total byte count
|
||||
for(int i = 0; i < lengthInBytes; i++) {
|
||||
int outputIndex = i;
|
||||
int bitValue = (i < (lengthInBytes - 1) ? 1 : 0);
|
||||
char original = output.at(outputIndex / BITS_PER_BYTE);
|
||||
int shiftBy = BITS_PER_BYTE - ((outputIndex % BITS_PER_BYTE) + 1);
|
||||
char thisBit = ( bitValue << shiftBy);
|
||||
output[i / BITS_PER_BYTE] = (original | thisBit);
|
||||
}
|
||||
|
||||
// finally pack the actual bits from the bit array
|
||||
for(int i = lengthInBytes; i < (lengthInBytes + _maxFlag + 1); i++) {
|
||||
int flagIndex = i - lengthInBytes;
|
||||
int outputIndex = i;
|
||||
int bitValue = ( _flags[flagIndex] ? 1 : 0);
|
||||
char original = output.at(outputIndex / BITS_PER_BYTE);
|
||||
int shiftBy = BITS_PER_BYTE - ((outputIndex % BITS_PER_BYTE) + 1);
|
||||
char thisBit = ( bitValue << shiftBy);
|
||||
output[i / BITS_PER_BYTE] = (original | thisBit);
|
||||
}
|
||||
return output;
|
||||
}
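A worked example of the layout above, using the ModelPropertyList enum from the octree tests later in this change: with only flag 3 (PROP_POSITION) set, _maxFlag is 3 and one payload byte suffices, so the header is a single 0 bit followed by the flag bits 0001, i.e. the byte 0x08.

    PropertyFlags<ModelPropertyList> flags;
    flags.setHasProperty(PROP_POSITION);   // enum value 3

    QByteArray encoded = flags.encode();   // encoded.size() == 1, encoded.at(0) == 0x08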
|
||||
|
||||
template<typename Enum> inline void PropertyFlags<Enum>::decode(const QByteArray& fromEncodedBytes) {
|
||||
|
||||
clear(); // we are cleared out!
|
||||
|
||||
// first convert the ByteArray into a BitArray...
|
||||
QBitArray encodedBits;
|
||||
int bitCount = BITS_PER_BYTE * fromEncodedBytes.count();
|
||||
encodedBits.resize(bitCount);
|
||||
|
||||
for(int byte = 0; byte < fromEncodedBytes.count(); byte++) {
|
||||
char originalByte = fromEncodedBytes.at(byte);
|
||||
for(int bit = 0; bit < BITS_PER_BYTE; bit++) {
|
||||
int shiftBy = BITS_PER_BYTE - (bit + 1);
|
||||
char maskBit = ( 1 << shiftBy);
|
||||
bool bitValue = originalByte & maskBit;
|
||||
encodedBits.setBit(byte * BITS_PER_BYTE + bit, bitValue);
|
||||
}
|
||||
}
|
||||
|
||||
// next, read the leading bits to determine the correct number of bytes to decode (may not match the QByteArray)
|
||||
int encodedByteCount = 0;
|
||||
int bitAt;
|
||||
for (bitAt = 0; bitAt < bitCount; bitAt++) {
|
||||
if (encodedBits.at(bitAt)) {
|
||||
encodedByteCount++;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
encodedByteCount++; // always at least one byte
|
||||
int expectedBitCount = encodedByteCount * BITS_PER_BYTE;
|
||||
|
||||
// Now, keep reading...
|
||||
int flagsStartAt = bitAt + 1;
|
||||
for (bitAt = flagsStartAt; bitAt < expectedBitCount; bitAt++) {
|
||||
if (encodedBits.at(bitAt)) {
|
||||
setHasProperty((Enum)(bitAt - flagsStartAt));
|
||||
}
|
||||
}
|
||||
}
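Because the leading 1-bits carry the byte count, decode() also works when the QByteArray holds more than just the flags; a hedged sketch (someFlags and restOfPacket are placeholders):

    QByteArray buffer = someFlags.encode();
    buffer.append(restOfPacket);            // e.g. the remainder of a packet follows the flags

    PropertyFlags<ModelPropertyList> decoded;
    decoded.decode(buffer);                 // only the length-prefixed leading bytes are consumed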
|
||||
|
||||
template<typename Enum> inline void PropertyFlags<Enum>::debugDumpBits() {
|
||||
qDebug() << "_minFlag=" << _minFlag;
|
||||
qDebug() << "_maxFlag=" << _maxFlag;
|
||||
qDebug() << "_trailingFlipped=" << _trailingFlipped;
|
||||
for(int i = 0; i < _flags.size(); i++) {
|
||||
qDebug() << "bit[" << i << "]=" << _flags.at(i);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator=(const PropertyFlags& other) {
|
||||
_flags = other._flags;
|
||||
_maxFlag = other._maxFlag;
|
||||
_minFlag = other._minFlag;
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator|=(PropertyFlags other) {
|
||||
_flags |= other._flags;
|
||||
_maxFlag = std::max(_maxFlag, other._maxFlag);
|
||||
_minFlag = std::min(_minFlag, other._minFlag);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator|=(Enum flag) {
|
||||
PropertyFlags other(flag);
|
||||
_flags |= other._flags;
|
||||
_maxFlag = std::max(_maxFlag, other._maxFlag);
|
||||
_minFlag = std::min(_minFlag, other._minFlag);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator&=(PropertyFlags other) {
|
||||
_flags &= other._flags;
|
||||
shinkIfNeeded();
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator&=(Enum flag) {
|
||||
PropertyFlags other(flag);
|
||||
_flags &= other._flags;
|
||||
shinkIfNeeded();
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator^=(PropertyFlags other) {
|
||||
_flags ^= other._flags;
|
||||
shinkIfNeeded();
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator^=(Enum flag) {
|
||||
PropertyFlags other(flag);
|
||||
_flags ^= other._flags;
|
||||
shinkIfNeeded();
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator+=(PropertyFlags other) {
|
||||
for(int flag = (int)other.firstFlag(); flag <= (int)other.lastFlag(); flag++) {
|
||||
if (other.getHasProperty((Enum)flag)) {
|
||||
setHasProperty((Enum)flag, true);
|
||||
}
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator+=(Enum flag) {
|
||||
setHasProperty(flag, true);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator-=(PropertyFlags other) {
|
||||
for(int flag = (int)other.firstFlag(); flag <= (int)other.lastFlag(); flag++) {
|
||||
if (other.getHasProperty((Enum)flag)) {
|
||||
setHasProperty((Enum)flag, false);
|
||||
}
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator-=(Enum flag) {
|
||||
setHasProperty(flag, false);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator<<=(PropertyFlags other) {
|
||||
for(int flag = (int)other.firstFlag(); flag <= (int)other.lastFlag(); flag++) {
|
||||
if (other.getHasProperty((Enum)flag)) {
|
||||
setHasProperty((Enum)flag, true);
|
||||
}
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum>& PropertyFlags<Enum>::operator<<=(Enum flag) {
|
||||
setHasProperty(flag, true);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator|(PropertyFlags other) const {
|
||||
PropertyFlags result(*this);
|
||||
result |= other;
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator|(Enum flag) const {
|
||||
PropertyFlags result(*this);
|
||||
PropertyFlags other(flag);
|
||||
result |= other;
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator&(PropertyFlags other) const {
|
||||
PropertyFlags result(*this);
|
||||
result &= other;
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator&(Enum flag) const {
|
||||
PropertyFlags result(*this);
|
||||
PropertyFlags other(flag);
|
||||
result &= other;
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator^(PropertyFlags other) const {
|
||||
PropertyFlags result(*this);
|
||||
result ^= other;
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator^(Enum flag) const {
|
||||
PropertyFlags result(*this);
|
||||
PropertyFlags other(flag);
|
||||
result ^= other;
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator+(PropertyFlags other) const {
|
||||
PropertyFlags result(*this);
|
||||
result += other;
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator+(Enum flag) const {
|
||||
PropertyFlags result(*this);
|
||||
result.setHasProperty(flag, true);
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator-(PropertyFlags other) const {
|
||||
PropertyFlags result(*this);
|
||||
result -= other;
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator-(Enum flag) const {
|
||||
PropertyFlags result(*this);
|
||||
result.setHasProperty(flag, false);
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator<<(PropertyFlags other) const {
|
||||
PropertyFlags result(*this);
|
||||
result <<= other;
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator<<(Enum flag) const {
|
||||
PropertyFlags result(*this);
|
||||
result.setHasProperty(flag, true);
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Enum> inline PropertyFlags<Enum> PropertyFlags<Enum>::operator~() const {
|
||||
PropertyFlags result(*this);
|
||||
result._flags = ~_flags;
|
||||
result._trailingFlipped = !_trailingFlipped;
|
||||
return result;
|
||||
}
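The complement also flips _trailingFlipped, so queries beyond the stored bits report the inverted default; for example:

    PropertyFlags<ModelPropertyList> flags(PROP_VISIBLE);
    PropertyFlags<ModelPropertyList> inverted = ~flags;

    // inverted.getHasProperty(PROP_VISIBLE) == false (bit 2 was set, now cleared)
    // inverted.getHasProperty(PROP_SHOULD_BE_DELETED) == true (beyond _maxFlag, answered by _trailingFlipped)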
|
||||
|
||||
template<typename Enum> inline void PropertyFlags<Enum>::shinkIfNeeded() {
|
||||
int maxFlagWas = _maxFlag; // remember the previous _maxFlag so we can tell whether it shrank
|
||||
while (_maxFlag >= 0) {
|
||||
if (_flags.testBit(_maxFlag)) {
|
||||
break;
|
||||
}
|
||||
_maxFlag--;
|
||||
}
|
||||
if (maxFlagWas != _maxFlag) {
|
||||
_flags.resize(_maxFlag + 1);
|
||||
}
|
||||
}
|
||||
|
||||
#endif // hifi_PropertyFlags_h
|
||||
|
|
@ -51,6 +51,29 @@ static QByteArray createRandomBytes() {
|
|||
return createRandomBytes(MIN_BYTES, MAX_BYTES);
|
||||
}
|
||||
|
||||
static TestSharedObjectA::TestEnum getRandomTestEnum() {
|
||||
switch (randIntInRange(0, 2)) {
|
||||
case 0: return TestSharedObjectA::FIRST_TEST_ENUM;
|
||||
case 1: return TestSharedObjectA::SECOND_TEST_ENUM;
|
||||
case 2:
|
||||
default: return TestSharedObjectA::THIRD_TEST_ENUM;
|
||||
}
|
||||
}
|
||||
|
||||
static TestSharedObjectA::TestFlags getRandomTestFlags() {
|
||||
TestSharedObjectA::TestFlags flags = 0;
|
||||
if (randomBoolean()) {
|
||||
flags |= TestSharedObjectA::FIRST_TEST_FLAG;
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
flags |= TestSharedObjectA::SECOND_TEST_FLAG;
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
flags |= TestSharedObjectA::THIRD_TEST_FLAG;
|
||||
}
|
||||
return flags;
|
||||
}
|
||||
|
||||
static TestMessageC createRandomMessageC() {
|
||||
TestMessageC message;
|
||||
message.foo = randomBoolean();
|
||||
|
@ -64,9 +87,11 @@ static bool testSerialization(Bitstream::MetadataType metadataType) {
|
|||
QByteArray array;
|
||||
QDataStream outStream(&array, QIODevice::WriteOnly);
|
||||
Bitstream out(outStream, metadataType);
|
||||
SharedObjectPointer testObjectWrittenA = new TestSharedObjectA(randFloat());
|
||||
SharedObjectPointer testObjectWrittenA = new TestSharedObjectA(randFloat(), TestSharedObjectA::SECOND_TEST_ENUM,
|
||||
TestSharedObjectA::TestFlags(TestSharedObjectA::FIRST_TEST_FLAG | TestSharedObjectA::THIRD_TEST_FLAG));
|
||||
out << testObjectWrittenA;
|
||||
SharedObjectPointer testObjectWrittenB = new TestSharedObjectB(randFloat(), createRandomBytes());
|
||||
SharedObjectPointer testObjectWrittenB = new TestSharedObjectB(randFloat(), createRandomBytes(),
|
||||
TestSharedObjectB::THIRD_TEST_ENUM, TestSharedObjectB::SECOND_TEST_FLAG);
|
||||
out << testObjectWrittenB;
|
||||
TestMessageC messageWritten = createRandomMessageC();
|
||||
out << QVariant::fromValue(messageWritten);
|
||||
|
@ -79,6 +104,10 @@ static bool testSerialization(Bitstream::MetadataType metadataType) {
|
|||
in.addMetaObjectSubstitution("TestSharedObjectA", &TestSharedObjectB::staticMetaObject);
|
||||
in.addMetaObjectSubstitution("TestSharedObjectB", &TestSharedObjectA::staticMetaObject);
|
||||
in.addTypeSubstitution("TestMessageC", TestMessageA::Type);
|
||||
in.addTypeSubstitution("TestSharedObjectA::TestEnum", "TestSharedObjectB::TestEnum");
|
||||
in.addTypeSubstitution("TestSharedObjectB::TestEnum", "TestSharedObjectA::TestEnum");
|
||||
in.addTypeSubstitution("TestSharedObjectA::TestFlags", "TestSharedObjectB::TestFlags");
|
||||
in.addTypeSubstitution("TestSharedObjectB::TestFlags", "TestSharedObjectA::TestFlags");
|
||||
SharedObjectPointer testObjectReadA;
|
||||
in >> testObjectReadA;
|
||||
|
||||
|
@ -86,8 +115,11 @@ static bool testSerialization(Bitstream::MetadataType metadataType) {
|
|||
qDebug() << "Wrong class for A" << testObjectReadA << metadataType;
|
||||
return true;
|
||||
}
|
||||
if (metadataType == Bitstream::FULL_METADATA && static_cast<TestSharedObjectA*>(testObjectWrittenA.data())->getFoo() !=
|
||||
static_cast<TestSharedObjectB*>(testObjectReadA.data())->getFoo()) {
|
||||
if (metadataType == Bitstream::FULL_METADATA && (static_cast<TestSharedObjectA*>(testObjectWrittenA.data())->getFoo() !=
|
||||
static_cast<TestSharedObjectB*>(testObjectReadA.data())->getFoo() ||
|
||||
static_cast<TestSharedObjectB*>(testObjectReadA.data())->getBaz() != TestSharedObjectB::SECOND_TEST_ENUM ||
|
||||
static_cast<TestSharedObjectB*>(testObjectReadA.data())->getBong() !=
|
||||
TestSharedObjectB::TestFlags(TestSharedObjectB::FIRST_TEST_FLAG | TestSharedObjectB::THIRD_TEST_FLAG))) {
|
||||
QDebug debug = qDebug() << "Failed to transfer shared field from A to B";
|
||||
testObjectWrittenA->dump(debug);
|
||||
testObjectReadA->dump(debug);
|
||||
|
@ -100,8 +132,10 @@ static bool testSerialization(Bitstream::MetadataType metadataType) {
|
|||
qDebug() << "Wrong class for B" << testObjectReadB << metadataType;
|
||||
return true;
|
||||
}
|
||||
if (metadataType == Bitstream::FULL_METADATA && static_cast<TestSharedObjectB*>(testObjectWrittenB.data())->getFoo() !=
|
||||
static_cast<TestSharedObjectA*>(testObjectReadB.data())->getFoo()) {
|
||||
if (metadataType == Bitstream::FULL_METADATA && (static_cast<TestSharedObjectB*>(testObjectWrittenB.data())->getFoo() !=
|
||||
static_cast<TestSharedObjectA*>(testObjectReadB.data())->getFoo() ||
|
||||
static_cast<TestSharedObjectA*>(testObjectReadB.data())->getBaz() != TestSharedObjectA::THIRD_TEST_ENUM ||
|
||||
static_cast<TestSharedObjectA*>(testObjectReadB.data())->getBong() != TestSharedObjectA::SECOND_TEST_FLAG)) {
|
||||
QDebug debug = qDebug() << "Failed to transfer shared field from B to A";
|
||||
testObjectWrittenB->dump(debug);
|
||||
testObjectReadB->dump(debug);
|
||||
|
@ -175,7 +209,7 @@ bool MetavoxelTests::run() {
|
|||
|
||||
static SharedObjectPointer createRandomSharedObject() {
|
||||
switch (randIntInRange(0, 2)) {
|
||||
case 0: return new TestSharedObjectA(randFloat());
|
||||
case 0: return new TestSharedObjectA(randFloat(), getRandomTestEnum(), getRandomTestFlags());
|
||||
case 1: return new TestSharedObjectB();
|
||||
case 2:
|
||||
default: return SharedObjectPointer();
|
||||
|
@ -393,8 +427,10 @@ void Endpoint::readReliableChannel() {
|
|||
streamedBytesReceived += bytes.size();
|
||||
}
|
||||
|
||||
TestSharedObjectA::TestSharedObjectA(float foo) :
|
||||
_foo(foo) {
|
||||
TestSharedObjectA::TestSharedObjectA(float foo, TestEnum baz, TestFlags bong) :
|
||||
_foo(foo),
|
||||
_baz(baz),
|
||||
_bong(bong) {
|
||||
sharedObjectsCreated++;
|
||||
}
|
||||
|
||||
|
@ -408,9 +444,11 @@ void TestSharedObjectA::setFoo(float foo) {
|
|||
}
|
||||
}
|
||||
|
||||
TestSharedObjectB::TestSharedObjectB(float foo, const QByteArray& bar) :
|
||||
TestSharedObjectB::TestSharedObjectB(float foo, const QByteArray& bar, TestEnum baz, TestFlags bong) :
|
||||
_foo(foo),
|
||||
_bar(bar) {
|
||||
_bar(bar),
|
||||
_baz(baz),
|
||||
_bong(bong) {
|
||||
sharedObjectsCreated++;
|
||||
}

@@ -70,16 +70,31 @@ private:
/// A simple shared object.
class TestSharedObjectA : public SharedObject {
    Q_OBJECT
+   Q_ENUMS(TestEnum)
+   Q_FLAGS(TestFlag TestFlags)
    Q_PROPERTY(float foo READ getFoo WRITE setFoo NOTIFY fooChanged)
+   Q_PROPERTY(TestEnum baz READ getBaz WRITE setBaz)
+   Q_PROPERTY(TestFlags bong READ getBong WRITE setBong)

public:

-   Q_INVOKABLE TestSharedObjectA(float foo = 0.0f);
+   enum TestEnum { FIRST_TEST_ENUM, SECOND_TEST_ENUM, THIRD_TEST_ENUM };
+
+   enum TestFlag { NO_TEST_FLAGS = 0x0, FIRST_TEST_FLAG = 0x01, SECOND_TEST_FLAG = 0x02, THIRD_TEST_FLAG = 0x04 };
+   Q_DECLARE_FLAGS(TestFlags, TestFlag)
+
+   Q_INVOKABLE TestSharedObjectA(float foo = 0.0f, TestEnum baz = FIRST_TEST_ENUM, TestFlags bong = 0);
    virtual ~TestSharedObjectA();

    void setFoo(float foo);
    float getFoo() const { return _foo; }

+   void setBaz(TestEnum baz) { _baz = baz; }
+   TestEnum getBaz() const { return _baz; }
+
+   void setBong(TestFlags bong) { _bong = bong; }
+   TestFlags getBong() const { return _bong; }

signals:

    void fooChanged(float foo);

@@ -87,17 +102,30 @@ signals:
private:

    float _foo;
+   TestEnum _baz;
+   TestFlags _bong;
};

/// Another simple shared object.
class TestSharedObjectB : public SharedObject {
    Q_OBJECT
+   Q_ENUMS(TestEnum)
+   Q_FLAGS(TestFlag TestFlags)
    Q_PROPERTY(float foo READ getFoo WRITE setFoo)
    Q_PROPERTY(QByteArray bar READ getBar WRITE setBar)
+
+   Q_PROPERTY(TestEnum baz READ getBaz WRITE setBaz)
+   Q_PROPERTY(TestFlags bong READ getBong WRITE setBong)

public:

-   Q_INVOKABLE TestSharedObjectB(float foo = 0.0f, const QByteArray& bar = QByteArray());
+   enum TestEnum { ZEROTH_TEST_ENUM, FIRST_TEST_ENUM, SECOND_TEST_ENUM, THIRD_TEST_ENUM, FOURTH_TEST_ENUM };
+
+   enum TestFlag { NO_TEST_FLAGS = 0x0, ZEROTH_TEST_FLAG = 0x01, FIRST_TEST_FLAG = 0x02,
+       SECOND_TEST_FLAG = 0x04, THIRD_TEST_FLAG = 0x08, FOURTH_TEST_FLAG = 0x10 };
+   Q_DECLARE_FLAGS(TestFlags, TestFlag)
+
+   Q_INVOKABLE TestSharedObjectB(float foo = 0.0f, const QByteArray& bar = QByteArray(),
+       TestEnum baz = FIRST_TEST_ENUM, TestFlags bong = 0);
    virtual ~TestSharedObjectB();

    void setFoo(float foo) { _foo = foo; }

@@ -106,10 +134,18 @@ public:
    void setBar(const QByteArray& bar) { _bar = bar; }
    const QByteArray& getBar() const { return _bar; }

+   void setBaz(TestEnum baz) { _baz = baz; }
+   TestEnum getBaz() const { return _baz; }
+
+   void setBong(TestFlags bong) { _bong = bong; }
+   TestFlags getBong() const { return _bong; }

private:

    float _foo;
    QByteArray _bar;
+   TestEnum _baz;
+   TestFlags _bong;
};
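
Because Q_DECLARE_FLAGS makes TestFlags a QFlags-based type, individual TestFlag bits combine and test in the usual Qt way before being handed to the new bong property. A small assumed-usage sketch, not part of the commit:

// Assumed usage sketch: TestFlags behaves like any QFlags type.
TestSharedObjectB::TestFlags bong(TestSharedObjectB::ZEROTH_TEST_FLAG);
bong |= TestSharedObjectB::SECOND_TEST_FLAG;
bool hasSecond = bong.testFlag(TestSharedObjectB::SECOND_TEST_FLAG);   // true
bool hasFourth = bong.testFlag(TestSharedObjectB::FOURTH_TEST_FLAG);   // false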

/// A simple test message.

tests/octree/CMakeLists.txt (new file, 39 lines)
@@ -0,0 +1,39 @@
cmake_minimum_required(VERSION 2.8)

if (WIN32)
    cmake_policy (SET CMP0020 NEW)
endif (WIN32)

set(TARGET_NAME octree-tests)

set(ROOT_DIR ../..)
set(MACRO_DIR ${ROOT_DIR}/cmake/macros)

# setup for find modules
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../../cmake/modules/")

#find_package(Qt5Network REQUIRED)
#find_package(Qt5Script REQUIRED)
#find_package(Qt5Widgets REQUIRED)

include(${MACRO_DIR}/SetupHifiProject.cmake)
setup_hifi_project(${TARGET_NAME} TRUE)

include(${MACRO_DIR}/AutoMTC.cmake)
auto_mtc(${TARGET_NAME} ${ROOT_DIR})

#qt5_use_modules(${TARGET_NAME} Network Script Widgets)

#include glm
include(${MACRO_DIR}/IncludeGLM.cmake)
include_glm(${TARGET_NAME} ${ROOT_DIR})

# link in the shared libraries
include(${MACRO_DIR}/LinkHifiLibrary.cmake)
link_hifi_library(shared ${TARGET_NAME} ${ROOT_DIR})
link_hifi_library(octree ${TARGET_NAME} ${ROOT_DIR})

IF (WIN32)
    #target_link_libraries(${TARGET_NAME} Winmm Ws2_32)
ENDIF(WIN32)

tests/octree/src/OctreeTests.cpp (new file, 416 lines)
@@ -0,0 +1,416 @@
//
// OctreeTests.cpp
// tests/octree/src
//
// Created by Brad Hefta-Gaub on 06/04/2014.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <QDebug>

#include <PropertyFlags.h>
#include <SharedUtil.h>

#include "OctreeTests.h"

enum ModelPropertyList {
    PROP_PAGED_PROPERTY,
    PROP_CUSTOM_PROPERTIES_INCLUDED,
    PROP_VISIBLE,
    PROP_POSITION,
    PROP_RADIUS,
    PROP_MODEL_URL,
    PROP_ROTATION,
    PROP_COLOR,
    PROP_SCRIPT,
    PROP_ANIMATION_URL,
    PROP_ANIMATION_FPS,
    PROP_ANIMATION_FRAME_INDEX,
    PROP_ANIMATION_PLAYING,
    PROP_SHOULD_BE_DELETED
};

typedef PropertyFlags<ModelPropertyList> ModelPropertyFlags;

enum ParticlePropertyList {
    PARTICLE_PROP_PAGED_PROPERTY,
    PARTICLE_PROP_CUSTOM_PROPERTIES_INCLUDED,
    PARTICLE_PROP_VISIBLE,
    PARTICLE_PROP_POSITION,
    PARTICLE_PROP_RADIUS,
    PARTICLE_PROP_MODEL_URL,
    PARTICLE_PROP_ROTATION,
    PARTICLE_PROP_COLOR,
    PARTICLE_PROP_SCRIPT,
    PARTICLE_PROP_ANIMATION_URL,
    PARTICLE_PROP_ANIMATION_FPS,
    PARTICLE_PROP_ANIMATION_FRAME_INDEX,
    PARTICLE_PROP_ANIMATION_PLAYING,
    PARTICLE_PROP_SHOULD_BE_DELETED,
    PARTICLE_PROP_VELOCITY,
    PARTICLE_PROP_GRAVITY,
    PARTICLE_PROP_DAMPING,
    PARTICLE_PROP_MASS,
    PARTICLE_PROP_LIFETIME,
    PARTICLE_PROP_PAUSE_SIMULATION,
};

typedef PropertyFlags<ParticlePropertyList> ParticlePropertyFlags;
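
These two typedefs are the whole integration surface: PropertyFlags is parameterized only by an enum of property IDs. A hypothetical third list (names invented here, not from this commit) would be wrapped the same way:

// Hypothetical example of the same pattern.
enum ExamplePropertyList { EXAMPLE_PROP_FIRST, EXAMPLE_PROP_SECOND, EXAMPLE_PROP_THIRD };
typedef PropertyFlags<ExamplePropertyList> ExamplePropertyFlags;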

void OctreeTests::propertyFlagsTests() {
    qDebug() << "******************************************************************************************";
    qDebug() << "OctreeTests::propertyFlagsTests()";

    {
        qDebug() << "Test 1: ModelProperties: using setHasProperty()";
        ModelPropertyFlags props;
        props.setHasProperty(PROP_VISIBLE);
        props.setHasProperty(PROP_POSITION);
        props.setHasProperty(PROP_RADIUS);
        props.setHasProperty(PROP_MODEL_URL);
        props.setHasProperty(PROP_ROTATION);

        QByteArray encoded = props.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 2: ParticlePropertyFlags: using setHasProperty()";
        ParticlePropertyFlags props2;
        props2.setHasProperty(PARTICLE_PROP_VISIBLE);
        props2.setHasProperty(PARTICLE_PROP_ANIMATION_URL);
        props2.setHasProperty(PARTICLE_PROP_ANIMATION_FPS);
        props2.setHasProperty(PARTICLE_PROP_ANIMATION_FRAME_INDEX);
        props2.setHasProperty(PARTICLE_PROP_ANIMATION_PLAYING);
        props2.setHasProperty(PARTICLE_PROP_PAUSE_SIMULATION);

        QByteArray encoded = props2.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());

        qDebug() << "Test 2b: remove flag with setHasProperty() PARTICLE_PROP_PAUSE_SIMULATION";

        props2.setHasProperty(PARTICLE_PROP_PAUSE_SIMULATION, false);

        encoded = props2.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 3: ParticlePropertyFlags: using | operator";
        ParticlePropertyFlags props;

        props = ParticlePropertyFlags(PARTICLE_PROP_VISIBLE)
                    | ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_URL)
                    | ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_FPS)
                    | ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_FRAME_INDEX)
                    | ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_PLAYING)
                    | ParticlePropertyFlags(PARTICLE_PROP_PAUSE_SIMULATION);

        QByteArray encoded = props.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());

        qDebug() << "Test 3b: remove flag with -= PARTICLE_PROP_PAUSE_SIMULATION";
        props -= PARTICLE_PROP_PAUSE_SIMULATION;

        encoded = props.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 3c: ParticlePropertyFlags: using |= operator";
        ParticlePropertyFlags props;

        props |= PARTICLE_PROP_VISIBLE;
        props |= PARTICLE_PROP_ANIMATION_URL;
        props |= PARTICLE_PROP_ANIMATION_FPS;
        props |= PARTICLE_PROP_ANIMATION_FRAME_INDEX;
        props |= PARTICLE_PROP_ANIMATION_PLAYING;
        props |= PARTICLE_PROP_PAUSE_SIMULATION;

        QByteArray encoded = props.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 4: ParticlePropertyFlags: using + operator";
        ParticlePropertyFlags props;

        props = ParticlePropertyFlags(PARTICLE_PROP_VISIBLE)
                    + ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_URL)
                    + ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_FPS)
                    + ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_FRAME_INDEX)
                    + ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_PLAYING)
                    + ParticlePropertyFlags(PARTICLE_PROP_PAUSE_SIMULATION);

        QByteArray encoded = props.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 4b: ParticlePropertyFlags: using += operator";
        ParticlePropertyFlags props;

        props += PARTICLE_PROP_VISIBLE;
        props += PARTICLE_PROP_ANIMATION_URL;
        props += PARTICLE_PROP_ANIMATION_FPS;
        props += PARTICLE_PROP_ANIMATION_FRAME_INDEX;
        props += PARTICLE_PROP_ANIMATION_PLAYING;
        props += PARTICLE_PROP_PAUSE_SIMULATION;

        QByteArray encoded = props.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 5: ParticlePropertyFlags: using = ... << operator";
        ParticlePropertyFlags props;

        props = ParticlePropertyFlags(PARTICLE_PROP_VISIBLE)
                    << ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_URL)
                    << ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_FPS)
                    << ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_FRAME_INDEX)
                    << ParticlePropertyFlags(PARTICLE_PROP_ANIMATION_PLAYING)
                    << ParticlePropertyFlags(PARTICLE_PROP_PAUSE_SIMULATION);

        QByteArray encoded = props.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 5b: ParticlePropertyFlags: using <<= operator";
        ParticlePropertyFlags props;

        props <<= PARTICLE_PROP_VISIBLE;
        props <<= PARTICLE_PROP_ANIMATION_URL;
        props <<= PARTICLE_PROP_ANIMATION_FPS;
        props <<= PARTICLE_PROP_ANIMATION_FRAME_INDEX;
        props <<= PARTICLE_PROP_ANIMATION_PLAYING;
        props <<= PARTICLE_PROP_PAUSE_SIMULATION;

        QByteArray encoded = props.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 5c: ParticlePropertyFlags: using << enum operator";
        ParticlePropertyFlags props;

        props << PARTICLE_PROP_VISIBLE;
        props << PARTICLE_PROP_ANIMATION_URL;
        props << PARTICLE_PROP_ANIMATION_FPS;
        props << PARTICLE_PROP_ANIMATION_FRAME_INDEX;
        props << PARTICLE_PROP_ANIMATION_PLAYING;
        props << PARTICLE_PROP_PAUSE_SIMULATION;

        QByteArray encoded = props.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 5d: ParticlePropertyFlags: using << flags operator ";
        ParticlePropertyFlags props;
        ParticlePropertyFlags props2;

        props << PARTICLE_PROP_VISIBLE;
        props << PARTICLE_PROP_ANIMATION_URL;
        props << PARTICLE_PROP_ANIMATION_FPS;

        props2 << PARTICLE_PROP_ANIMATION_FRAME_INDEX;
        props2 << PARTICLE_PROP_ANIMATION_PLAYING;
        props2 << PARTICLE_PROP_PAUSE_SIMULATION;

        props << props2;

        QByteArray encoded = props.encode();

        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 6: ParticlePropertyFlags comparison";
        ParticlePropertyFlags propsA;

        qDebug() << "!propsA:" << (!propsA) << "{ expect true }";

        propsA << PARTICLE_PROP_VISIBLE;
        propsA << PARTICLE_PROP_ANIMATION_URL;
        propsA << PARTICLE_PROP_ANIMATION_FPS;
        propsA << PARTICLE_PROP_ANIMATION_FRAME_INDEX;
        propsA << PARTICLE_PROP_ANIMATION_PLAYING;
        propsA << PARTICLE_PROP_PAUSE_SIMULATION;

        qDebug() << "!propsA:" << (!propsA) << "{ expect false }";

        ParticlePropertyFlags propsB;
        qDebug() << "!propsB:" << (!propsB) << "{ expect true }";

        propsB << PARTICLE_PROP_VISIBLE;
        propsB << PARTICLE_PROP_ANIMATION_URL;
        propsB << PARTICLE_PROP_ANIMATION_FPS;
        propsB << PARTICLE_PROP_ANIMATION_FRAME_INDEX;
        propsB << PARTICLE_PROP_ANIMATION_PLAYING;
        propsB << PARTICLE_PROP_PAUSE_SIMULATION;

        qDebug() << "!propsB:" << (!propsB) << "{ expect false }";

        qDebug() << "propsA == propsB:" << (propsA == propsB) << "{ expect true }";
        qDebug() << "propsA != propsB:" << (propsA != propsB) << "{ expect false }";

        qDebug() << "AFTER propsB -= PARTICLE_PROP_PAUSE_SIMULATION...";
        propsB -= PARTICLE_PROP_PAUSE_SIMULATION;

        qDebug() << "propsA == propsB:" << (propsA == propsB) << "{ expect false }";
        qDebug() << "propsA != propsB:" << (propsA != propsB) << "{ expect true }";

        qDebug() << "AFTER propsB = propsA...";
        propsB = propsA;

        qDebug() << "propsA == propsB:" << (propsA == propsB) << "{ expect true }";
        qDebug() << "propsA != propsB:" << (propsA != propsB) << "{ expect false }";
    }

    {
        qDebug() << "Test 7: ParticlePropertyFlags testing individual properties";
        ParticlePropertyFlags props;

        qDebug() << "ParticlePropertyFlags props;";
        QByteArray encoded = props.encode();
        qDebug() << "props... encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());

        qDebug() << "props.getHasProperty(PARTICLE_PROP_VISIBLE)" << (props.getHasProperty(PARTICLE_PROP_VISIBLE))
                        << "{ expect false }";

        qDebug() << "props << PARTICLE_PROP_VISIBLE;";
        props << PARTICLE_PROP_VISIBLE;

        encoded = props.encode();
        qDebug() << "props... encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
        qDebug() << "props.getHasProperty(PARTICLE_PROP_VISIBLE)" << (props.getHasProperty(PARTICLE_PROP_VISIBLE))
                        << "{ expect true }";

        qDebug() << "props << PARTICLE_PROP_ANIMATION_URL;";
        props << PARTICLE_PROP_ANIMATION_URL;

        encoded = props.encode();
        qDebug() << "props... encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
        qDebug() << "props.getHasProperty(PARTICLE_PROP_VISIBLE)" << (props.getHasProperty(PARTICLE_PROP_VISIBLE))
                        << "{ expect true }";

        qDebug() << "props << ... more ...";
        props << PARTICLE_PROP_ANIMATION_FPS;
        props << PARTICLE_PROP_ANIMATION_FRAME_INDEX;
        props << PARTICLE_PROP_ANIMATION_PLAYING;
        props << PARTICLE_PROP_PAUSE_SIMULATION;

        encoded = props.encode();
        qDebug() << "props... encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
        qDebug() << "props.getHasProperty(PARTICLE_PROP_VISIBLE)" << (props.getHasProperty(PARTICLE_PROP_VISIBLE))
                        << "{ expect true }";

        qDebug() << "ParticlePropertyFlags propsB = props & PARTICLE_PROP_VISIBLE;";
        ParticlePropertyFlags propsB = props & PARTICLE_PROP_VISIBLE;

        qDebug() << "propsB.getHasProperty(PARTICLE_PROP_VISIBLE)" << (propsB.getHasProperty(PARTICLE_PROP_VISIBLE))
                        << "{ expect true }";

        encoded = propsB.encode();
        qDebug() << "propsB... encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());

        qDebug() << "ParticlePropertyFlags propsC = ~propsB;";
        ParticlePropertyFlags propsC = ~propsB;

        qDebug() << "propsC.getHasProperty(PARTICLE_PROP_VISIBLE)" << (propsC.getHasProperty(PARTICLE_PROP_VISIBLE))
                        << "{ expect false }";

        encoded = propsC.encode();
        qDebug() << "propsC... encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());
    }

    {
        qDebug() << "Test 8: ParticlePropertyFlags: decode tests";
        ParticlePropertyFlags props;

        props << PARTICLE_PROP_VISIBLE;
        props << PARTICLE_PROP_ANIMATION_URL;
        props << PARTICLE_PROP_ANIMATION_FPS;
        props << PARTICLE_PROP_ANIMATION_FRAME_INDEX;
        props << PARTICLE_PROP_ANIMATION_PLAYING;
        props << PARTICLE_PROP_PAUSE_SIMULATION;

        QByteArray encoded = props.encode();
        qDebug() << "encoded=";
        outputBufferBits((const unsigned char*)encoded.constData(), encoded.size());

        qDebug() << "encoded.size()=" << encoded.size();

        ParticlePropertyFlags propsDecoded;
        propsDecoded.decode(encoded);

        qDebug() << "propsDecoded == props:" << (propsDecoded == props) << "{ expect true }";

        QByteArray encodedAfterDecoded = propsDecoded.encode();

        qDebug() << "encodedAfterDecoded=";
        outputBufferBits((const unsigned char*)encodedAfterDecoded.constData(), encodedAfterDecoded.size());

        qDebug() << "fill encoded byte array with extra garbage (as if it was bitstream with more content)";
        QByteArray extraContent;
        extraContent.fill(0xba, 10);
        encoded.append(extraContent);
        qDebug() << "encoded.size()=" << encoded.size() << "includes extra garbage";

        ParticlePropertyFlags propsDecodedExtra;
        propsDecodedExtra.decode(encoded);

        qDebug() << "propsDecodedExtra == props:" << (propsDecodedExtra == props) << "{ expect true }";

        QByteArray encodedAfterDecodedExtra = propsDecodedExtra.encode();

        qDebug() << "encodedAfterDecodedExtra=";
        outputBufferBits((const unsigned char*)encodedAfterDecodedExtra.constData(), encodedAfterDecodedExtra.size());
    }

    qDebug() << "******************************************************************************************";
}

void OctreeTests::runAllTests() {
    propertyFlagsTests();
}
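
The checks above only print "{ expect ... }" markers next to the actual values, so a regression still exits successfully. One possible later tightening (not in this commit) is to assert the expectations directly, for example the Test 8 round trip:

// Hypothetical hardening sketch: fail hard if encode/decode stops round-tripping.
#include <cassert>

static void verifyEncodeDecodeRoundTrip() {
    ParticlePropertyFlags props;
    props << PARTICLE_PROP_VISIBLE;
    props << PARTICLE_PROP_ANIMATION_PLAYING;

    QByteArray encoded = props.encode();

    ParticlePropertyFlags decoded;
    decoded.decode(encoded);

    assert(decoded == props);   // the round trip must preserve the flag set
}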

tests/octree/src/OctreeTests.h (new file, 22 lines)
@@ -0,0 +1,22 @@
//
// OctreeTests.h
// tests/octree/src
//
// Created by Brad Hefta-Gaub on 06/04/2014.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_OctreeTests_h
#define hifi_OctreeTests_h

namespace OctreeTests {

    void propertyFlagsTests();

    void runAllTests();
}

#endif // hifi_OctreeTests_h
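
The header is deliberately tiny; a future suite (name invented here, purely illustrative) would add one declaration and one call in runAllTests():

// Hypothetical extension of the namespace, following the existing pattern.
namespace OctreeTests {
    void propertyFlagsTests();
    void octreeElementTests();   // hypothetical additional suite
    void runAllTests();
}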

tests/octree/src/main.cpp (new file, 16 lines)
@@ -0,0 +1,16 @@
//
// main.cpp
// tests/octree/src
//
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "OctreeTests.h"

int main(int argc, char** argv) {
    OctreeTests::runAllTests();
    return 0;
}
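
A possible later refinement (assumed, not in this commit): if runAllTests() were changed to report whether every check passed, main() could propagate failures through the process exit code:

// Hypothetical variant of main(); assumes a bool-returning runAllTests().
#include "OctreeTests.h"

int main(int, char**) {
    bool allPassed = OctreeTests::runAllTests();
    return allPassed ? 0 : 1;
}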