Merge branch 'master' of https://github.com/worklist/hifi into 19755

Stojce Slavkovski committed 2014-06-07 10:33:41 +02:00 (commit 9810335adf)
26 changed files with 1036 additions and 460 deletions


@@ -173,134 +173,151 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
}
}
// if the bearing relative angle to source is > 0 then the delayed channel is the right one
int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
const int16_t* nextOutputStart = bufferToAdd->getNextOutput();
const int16_t* bufferStart = bufferToAdd->getBuffer();
int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
int16_t correctBufferSample[2], delayBufferSample[2];
int delayedChannelIndex = 0;
const int SINGLE_STEREO_OFFSET = 2;
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
if (!bufferToAdd->isStereo()) {
// this is a mono buffer, which means it gets full attenuation and spatialization
// setup the int16_t variables for the two sample sets
correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient;
correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient;
// if the bearing relative angle to source is > 0 then the delayed channel is the right one
int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
const int16_t* bufferStart = bufferToAdd->getBuffer();
int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
int16_t correctBufferSample[2], delayBufferSample[2];
int delayedChannelIndex = 0;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[s + goodChannelOffset],
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET],
_clientSamples[delayedChannelIndex],
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET]);
__m64 addedSamples = _mm_set_pi16(correctBufferSample[0], correctBufferSample[1],
delayBufferSample[0], delayBufferSample[1]);
const int SINGLE_STEREO_OFFSET = 2;
// perform the MMX add (with saturation) of two correct and delayed samples
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addedSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
// copy the results of the MMX arithmetic back into the buffer
_clientSamples[s + goodChannelOffset] = shortResults[3];
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] = shortResults[2];
_clientSamples[delayedChannelIndex] = shortResults[1];
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] = shortResults[0];
}
// The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
// too many conditionals in handling the delay samples at the beginning of _clientSamples.
// Basically we try to take the samples in batches of four, and then handle the remainder
// conditionally to get rid of the rest.
const int DOUBLE_STEREO_OFFSET = 4;
const int TRIPLE_STEREO_OFFSET = 6;
if (numSamplesDelay > 0) {
// if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
// to stick at the beginning
float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay;
if (delayNextOutputStart < bufferStart) {
delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay;
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
// setup the int16_t variables for the two sample sets
correctBufferSample[0] = nextOutputStart[s / 2] * attenuationCoefficient;
correctBufferSample[1] = nextOutputStart[(s / 2) + 1] * attenuationCoefficient;
delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
delayBufferSample[0] = correctBufferSample[0] * weakChannelAmplitudeRatio;
delayBufferSample[1] = correctBufferSample[1] * weakChannelAmplitudeRatio;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[s + goodChannelOffset],
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET],
_clientSamples[delayedChannelIndex],
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET]);
__m64 addedSamples = _mm_set_pi16(correctBufferSample[0], correctBufferSample[1],
delayBufferSample[0], delayBufferSample[1]);
// perform the MMX add (with saturation) of two correct and delayed samples
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addedSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
// copy the results of the MMX arithmetic back into the buffer
_clientSamples[s + goodChannelOffset] = shortResults[3];
_clientSamples[s + goodChannelOffset + SINGLE_STEREO_OFFSET] = shortResults[2];
_clientSamples[delayedChannelIndex] = shortResults[1];
_clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] = shortResults[0];
}
int i = 0;
// The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
// too many conditionals in handling the delay samples at the beginning of _clientSamples.
// Basically we try to take the samples in batches of four, and then handle the remainder
// conditionally to get rid of the rest.
while (i + 3 < numSamplesDelay) {
// handle the first cases where we can MMX add four samples at once
const int DOUBLE_STEREO_OFFSET = 4;
const int TRIPLE_STEREO_OFFSET = 6;
if (numSamplesDelay > 0) {
// if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
// to stick at the beginning
float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
const int16_t* delayNextOutputStart = nextOutputStart - numSamplesDelay;
if (delayNextOutputStart < bufferStart) {
delayNextOutputStart = bufferStart + ringBufferSampleCapacity - numSamplesDelay;
}
int i = 0;
while (i + 3 < numSamplesDelay) {
// handle the first cases where we can MMX add four samples at once
int parentIndex = i * 2;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset]);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 3] * attenuationAndWeakChannelRatio);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[0];
// push the index
i += 4;
}
int parentIndex = i * 2;
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset]);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 3] * attenuationAndWeakChannelRatio);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
_clientSamples[parentIndex + TRIPLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[0];
// push the index
i += 4;
if (i + 2 < numSamplesDelay) {
// MMX add only three delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
} else if (i + 1 < numSamplesDelay) {
// MMX add two delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset], 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
} else if (i < numSamplesDelay) {
// MMX add a single delayed sample
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset], 0, 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio, 0, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
}
}
int parentIndex = i * 2;
if (i + 2 < numSamplesDelay) {
// MMX add only three delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset],
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset],
0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 2] * attenuationAndWeakChannelRatio,
0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
_clientSamples[parentIndex + DOUBLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[1];
} else if (i + 1 < numSamplesDelay) {
// MMX add two delayed samples
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset],
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset], 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio,
delayNextOutputStart[i + 1] * attenuationAndWeakChannelRatio, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
_clientSamples[parentIndex + SINGLE_STEREO_OFFSET + delayedChannelOffset] = shortResults[2];
} else if (i < numSamplesDelay) {
// MMX add a single delayed sample
__m64 bufferSamples = _mm_set_pi16(_clientSamples[parentIndex + delayedChannelOffset], 0, 0, 0);
__m64 addSamples = _mm_set_pi16(delayNextOutputStart[i] * attenuationAndWeakChannelRatio, 0, 0, 0);
__m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
_clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
} else {
// stereo buffer - do attenuation but no sample delay for spatialization
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
// clamp four additions at a time (scalar clamps here, unlike the MMX path above)
_clientSamples[s] = glm::clamp(_clientSamples[s] + (int) (nextOutputStart[s] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
_clientSamples[s + 1] = glm::clamp(_clientSamples[s + 1] + (int) (nextOutputStart[s + 1] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
_clientSamples[s + 2] = glm::clamp(_clientSamples[s + 2] + (int) (nextOutputStart[s + 2] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
_clientSamples[s + 3] = glm::clamp(_clientSamples[s + 3] + (int) (nextOutputStart[s + 3] * attenuationCoefficient),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
}
}
}
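
The mixing loops above lean on _mm_adds_pi16, which adds four signed 16-bit lanes with saturation, so summed voices pin at +/-32767 instead of wrapping. The other subtlety is lane order: _mm_set_pi16 places its first argument in the highest lane, which is why the results are read back from shortResults[3] down to shortResults[0]. A minimal standalone sketch of both behaviors (illustrative only, not hifi code):

    #include <mmintrin.h>   // MMX: __m64, _mm_set_pi16, _mm_adds_pi16, _mm_empty
    #include <cstdint>
    #include <cstdio>

    int main() {
        // the first two lanes would overflow a plain 16-bit add
        __m64 a = _mm_set_pi16(30000, 30000, -100, 200);
        __m64 b = _mm_set_pi16(5000, 3000, 50, 100);
        __m64 sum = _mm_adds_pi16(a, b);    // per-lane add, clamped at INT16_MAX/MIN
        int16_t* lanes = reinterpret_cast<int16_t*>(&sum);
        // lanes[3] holds the FIRST argument given to _mm_set_pi16
        printf("%d %d %d %d\n", lanes[3], lanes[2], lanes[1], lanes[0]);
        // prints: 32767 32767 -50 300
        _mm_empty();                        // clear MMX state before x87 float code
        return 0;
    }
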


@@ -50,10 +50,22 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
// grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
// read the first byte after the header to see if this is a stereo or mono buffer
quint8 channelFlag = packet.at(numBytesForPacketHeader(packet));
bool isStereo = channelFlag == 1;
if (avatarRingBuffer && avatarRingBuffer->isStereo() != isStereo) {
// there's a mismatch in the buffer channels for the incoming and current buffer
// so delete our current buffer and create a new one
_ringBuffers.removeOne(avatarRingBuffer);
avatarRingBuffer->deleteLater();
avatarRingBuffer = NULL;
}
if (!avatarRingBuffer) {
// we don't have an AvatarAudioRingBuffer yet, so add it
avatarRingBuffer = new AvatarAudioRingBuffer();
avatarRingBuffer = new AvatarAudioRingBuffer(isStereo);
_ringBuffers.push_back(avatarRingBuffer);
}
@@ -106,7 +118,8 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
PositionalAudioRingBuffer* audioBuffer = _ringBuffers[i];
if (audioBuffer->willBeAddedToMix()) {
audioBuffer->shiftReadPosition(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
audioBuffer->shiftReadPosition(audioBuffer->isStereo()
? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
audioBuffer->setWillBeAddedToMix(false);
} else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector
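
The shiftReadPosition change above follows from the buffer-size constants; their relationship is inferred from the names here, not verified against the source:

    // Assumed relationship between the network buffer constants in these hunks:
    //
    //   NETWORK_BUFFER_LENGTH_SAMPLES_STEREO = 2 * NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
    //   NETWORK_BUFFER_LENGTH_BYTES_*        = NETWORK_BUFFER_LENGTH_SAMPLES_* * sizeof(int16_t)
    //
    // A stereo frame occupies twice the ring-buffer space of a mono frame, so
    // after each mixed frame the read position must advance twice as far.
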


@@ -24,14 +24,14 @@ public:
AudioMixerClientData();
~AudioMixerClientData();
const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
const QList<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
int parseData(const QByteArray& packet);
void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples);
void pushBuffersAfterFrameSend();
private:
std::vector<PositionalAudioRingBuffer*> _ringBuffers;
QList<PositionalAudioRingBuffer*> _ringBuffers;
};
#endif // hifi_AudioMixerClientData_h


@@ -13,8 +13,8 @@
#include "AvatarAudioRingBuffer.h"
AvatarAudioRingBuffer::AvatarAudioRingBuffer() :
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone) {
AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo) :
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo) {
}


@@ -18,7 +18,7 @@
class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
public:
AvatarAudioRingBuffer();
AvatarAudioRingBuffer(bool isStereo = false);
int parseData(const QByteArray& packet);
private:


@@ -9,8 +9,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
var rightHandAnimation = "https://s3-us-west-1.amazonaws.com/highfidelity-public/animations/HandAnim.fbx";
var leftHandAnimation = "";
var rightHandAnimation = "https://s3-us-west-1.amazonaws.com/highfidelity-public/animations/RightHandAnim.fbx";
var leftHandAnimation = "https://s3-us-west-1.amazonaws.com/highfidelity-public/animations/LeftHandAnim.fbx";
var LEFT = 0;
var RIGHT = 1;
@@ -18,17 +18,20 @@ var RIGHT = 1;
var lastLeftFrame = 0;
var lastRightFrame = 0;
var LAST_FRAME = 15.0; // What is the number of the last frame we want to use in the animation?
var LAST_FRAME = 11.0; // What is the number of the last frame we want to use in the animation?
var SMOOTH_FACTOR = 0.80;
Script.update.connect(function(deltaTime) {
var leftTriggerValue = Controller.getTriggerValue(LEFT);
var rightTriggerValue = Controller.getTriggerValue(RIGHT);
var leftTriggerValue = Math.sqrt(Controller.getTriggerValue(LEFT));
var rightTriggerValue = Math.sqrt(Controller.getTriggerValue(RIGHT));
var leftFrame, rightFrame;
// Average last two trigger frames together for a bit of smoothing
leftFrame = (leftTriggerValue * LAST_FRAME) * 0.5 + lastLeftFrame * 0.5;
rightFrame = (rightTriggerValue * LAST_FRAME) * 0.5 + lastRightFrame * 0.5;
// Average last few trigger frames together for a bit of smoothing
leftFrame = (leftTriggerValue * LAST_FRAME) * (1.0 - SMOOTH_FACTOR) + lastLeftFrame * SMOOTH_FACTOR;
rightFrame = (rightTriggerValue * LAST_FRAME) * (1.0 - SMOOTH_FACTOR) + lastRightFrame * SMOOTH_FACTOR;
if ((leftFrame != lastLeftFrame) && leftHandAnimation.length){
MyAvatar.stopAnimation(leftHandAnimation);
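
The script change swaps the fixed 50/50 average for a one-pole low-pass, frame = (1 - SMOOTH_FACTOR) * target + SMOOTH_FACTOR * previous, and takes the square root of the trigger value so light pulls get more of the animation range. The same smoother as a standalone C++ sketch (not hifi code):

    #include <cmath>

    // one-pole (exponential) smoother equivalent to the SMOOTH_FACTOR update
    struct Smoother {
        float alpha;          // 0.8 in the script: higher = smoother but laggier
        float state = 0.0f;
        float update(float target) {
            state = (1.0f - alpha) * target + alpha * state;
            return state;
        }
    };

    // usage, mirroring the script:
    //   float frame = smoother.update(sqrtf(triggerValue) * LAST_FRAME);
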


@@ -1007,12 +1007,6 @@ void Application::keyPressEvent(QKeyEvent* event) {
case Qt::Key_At:
Menu::getInstance()->goTo();
break;
case Qt::Key_B:
_applicationOverlay.setOculusAngle(_applicationOverlay.getOculusAngle() - RADIANS_PER_DEGREE);
break;
case Qt::Key_N:
_applicationOverlay.setOculusAngle(_applicationOverlay.getOculusAngle() + RADIANS_PER_DEGREE);
break;
default:
event->ignore();
break;


@@ -68,6 +68,7 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
_proceduralOutputDevice(NULL),
_inputRingBuffer(0),
_ringBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL),
_isStereoInput(false),
_averagedLatency(0.0),
_measuredJitter(0),
_jitterBufferSamples(initialJitterBufferSamples),
@@ -289,20 +290,27 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
if (sourceToDestinationFactor >= 2) {
// we need to downsample from 48 to 24
// for now this only supports a mono output - this would be the case for audio input
for (unsigned int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
if (destinationAudioFormat.channelCount() == 1) {
for (unsigned int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
(sourceSamples[i - sourceAudioFormat.channelCount()] / 2)
+ (sourceSamples[i] / 2);
} else {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
} else {
destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
(sourceSamples[i - sourceAudioFormat.channelCount()] / 4)
+ (sourceSamples[i] / 2)
+ (sourceSamples[i + sourceAudioFormat.channelCount()] / 4);
}
}
} else {
// this is a 48 to 24 resampling but both source and destination are two channels
// squish two samples into one in each channel
for (int i = 0; i < numSourceSamples; i += 4) {
destinationSamples[i / 2] = (sourceSamples[i] / 2) + (sourceSamples[i + 2] / 2);
destinationSamples[(i / 2) + 1] = (sourceSamples[i + 1] / 2) + (sourceSamples[i + 3] / 2);
}
}
} else {
if (sourceAudioFormat.sampleRate() == destinationAudioFormat.sampleRate()) {
// mono to stereo, same sample rate
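
The new stereo branch halves 48 kHz to 24 kHz by averaging adjacent frames per channel: each interleaved group (L0, R0, L1, R1) collapses to ((L0 + L1) / 2, (R0 + R1) / 2). Halving each sample before adding keeps the sum inside int16_t range. The loop in isolation (standalone sketch):

    #include <cstdint>

    void downsampleStereoByTwo(const int16_t* src, int16_t* dst, int numSourceSamples) {
        // samples are interleaved L,R,L,R,...; every four become two
        for (int i = 0; i + 3 < numSourceSamples; i += 4) {
            dst[i / 2]       = (src[i] / 2) + (src[i + 2] / 2);      // left
            dst[(i / 2) + 1] = (src[i + 1] / 2) + (src[i + 3] / 2);  // right
        }
    }
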
@@ -405,12 +413,12 @@ bool Audio::switchOutputToAudioDevice(const QString& outputDeviceName) {
}
void Audio::handleAudioInput() {
static char monoAudioDataPacket[MAX_PACKET_SIZE];
static char audioDataPacket[MAX_PACKET_SIZE];
static int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMicrophoneAudioNoEcho);
static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat);
static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
static int16_t* monoAudioSamples = (int16_t*) (monoAudioDataPacket + leadingBytes);
static int16_t* networkAudioSamples = (int16_t*) (audioDataPacket + leadingBytes);
float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
@@ -452,126 +460,139 @@ void Audio::handleAudioInput() {
int16_t* inputAudioSamples = new int16_t[inputSamplesRequired];
_inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);
int numNetworkBytes = _isStereoInput ? NETWORK_BUFFER_LENGTH_BYTES_STEREO : NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
int numNetworkSamples = _isStereoInput ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
// zero out the monoAudioSamples array and the locally injected audio
memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
memset(networkAudioSamples, 0, numNetworkBytes);
if (!_muted) {
// we aren't muted, downsample the input audio
linearResampling((int16_t*) inputAudioSamples,
monoAudioSamples,
inputSamplesRequired,
NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
linearResampling((int16_t*) inputAudioSamples, networkAudioSamples,
inputSamplesRequired, numNetworkSamples,
_inputFormat, _desiredInputFormat);
//
// Impose Noise Gate
//
// The Noise Gate is used to reject constant background noise by measuring the noise
// floor observed at the microphone and then opening the 'gate' to allow microphone
// signals to be transmitted when the microphone samples average level exceeds a multiple
// of the noise floor.
//
// NOISE_GATE_HEIGHT: How loud you have to speak relative to noise background to open the gate.
// Make this value lower for more sensitivity and less rejection of noise.
// NOISE_GATE_WIDTH: The number of samples in an audio frame for which the height must be exceeded
// to open the gate.
// NOISE_GATE_CLOSE_FRAME_DELAY: Once the noise is below the gate height for the frame, how many frames
// will we wait before closing the gate.
// NOISE_GATE_FRAMES_TO_AVERAGE: How many audio frames should we average together to compute noise floor.
// More means better rejection but also can reject continuous things like singing.
// NUMBER_OF_NOISE_SAMPLE_FRAMES: How often should we re-evaluate the noise floor?
float loudness = 0;
float thisSample = 0;
int samplesOverNoiseGate = 0;
const float NOISE_GATE_HEIGHT = 7.0f;
const int NOISE_GATE_WIDTH = 5;
const int NOISE_GATE_CLOSE_FRAME_DELAY = 5;
const int NOISE_GATE_FRAMES_TO_AVERAGE = 5;
const float DC_OFFSET_AVERAGING = 0.99f;
const float CLIPPING_THRESHOLD = 0.90f;
//
// Check clipping, adjust DC offset, and check if should open noise gate
//
float measuredDcOffset = 0.0f;
// Increment the time since the last clip
if (_timeSinceLastClip >= 0.0f) {
_timeSinceLastClip += (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE;
}
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
measuredDcOffset += monoAudioSamples[i];
monoAudioSamples[i] -= (int16_t) _dcOffset;
thisSample = fabsf(monoAudioSamples[i]);
if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
_timeSinceLastClip = 0.0f;
// only impose the noise gate and perform tone injection if we're sending mono audio
if (!_isStereoInput) {
//
// Impose Noise Gate
//
// The Noise Gate is used to reject constant background noise by measuring the noise
// floor observed at the microphone and then opening the 'gate' to allow microphone
// signals to be transmitted when the microphone samples average level exceeds a multiple
// of the noise floor.
//
// NOISE_GATE_HEIGHT: How loud you have to speak relative to noise background to open the gate.
// Make this value lower for more sensitivity and less rejection of noise.
// NOISE_GATE_WIDTH: The number of samples in an audio frame for which the height must be exceeded
// to open the gate.
// NOISE_GATE_CLOSE_FRAME_DELAY: Once the noise is below the gate height for the frame, how many frames
// will we wait before closing the gate.
// NOISE_GATE_FRAMES_TO_AVERAGE: How many audio frames should we average together to compute noise floor.
// More means better rejection but also can reject continuous things like singing.
// NUMBER_OF_NOISE_SAMPLE_FRAMES: How often should we re-evaluate the noise floor?
float loudness = 0;
float thisSample = 0;
int samplesOverNoiseGate = 0;
const float NOISE_GATE_HEIGHT = 7.0f;
const int NOISE_GATE_WIDTH = 5;
const int NOISE_GATE_CLOSE_FRAME_DELAY = 5;
const int NOISE_GATE_FRAMES_TO_AVERAGE = 5;
const float DC_OFFSET_AVERAGING = 0.99f;
const float CLIPPING_THRESHOLD = 0.90f;
//
// Check clipping, adjust DC offset, and check if should open noise gate
//
float measuredDcOffset = 0.0f;
// Increment the time since the last clip
if (_timeSinceLastClip >= 0.0f) {
_timeSinceLastClip += (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE;
}
loudness += thisSample;
// Noise Reduction: Count peaks above the average loudness
if (_noiseGateEnabled && (thisSample > (_noiseGateMeasuredFloor * NOISE_GATE_HEIGHT))) {
samplesOverNoiseGate++;
}
}
measuredDcOffset /= NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
if (_dcOffset == 0.0f) {
// On first frame, copy over measured offset
_dcOffset = measuredDcOffset;
} else {
_dcOffset = DC_OFFSET_AVERAGING * _dcOffset + (1.0f - DC_OFFSET_AVERAGING) * measuredDcOffset;
}
// Add tone injection if enabled
const float TONE_FREQ = 220.0f / SAMPLE_RATE * TWO_PI;
const float QUARTER_VOLUME = 8192.0f;
if (_toneInjectionEnabled) {
loudness = 0.0f;
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
monoAudioSamples[i] = QUARTER_VOLUME * sinf(TONE_FREQ * (float)(i + _proceduralEffectSample));
loudness += fabsf(monoAudioSamples[i]);
}
}
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
// If Noise Gate is enabled, check and turn the gate on and off
if (!_toneInjectionEnabled && _noiseGateEnabled) {
float averageOfAllSampleFrames = 0.0f;
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
float smallestSample = FLT_MAX;
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i += NOISE_GATE_FRAMES_TO_AVERAGE) {
float thisAverage = 0.0f;
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
thisAverage += _noiseSampleFrames[j];
averageOfAllSampleFrames += _noiseSampleFrames[j];
}
thisAverage /= NOISE_GATE_FRAMES_TO_AVERAGE;
if (thisAverage < smallestSample) {
smallestSample = thisAverage;
}
measuredDcOffset += networkAudioSamples[i];
networkAudioSamples[i] -= (int16_t) _dcOffset;
thisSample = fabsf(networkAudioSamples[i]);
if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
_timeSinceLastClip = 0.0f;
}
loudness += thisSample;
// Noise Reduction: Count peaks above the average loudness
if (_noiseGateEnabled && (thisSample > (_noiseGateMeasuredFloor * NOISE_GATE_HEIGHT))) {
samplesOverNoiseGate++;
}
averageOfAllSampleFrames /= NUMBER_OF_NOISE_SAMPLE_FRAMES;
_noiseGateMeasuredFloor = smallestSample;
_noiseGateSampleCounter = 0;
}
if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
_noiseGateOpen = true;
_noiseGateFramesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
measuredDcOffset /= NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
if (_dcOffset == 0.0f) {
// On first frame, copy over measured offset
_dcOffset = measuredDcOffset;
} else {
if (--_noiseGateFramesToClose == 0) {
_noiseGateOpen = false;
_dcOffset = DC_OFFSET_AVERAGING * _dcOffset + (1.0f - DC_OFFSET_AVERAGING) * measuredDcOffset;
}
// Add tone injection if enabled
const float TONE_FREQ = 220.0f / SAMPLE_RATE * TWO_PI;
const float QUARTER_VOLUME = 8192.0f;
if (_toneInjectionEnabled) {
loudness = 0.0f;
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
networkAudioSamples[i] = QUARTER_VOLUME * sinf(TONE_FREQ * (float)(i + _proceduralEffectSample));
loudness += fabsf(networkAudioSamples[i]);
}
}
if (!_noiseGateOpen) {
memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
_lastInputLoudness = 0;
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
// If Noise Gate is enabled, check and turn the gate on and off
if (!_toneInjectionEnabled && _noiseGateEnabled) {
float averageOfAllSampleFrames = 0.0f;
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
float smallestSample = FLT_MAX;
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i += NOISE_GATE_FRAMES_TO_AVERAGE) {
float thisAverage = 0.0f;
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
thisAverage += _noiseSampleFrames[j];
averageOfAllSampleFrames += _noiseSampleFrames[j];
}
thisAverage /= NOISE_GATE_FRAMES_TO_AVERAGE;
if (thisAverage < smallestSample) {
smallestSample = thisAverage;
}
}
averageOfAllSampleFrames /= NUMBER_OF_NOISE_SAMPLE_FRAMES;
_noiseGateMeasuredFloor = smallestSample;
_noiseGateSampleCounter = 0;
}
if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
_noiseGateOpen = true;
_noiseGateFramesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
} else {
if (--_noiseGateFramesToClose == 0) {
_noiseGateOpen = false;
}
}
if (!_noiseGateOpen) {
memset(networkAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
_lastInputLoudness = 0;
}
}
} else {
float loudness = 0.0f;
for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; i++) {
loudness += fabsf(networkAudioSamples[i]);
}
_lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
}
} else {
// our input loudness is 0, since we're muted
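
Condensed, the gate decision above is: count the samples in a frame that exceed the measured noise floor times NOISE_GATE_HEIGHT; if more than NOISE_GATE_WIDTH qualify, open the gate and arm a close delay, otherwise count the delay down and close. A stripped-down sketch with the floor tracking and DC-offset handling omitted (illustrative, not the hifi code):

    #include <cstdint>
    #include <cmath>

    struct NoiseGate {
        float measuredFloor = 1.0f;   // updated elsewhere from the quietest frames
        bool open = false;
        int framesToClose = 0;

        bool process(const int16_t* samples, int numSamples) {
            const float HEIGHT = 7.0f;        // how far above the floor counts as voice
            const int WIDTH = 5;              // samples over threshold needed to open
            const int CLOSE_FRAME_DELAY = 5;  // quiet frames to wait before closing
            int samplesOverGate = 0;
            for (int i = 0; i < numSamples; i++) {
                if (fabsf((float) samples[i]) > measuredFloor * HEIGHT) {
                    samplesOverGate++;
                }
            }
            if (samplesOverGate > WIDTH) {
                open = true;
                framesToClose = CLOSE_FRAME_DELAY;
            } else if (open && --framesToClose == 0) {
                open = false;
            }
            return open;   // when false, the caller zeroes the outgoing frame
        }
    };
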
@@ -580,19 +601,19 @@ void Audio::handleAudioInput() {
// at this point we have clean monoAudioSamples, which match our target output...
// this is what we should send to our interested listeners
if (_processSpatialAudio && !_muted && _audioOutput) {
QByteArray monoInputData((char*)monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
if (_processSpatialAudio && !_muted && !_isStereoInput && _audioOutput) {
QByteArray monoInputData((char*)networkAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
emit processLocalAudio(_spatialAudioStart, monoInputData, _desiredInputFormat);
}
if (_proceduralAudioOutput) {
processProceduralAudio(monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
if (!_isStereoInput && _proceduralAudioOutput) {
processProceduralAudio(networkAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
}
if (_scopeEnabled && !_scopeEnabledPause) {
if (!_isStereoInput && _scopeEnabled && !_scopeEnabledPause) {
unsigned int numMonoAudioChannels = 1;
unsigned int monoAudioChannel = 0;
addBufferToScope(_scopeInput, _scopeInputOffset, monoAudioSamples, monoAudioChannel, numMonoAudioChannels);
addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, monoAudioChannel, numMonoAudioChannels);
_scopeInputOffset += NETWORK_SAMPLES_PER_FRAME;
_scopeInputOffset %= _samplesPerScope;
}
@@ -604,9 +625,7 @@ void Audio::handleAudioInput() {
MyAvatar* interfaceAvatar = Application::getInstance()->getAvatar();
glm::vec3 headPosition = interfaceAvatar->getHead()->getPosition();
glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientationInWorldFrame();
// we need the amount of bytes in the buffer + 1 for type
// + 12 for 3 floats for position + float for bearing + 1 attenuation byte
quint8 isStereo = _isStereoInput ? 1 : 0;
int numAudioBytes = 0;
@@ -615,11 +634,12 @@ void Audio::handleAudioInput() {
packetType = PacketTypeSilentAudioFrame;
// we need to indicate how many silent samples this is to the audio mixer
monoAudioSamples[0] = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
audioDataPacket[0] = _isStereoInput
? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO
: NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
numAudioBytes = sizeof(int16_t);
} else {
numAudioBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
numAudioBytes = _isStereoInput ? NETWORK_BUFFER_LENGTH_BYTES_STEREO : NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
packetType = PacketTypeMicrophoneAudioWithEcho;
@@ -628,7 +648,10 @@ }
}
}
char* currentPacketPtr = monoAudioDataPacket + populatePacketHeader(monoAudioDataPacket, packetType);
char* currentPacketPtr = audioDataPacket + populatePacketHeader(audioDataPacket, packetType);
// set the mono/stereo byte
*currentPacketPtr++ = isStereo;
// memcpy the three float positions
memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
@@ -638,7 +661,7 @@ void Audio::handleAudioInput() {
memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
currentPacketPtr += sizeof(headOrientation);
nodeList->writeDatagram(monoAudioDataPacket, numAudioBytes + leadingBytes, audioMixer);
nodeList->writeDatagram(audioDataPacket, numAudioBytes + leadingBytes, audioMixer);
Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
.updateValue(numAudioBytes + leadingBytes);
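
After this change the outgoing audio packet carries one extra channel byte, and the new leadingBytes arithmetic accounts for it. The layout and sizes below assume standard glm type layouts:

    // Wire layout implied by the serialization above:
    //
    //   [ header ][ quint8 isStereo ][ glm::vec3 position ][ glm::quat orientation ][ samples ]
    //
    // leadingBytes = numBytesPacketHeader
    //              + sizeof(quint8)      //  1 byte, the new mono/stereo flag
    //              + sizeof(glm::vec3)   // 12 bytes, 3 floats of position
    //              + sizeof(glm::quat)   // 16 bytes, 4 floats of orientation
    //              = numBytesPacketHeader + 29
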
@@ -761,6 +784,24 @@ void Audio::toggleAudioNoiseReduction() {
_noiseGateEnabled = !_noiseGateEnabled;
}
void Audio::toggleStereoInput() {
int oldChannelCount = _desiredInputFormat.channelCount();
QAction* stereoAudioOption = Menu::getInstance()->getActionForOption(MenuOption::StereoAudio);
if (stereoAudioOption->isChecked()) {
_desiredInputFormat.setChannelCount(2);
_isStereoInput = true;
} else {
_desiredInputFormat.setChannelCount(1);
_isStereoInput = false;
}
if (oldChannelCount != _desiredInputFormat.channelCount()) {
// change in channel count for desired input format, restart the input device
switchInputToAudioDevice(_inputAudioDeviceName);
}
}
void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
_ringBuffer.parseData(audioByteArray);
@@ -1300,18 +1341,21 @@ bool Audio::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {
if (adjustedFormatForAudioDevice(inputDeviceInfo, _desiredInputFormat, _inputFormat)) {
qDebug() << "The format to be used for audio input is" << _inputFormat;
_audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
_numInputCallbackBytes = calculateNumberOfInputCallbackBytes(_inputFormat);
_audioInput->setBufferSize(_numInputCallbackBytes);
// how do we want to handle input working, but output not working?
int numFrameSamples = calculateNumberOfFrameSamples(_numInputCallbackBytes);
_inputRingBuffer.resizeForFrameSize(numFrameSamples);
_inputDevice = _audioInput->start();
connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));
supportedFormat = true;
// if the user wants stereo but this device can't provide it, then bail
if (!_isStereoInput || _inputFormat.channelCount() == 2) {
_audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
_numInputCallbackBytes = calculateNumberOfInputCallbackBytes(_inputFormat);
_audioInput->setBufferSize(_numInputCallbackBytes);
// how do we want to handle input working, but output not working?
int numFrameSamples = calculateNumberOfFrameSamples(_numInputCallbackBytes);
_inputRingBuffer.resizeForFrameSize(numFrameSamples);
_inputDevice = _audioInput->start();
connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));
supportedFormat = true;
}
}
}
return supportedFormat;


@@ -85,6 +85,7 @@ public slots:
void toggleScope();
void toggleScopePause();
void toggleAudioSpatialProcessing();
void toggleStereoInput();
void selectAudioScopeFiveFrames();
void selectAudioScopeTwentyFrames();
void selectAudioScopeFiftyFrames();
@@ -127,6 +128,7 @@ private:
QIODevice* _proceduralOutputDevice;
AudioRingBuffer _inputRingBuffer;
AudioRingBuffer _ringBuffer;
bool _isStereoInput;
QString _inputAudioDeviceName;
QString _outputAudioDeviceName;


@@ -432,6 +432,8 @@ Menu::Menu() :
SLOT(toggleAudioNoiseReduction()));
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoServerAudio);
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoLocalAudio);
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::StereoAudio, 0, false,
appInstance->getAudio(), SLOT(toggleStereoInput()));
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::MuteAudio,
Qt::CTRL | Qt::Key_M,
false,


@@ -402,6 +402,7 @@ namespace MenuOption {
const QString StandOnNearbyFloors = "Stand on nearby floors";
const QString Stars = "Stars";
const QString Stats = "Stats";
const QString StereoAudio = "Stereo Audio";
const QString StopAllScripts = "Stop All Scripts";
const QString SuppressShortTimings = "Suppress Timings Less than 10ms";
const QString TestPing = "Test Ping";


@@ -21,6 +21,11 @@ SkeletonModel::SkeletonModel(Avatar* owningAvatar) :
_owningAvatar(owningAvatar) {
}
void SkeletonModel::setJointStates(QVector<JointState> states) {
Model::setJointStates(states);
_ragDoll.init(_jointStates);
}
const float PALM_PRIORITY = 3.0f;
void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
@@ -78,6 +83,21 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
applyPalmData(geometry.leftHandJointIndex, hand->getPalms()[leftPalmIndex]);
applyPalmData(geometry.rightHandJointIndex, hand->getPalms()[rightPalmIndex]);
}
simulateRagDoll(deltaTime);
}
void SkeletonModel::simulateRagDoll(float deltaTime) {
_ragDoll.slaveToSkeleton(_jointStates, 0.5f);
float MIN_CONSTRAINT_ERROR = 0.005f; // 5mm
int MAX_ITERATIONS = 4;
int iterations = 0;
float delta = 0.0f;
do {
delta = _ragDoll.enforceConstraints();
++iterations;
} while (delta > MIN_CONSTRAINT_ERROR && iterations < MAX_ITERATIONS);
}
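
simulateRagDoll is Gauss-Seidel-style constraint projection, as in position-based dynamics: satisfying one stick can disturb its neighbor, so a few passes are needed before the corrections settle. A worked one-dimensional trace:

    // Points A, B, C on a line at x = 0.0, 0.4, 2.0, with unit-length sticks
    // A-B and B-C (projection formula as in DistanceConstraint::enforce):
    //
    //   pass 1: A-B (length 0.40) -> A = -0.30, B = 0.70
    //           B-C (length 1.30) -> B =  0.85, C = 1.85   (A-B is now 1.15)
    //   pass 2: re-projecting shrinks the remaining error again, and so on
    //
    // The loop above stops once the largest correction in a pass is under
    // MIN_CONSTRAINT_ERROR (5 mm) or after MAX_ITERATIONS passes, trading
    // exactness for a bounded per-frame cost.
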
void SkeletonModel::getHandShapes(int jointIndex, QVector<const Shape*>& shapes) const {
@@ -121,6 +141,7 @@ void SkeletonModel::getBodyShapes(QVector<const Shape*>& shapes) const {
void SkeletonModel::renderIKConstraints() {
renderJointConstraints(getRightHandJointIndex());
renderJointConstraints(getLeftHandJointIndex());
renderRagDoll();
}
class IndexValue {
@@ -452,3 +473,30 @@ bool SkeletonModel::getEyePositions(glm::vec3& firstEyePosition, glm::vec3& seco
return false;
}
void SkeletonModel::renderRagDoll() {
const int BALL_SUBDIVISIONS = 6;
glDisable(GL_DEPTH_TEST);
glDisable(GL_LIGHTING);
glPushMatrix();
Application::getInstance()->loadTranslatedViewMatrix(_translation);
QVector<glm::vec3> points = _ragDoll.getPoints();
int numPoints = points.size();
float alpha = 0.3f;
float radius1 = 0.008f;
float radius2 = 0.01f;
for (int i = 0; i < numPoints; ++i) {
glPushMatrix();
// draw each point as a yellow hexagon with black border
glm::vec3 position = _rotation * points[i];
glTranslatef(position.x, position.y, position.z);
glColor4f(0.0f, 0.0f, 0.0f, alpha);
glutSolidSphere(radius2, BALL_SUBDIVISIONS, BALL_SUBDIVISIONS);
glColor4f(1.0f, 1.0f, 0.0f, alpha);
glutSolidSphere(radius1, BALL_SUBDIVISIONS, BALL_SUBDIVISIONS);
glPopMatrix();
}
glPopMatrix();
glEnable(GL_DEPTH_TEST);
glEnable(GL_LIGHTING);
}


@@ -13,6 +13,7 @@
#define hifi_SkeletonModel_h
#include "renderer/Model.h"
#include "renderer/RagDoll.h"
class Avatar;
@@ -23,8 +24,11 @@ class SkeletonModel : public Model {
public:
SkeletonModel(Avatar* owningAvatar);
void setJointStates(QVector<JointState> states);
void simulate(float deltaTime, bool fullUpdate = true);
void simulateRagDoll(float deltaTime);
/// \param jointIndex index of hand joint
/// \param shapes[out] list in which pointers to hand shapes are stored
@@ -89,6 +93,7 @@ public:
/// \return whether or not both eye meshes were found
bool getEyePositions(glm::vec3& firstEyePosition, glm::vec3& secondEyePosition) const;
void renderRagDoll();
protected:
/// \param jointIndex index of joint in model
@@ -114,6 +119,7 @@ private:
void setHandPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation);
Avatar* _owningAvatar;
RagDoll _ragDoll;
};
#endif // hifi_SkeletonModel_h


@@ -76,20 +76,21 @@ static void setPalm(float deltaTime, int index) {
}
}
// NOTE: this math is done in the world-frame with unnecessary complexity.
// TODO: transform this to stay in the model-frame.
glm::vec3 position;
glm::quat rotation;
SkeletonModel* skeletonModel = &Application::getInstance()->getAvatar()->getSkeletonModel();
int jointIndex;
glm::quat inverseRotation = glm::inverse(Application::getInstance()->getAvatar()->getOrientation());
if (index == LEFT_HAND_INDEX) {
jointIndex = skeletonModel->getLeftHandJointIndex();
skeletonModel->getJointRotation(jointIndex, rotation, true);
skeletonModel->getJointRotationInWorldFrame(jointIndex, rotation);
rotation = inverseRotation * rotation * glm::quat(glm::vec3(0.0f, PI_OVER_TWO, 0.0f));
} else {
jointIndex = skeletonModel->getRightHandJointIndex();
skeletonModel->getJointRotation(jointIndex, rotation, true);
skeletonModel->getJointRotationInWorldFrame(jointIndex, rotation);
rotation = inverseRotation * rotation * glm::quat(glm::vec3(0.0f, -PI_OVER_TWO, 0.0f));
}
skeletonModel->getJointPositionInWorldFrame(jointIndex, position);
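
The replacement lines compose three rotations: left-multiplying by the inverse avatar orientation carries the world-frame joint rotation into the avatar's local frame, and the trailing +/- 90 degree yaw aligns the hand model's palm axis with the controller. A hedged helper capturing just that composition (parameter names are illustrative):

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    glm::quat worldToAvatarPalm(const glm::quat& avatarOrientation,
                                const glm::quat& jointWorldRotation,
                                float palmYawOffset) {  // +pi/2 left, -pi/2 right
        return glm::inverse(avatarOrientation) * jointWorldRotation
            * glm::quat(glm::vec3(0.0f, palmYawOffset, 0.0f));
    }
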


@@ -0,0 +1,101 @@
//
// JointState.cpp
// interface/src/renderer
//
// Created by Andrzej Kapolka on 10/18/13.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <glm/gtx/norm.hpp>
//#include <GeometryUtil.h>
#include <SharedUtil.h>
#include "JointState.h"
JointState::JointState() :
_animationPriority(0.0f),
_fbxJoint(NULL) {
}
void JointState::setFBXJoint(const FBXJoint* joint) {
assert(joint != NULL);
_rotationInParentFrame = joint->rotation;
// NOTE: JointState does not own the FBXJoint to which it points.
_fbxJoint = joint;
}
void JointState::copyState(const JointState& state) {
_rotationInParentFrame = state._rotationInParentFrame;
_transform = state._transform;
_rotation = extractRotation(_transform);
_animationPriority = state._animationPriority;
// DO NOT copy _fbxJoint
}
void JointState::computeTransform(const glm::mat4& parentTransform) {
glm::quat modifiedRotation = _fbxJoint->preRotation * _rotationInParentFrame * _fbxJoint->postRotation;
glm::mat4 modifiedTransform = _fbxJoint->preTransform * glm::mat4_cast(modifiedRotation) * _fbxJoint->postTransform;
_transform = parentTransform * glm::translate(_fbxJoint->translation) * modifiedTransform;
_rotation = extractRotation(_transform);
}
glm::quat JointState::getRotationFromBindToModelFrame() const {
return _rotation * _fbxJoint->inverseBindRotation;
}
void JointState::restoreRotation(float fraction, float priority) {
assert(_fbxJoint != NULL);
if (priority == _animationPriority || _animationPriority == 0.0f) {
_rotationInParentFrame = safeMix(_rotationInParentFrame, _fbxJoint->rotation, fraction);
_animationPriority = 0.0f;
}
}
void JointState::setRotationFromBindFrame(const glm::quat& rotation, float priority) {
assert(_fbxJoint != NULL);
if (priority >= _animationPriority) {
// rotation is from bind- to model-frame
_rotationInParentFrame = _rotationInParentFrame * glm::inverse(_rotation) * rotation * glm::inverse(_fbxJoint->inverseBindRotation);
_animationPriority = priority;
}
}
void JointState::clearTransformTranslation() {
_transform[3][0] = 0.0f;
_transform[3][1] = 0.0f;
_transform[3][2] = 0.0f;
}
void JointState::setRotation(const glm::quat& rotation, bool constrain, float priority) {
applyRotationDelta(rotation * glm::inverse(_rotation), true, priority);
}
void JointState::applyRotationDelta(const glm::quat& delta, bool constrain, float priority) {
// NOTE: delta is in jointParent-frame
assert(_fbxJoint != NULL);
if (priority < _animationPriority) {
return;
}
_animationPriority = priority;
if (!constrain || (_fbxJoint->rotationMin == glm::vec3(-PI, -PI, -PI) &&
_fbxJoint->rotationMax == glm::vec3(PI, PI, PI))) {
// no constraints
_rotationInParentFrame = _rotationInParentFrame * glm::inverse(_rotation) * delta * _rotation;
_rotation = delta * _rotation;
return;
}
glm::quat targetRotation = delta * _rotation;
glm::vec3 eulers = safeEulerAngles(_rotationInParentFrame * glm::inverse(_rotation) * targetRotation);
glm::quat newRotation = glm::quat(glm::clamp(eulers, _fbxJoint->rotationMin, _fbxJoint->rotationMax));
_rotation = _rotation * glm::inverse(_rotationInParentFrame) * newRotation;
_rotationInParentFrame = newRotation;
}
const glm::vec3& JointState::getDefaultTranslationInParentFrame() const {
assert(_fbxJoint != NULL);
return _fbxJoint->translation;
}
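
The constraint branch of applyRotationDelta works by expressing the target rotation in the joint's parent-relative frame, clamping its Euler decomposition to the FBX joint limits, and mapping the clamped result back. A standalone sketch of that middle step, using glm::eulerAngles where the source uses its safeEulerAngles helper:

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // rotationInParentFrame: joint -> parentJoint; rotation: joint -> model;
    // delta: desired change, in the parent frame (as in applyRotationDelta)
    glm::quat clampToJointLimits(const glm::quat& rotationInParentFrame,
                                 const glm::quat& rotation,
                                 const glm::quat& delta,
                                 const glm::vec3& rotationMin,
                                 const glm::vec3& rotationMax) {
        glm::quat targetRotation = delta * rotation;
        // carry the target into the parent-relative frame before clamping
        glm::vec3 eulers = glm::eulerAngles(rotationInParentFrame * glm::inverse(rotation) * targetRotation);
        return glm::quat(glm::clamp(eulers, rotationMin, rotationMax));
    }
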


@@ -0,0 +1,66 @@
//
// JointState.h
// interface/src/renderer
//
// Created by Andrzej Kapolka on 10/18/13.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_JointState_h
#define hifi_JointState_h
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/transform.hpp>
#include <FBXReader.h>
class JointState {
public:
JointState();
void setFBXJoint(const FBXJoint* joint);
const FBXJoint& getFBXJoint() const { return *_fbxJoint; }
void copyState(const JointState& state);
void computeTransform(const glm::mat4& parentTransform);
const glm::mat4& getTransform() const { return _transform; }
glm::quat getRotation() const { return _rotation; }
glm::vec3 getPosition() const { return extractTranslation(_transform); }
/// \return rotation from bind to model frame
glm::quat getRotationFromBindToModelFrame() const;
/// \param rotation rotation of joint in model-frame
void setRotation(const glm::quat& rotation, bool constrain, float priority);
/// \param delta is in the jointParent-frame
void applyRotationDelta(const glm::quat& delta, bool constrain = true, float priority = 1.0f);
const glm::vec3& getDefaultTranslationInParentFrame() const;
void restoreRotation(float fraction, float priority);
/// \param rotation is from bind- to model-frame
/// computes and sets new _rotationInParentFrame
/// NOTE: the JointState's model-frame transform/rotation are NOT updated!
void setRotationFromBindFrame(const glm::quat& rotation, float priority);
void clearTransformTranslation();
glm::quat _rotationInParentFrame; // joint- to parentJoint-frame
float _animationPriority; // the priority of the animation affecting this joint
private:
glm::mat4 _transform; // joint- to model-frame
glm::quat _rotation; // joint- to model-frame
const FBXJoint* _fbxJoint; // JointState does NOT own its FBXJoint
};
#endif // hifi_JointState_h


@@ -510,12 +510,12 @@ bool Model::updateGeometry() {
deleteGeometry();
_dilatedTextures.clear();
_geometry = geometry;
_jointStates = newJointStates;
setJointStates(newJointStates);
needToRebuild = true;
} else if (_jointStates.isEmpty()) {
const FBXGeometry& fbxGeometry = geometry->getFBXGeometry();
if (fbxGeometry.joints.size() > 0) {
_jointStates = createJointStates(fbxGeometry);
setJointStates(createJointStates(fbxGeometry));
needToRebuild = true;
}
} else if (!geometry->isLoaded()) {
@@ -557,6 +557,11 @@ bool Model::updateGeometry() {
return needFullUpdate;
}
// virtual
void Model::setJointStates(QVector<JointState> states) {
_jointStates = states;
}
bool Model::render(float alpha, RenderMode mode, bool receiveShadows) {
// render the attachments
foreach (Model* attachment, _attachments) {
@@ -1974,89 +1979,3 @@ void AnimationHandle::replaceMatchingPriorities(float newPriority) {
}
}
// ----------------------------------------------------------------------------
// JointState TODO: move this class to its own files
// ----------------------------------------------------------------------------
JointState::JointState() :
_animationPriority(0.0f),
_fbxJoint(NULL) {
}
void JointState::setFBXJoint(const FBXJoint* joint) {
assert(joint != NULL);
_rotationInParentFrame = joint->rotation;
// NOTE: JointState does not own the FBXJoint to which it points.
_fbxJoint = joint;
}
void JointState::copyState(const JointState& state) {
_rotationInParentFrame = state._rotationInParentFrame;
_transform = state._transform;
_rotation = extractRotation(_transform);
_animationPriority = state._animationPriority;
// DO NOT copy _fbxJoint
}
void JointState::computeTransform(const glm::mat4& parentTransform) {
glm::quat modifiedRotation = _fbxJoint->preRotation * _rotationInParentFrame * _fbxJoint->postRotation;
glm::mat4 modifiedTransform = _fbxJoint->preTransform * glm::mat4_cast(modifiedRotation) * _fbxJoint->postTransform;
_transform = parentTransform * glm::translate(_fbxJoint->translation) * modifiedTransform;
_rotation = extractRotation(_transform);
}
glm::quat JointState::getRotationFromBindToModelFrame() const {
return _rotation * _fbxJoint->inverseBindRotation;
}
void JointState::restoreRotation(float fraction, float priority) {
assert(_fbxJoint != NULL);
if (priority == _animationPriority || _animationPriority == 0.0f) {
_rotationInParentFrame = safeMix(_rotationInParentFrame, _fbxJoint->rotation, fraction);
_animationPriority = 0.0f;
}
}
void JointState::setRotationFromBindFrame(const glm::quat& rotation, float priority) {
assert(_fbxJoint != NULL);
if (priority >= _animationPriority) {
// rotation is from bind- to model-frame
_rotationInParentFrame = _rotationInParentFrame * glm::inverse(_rotation) * rotation * glm::inverse(_fbxJoint->inverseBindRotation);
_animationPriority = priority;
}
}
void JointState::clearTransformTranslation() {
_transform[3][0] = 0.0f;
_transform[3][1] = 0.0f;
_transform[3][2] = 0.0f;
}
void JointState::setRotation(const glm::quat& rotation, bool constrain, float priority) {
applyRotationDelta(rotation * glm::inverse(_rotation), true, priority);
}
void JointState::applyRotationDelta(const glm::quat& delta, bool constrain, float priority) {
// NOTE: delta is in jointParent-frame
assert(_fbxJoint != NULL);
if (priority < _animationPriority) {
return;
}
_animationPriority = priority;
if (!constrain || (_fbxJoint->rotationMin == glm::vec3(-PI, -PI, -PI) &&
_fbxJoint->rotationMax == glm::vec3(PI, PI, PI))) {
// no constraints
_rotationInParentFrame = _rotationInParentFrame * glm::inverse(_rotation) * delta * _rotation;
_rotation = delta * _rotation;
return;
}
glm::quat targetRotation = delta * _rotation;
glm::vec3 eulers = safeEulerAngles(_rotationInParentFrame * glm::inverse(_rotation) * targetRotation);
glm::quat newRotation = glm::quat(glm::clamp(eulers, _fbxJoint->rotationMin, _fbxJoint->rotationMax));
_rotation = _rotation * glm::inverse(_rotationInParentFrame) * newRotation;
_rotationInParentFrame = newRotation;
}
const glm::vec3& JointState::getDefaultTranslationInParentFrame() const {
assert(_fbxJoint != NULL);
return _fbxJoint->translation;
}


@@ -22,6 +22,7 @@
#include "GeometryCache.h"
#include "InterfaceConfig.h"
#include "JointState.h"
#include "ProgramObject.h"
#include "TextureCache.h"
@@ -30,51 +31,6 @@ class Shape;
typedef QSharedPointer<AnimationHandle> AnimationHandlePointer;
typedef QWeakPointer<AnimationHandle> WeakAnimationHandlePointer;
class JointState {
public:
JointState();
void setFBXJoint(const FBXJoint* joint);
const FBXJoint& getFBXJoint() const { return *_fbxJoint; }
void copyState(const JointState& state);
void computeTransform(const glm::mat4& parentTransform);
const glm::mat4& getTransform() const { return _transform; }
glm::quat getRotation() const { return _rotation; }
glm::vec3 getPosition() const { return extractTranslation(_transform); }
/// \return rotation from bind to model frame
glm::quat getRotationFromBindToModelFrame() const;
/// \param rotation rotation of joint in model-frame
void setRotation(const glm::quat& rotation, bool constrain, float priority);
/// \param delta is in the jointParent-frame
void applyRotationDelta(const glm::quat& delta, bool constrain = true, float priority = 1.0f);
const glm::vec3& getDefaultTranslationInParentFrame() const;
void restoreRotation(float fraction, float priority);
/// \param rotation is from bind- to model-frame
/// computes and sets new _rotationInParentFrame
/// NOTE: the JointState's model-frame transform/rotation are NOT updated!
void setRotationFromBindFrame(const glm::quat& rotation, float priority);
void clearTransformTranslation();
glm::quat _rotationInParentFrame; // joint- to parentJoint-frame
float _animationPriority; // the priority of the animation affecting this joint
private:
glm::mat4 _transform; // joint- to model-frame
glm::quat _rotation; // joint- to model-frame
const FBXJoint* _fbxJoint; // JointState does NOT own its FBXJoint
};
/// A generic 3D model displaying geometry loaded from a URL.
class Model : public QObject {
@@ -250,6 +206,8 @@ protected:
// returns 'true' if needs fullUpdate after geometry change
bool updateGeometry();
virtual void setJointStates(QVector<JointState> states);
void setScaleInternal(const glm::vec3& scale);
void scaleToFit();


@@ -0,0 +1,131 @@
//
// RagDoll.cpp
// interface/src/avatar
//
// Created by Andrew Meadows 2014.05.30
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/transform.hpp>
#include <CollisionInfo.h>
#include <SharedUtil.h>
#include "RagDoll.h"
// ----------------------------------------------------------------------------
// FixedConstraint
// ----------------------------------------------------------------------------
FixedConstraint::FixedConstraint() : _point(NULL), _anchor(0.0f, 0.0f, 0.0f) {
}
float FixedConstraint::enforce() {
assert(_point != NULL);
float distance = glm::distance(_anchor, *_point);
*_point = _anchor;
return distance;
}
void FixedConstraint::setPoint(glm::vec3* point) {
_point = point;
}
void FixedConstraint::setAnchor(const glm::vec3& anchor) {
_anchor = anchor;
}
// ----------------------------------------------------------------------------
// DistanceConstraint
// ----------------------------------------------------------------------------
DistanceConstraint::DistanceConstraint(glm::vec3* pointA, glm::vec3* pointB) : _distance(-1.0f) {
_points[0] = pointA;
_points[1] = pointB;
_distance = glm::distance(*(_points[0]), *(_points[1]));
}
DistanceConstraint::DistanceConstraint(const DistanceConstraint& other) {
_distance = other._distance;
_points[0] = other._points[0];
_points[1] = other._points[1];
}
void DistanceConstraint::setDistance(float distance) {
_distance = fabsf(distance);
}
float DistanceConstraint::enforce() {
float newDistance = glm::distance(*(_points[0]), *(_points[1]));
glm::vec3 direction(0.0f, 1.0f, 0.0f);
if (newDistance > EPSILON) {
direction = (*(_points[0]) - *(_points[1])) / newDistance;
}
glm::vec3 center = 0.5f * (*(_points[0]) + *(_points[1]));
*(_points[0]) = center + (0.5f * _distance) * direction;
*(_points[1]) = center - (0.5f * _distance) * direction;
return glm::abs(newDistance - _distance);
}
// ----------------------------------------------------------------------------
// RagDoll
// ----------------------------------------------------------------------------
RagDoll::RagDoll() {
}
RagDoll::~RagDoll() {
clear();
}
void RagDoll::init(const QVector<JointState>& states) {
clear();
const int numStates = states.size();
_points.reserve(numStates);
for (int i = 0; i < numStates; ++i) {
const JointState& state = states[i];
_points.push_back(state.getPosition());
int parentIndex = state.getFBXJoint().parentIndex;
assert(parentIndex < i);
if (parentIndex != -1) {
DistanceConstraint* stick = new DistanceConstraint(&(_points[i]), &(_points[parentIndex]));
_constraints.push_back(stick);
}
}
}
/// Delete all data.
void RagDoll::clear() {
int numConstraints = _constraints.size();
for (int i = 0; i < numConstraints; ++i) {
delete _constraints[i];
}
_constraints.clear();
_points.clear();
}
float RagDoll::slaveToSkeleton(const QVector<JointState>& states, float fraction) {
const int numStates = states.size();
assert(numStates == _points.size());
fraction = glm::clamp(fraction, 0.0f, 1.0f);
float maxDistance = 0.0f;
for (int i = 0; i < numStates; ++i) {
glm::vec3 oldPoint = _points[i];
_points[i] = (1.0f - fraction) * _points[i] + fraction * states[i].getPosition();
maxDistance = glm::max(maxDistance, glm::distance(oldPoint, _points[i]));
}
return maxDistance;
}
float RagDoll::enforceConstraints() {
float maxDistance = 0.0f;
const int numConstraints = _constraints.size();
for (int i = 0; i < numConstraints; ++i) {
maxDistance = glm::max(maxDistance, _constraints[i]->enforce());
}
return maxDistance;
}
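slaveToSkeleton() and enforceConstraints() are meant to alternate each frame: blend the points part way toward the skeleton, then relax the sticks until movement settles. A hedged usage sketch (the blend fraction, iteration cap, and tolerance are assumptions, not values from this commit):

    #include "RagDoll.h"

    // Hypothetical per-frame driver for the two calls above; the blend
    // fraction, iteration cap, and tolerance are illustrative values.
    void stepRagDoll(RagDoll& doll, const QVector<JointState>& states) {
        const int MAX_ITERATIONS = 4;
        const float MOVEMENT_EPSILON = 1.0e-4f;
        doll.slaveToSkeleton(states, 0.5f); // pull points half way toward the skeleton
        for (int i = 0; i < MAX_ITERATIONS; ++i) {
            // enforceConstraints() returns the max point movement; stop once settled
            if (doll.enforceConstraints() < MOVEMENT_EPSILON) {
                break;
            }
        }
    }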

View file

@ -0,0 +1,78 @@
//
// RagDoll.h
// interface/src/avatar
//
// Created by Andrew Meadows 2014.05.30
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_RagDoll_h
#define hifi_RagDoll_h
#include "renderer/Model.h"
class Constraint {
public:
Constraint() {}
virtual ~Constraint() {}
/// Enforce constraint by moving relevant points.
/// \return max distance of point movement
virtual float enforce() = 0;
};
class FixedConstraint : public Constraint {
public:
FixedConstraint();
float enforce();
void setPoint(glm::vec3* point);
void setAnchor(const glm::vec3& anchor);
private:
glm::vec3* _point;
glm::vec3 _anchor;
};
class DistanceConstraint : public Constraint {
public:
DistanceConstraint(glm::vec3* pointA, glm::vec3* pointB);
DistanceConstraint(const DistanceConstraint& other);
float enforce();
void setDistance(float distance);
private:
float _distance;
glm::vec3* _points[2];
};
class RagDoll {
public:
RagDoll();
virtual ~RagDoll();
/// Create points and constraints based on the topology of a collection of joints.
/// \param states list of connected joint states
void init(const QVector<JointState>& states);
/// Delete all data.
void clear();
/// \param states list of joint states
/// \param fraction range from 0.0 (no movement) to 1.0 (use joint locations)
/// \return max distance of point movement
float slaveToSkeleton(const QVector<JointState>& states, float fraction);
/// Enforce constraints.
/// \return max distance of point movement
float enforceConstraints();
const QVector<glm::vec3>& getPoints() const { return _points; }
private:
QVector<Constraint*> _constraints;
QVector<glm::vec3> _points;
};
#endif // hifi_RagDoll_h

View file

@ -22,7 +22,8 @@
ApplicationOverlay::ApplicationOverlay() :
_framebufferObject(NULL),
_oculusAngle(65.0f * RADIANS_PER_DEGREE),
_distance(0.5f) {
_distance(0.5f),
_uiType(HEMISPHERE) {
}
@ -305,6 +306,8 @@ inline float min(float a, float b) {
return (a < b) ? a : b;
}
const float textureFov = PI / 2.5f;
// Draws the FBO texture for Oculus rift. TODO: Draw a curved texture instead of plane.
void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
@ -316,8 +319,8 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
int mouseX = application->getMouseX();
int mouseY = application->getMouseY();
int widgetWidth = glWidget->width();
int widgetHeight = glWidget->height();
const int widgetWidth = glWidget->width();
const int widgetHeight = glWidget->height();
float magnifyWidth = 80.0f;
float magnifyHeight = 60.0f;
const float magnification = 4.0f;
@ -326,17 +329,22 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
const float halfVerticalAngle = _oculusAngle / 2.0f;
const float overlayAspectRatio = glWidget->width() / (float)glWidget->height();
const float halfOverlayHeight = _distance * tan(halfVerticalAngle);
const float overlayHeight = halfOverlayHeight * 2.0f;
// The more vertices, the better the curve
const int numHorizontalVertices = 20;
const int numVerticalVertices = 20;
// U texture coordinate width at each quad
const float quadTexWidth = 1.0f / (numHorizontalVertices - 1);
const float quadTexHeight = 1.0f / (numVerticalVertices - 1);
// Get horizontal angle and angle increment from vertical angle and aspect ratio
const float horizontalAngle = halfVerticalAngle * 2.0f * overlayAspectRatio;
const float angleIncrement = horizontalAngle / (numHorizontalVertices - 1);
const float halfHorizontalAngle = horizontalAngle / 2;
const float verticalAngleIncrement = _oculusAngle / (numVerticalVertices - 1);
glActiveTexture(GL_TEXTURE0);
glEnable(GL_BLEND);
@ -390,6 +398,8 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
magnifyHeight = widgetHeight - mouseY;
}
const float halfMagnifyHeight = magnifyHeight / 2.0f;
float newWidth = magnifyWidth * magnification;
float newHeight = magnifyHeight * magnification;
@ -406,53 +416,118 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
// Get angle on the UI
float leftAngle = (newMouseX / (float)widgetWidth) * horizontalAngle - halfHorizontalAngle;
float rightAngle = ((newMouseX + newWidth) / (float)widgetWidth) * horizontalAngle - halfHorizontalAngle;
float leftX, rightX, leftZ, rightZ;
float bottomAngle = (newMouseY / (float)widgetHeight) * _oculusAngle - halfVerticalAngle;
float topAngle = ((newMouseY - newHeight) / (float)widgetHeight) * _oculusAngle - halfVerticalAngle;
float leftX, rightX, leftZ, rightZ, topZ, bottomZ;
// Get position on hemisphere using angle
leftX = sin(leftAngle) * _distance;
rightX = sin(rightAngle) * _distance;
leftZ = -cos(leftAngle) * _distance;
rightZ = -cos(rightAngle) * _distance;
float bottomY = (1.0 - newMouseY / (float)widgetHeight) * halfOverlayHeight * 2.0f - halfOverlayHeight;
float topY = bottomY + (newHeight / widgetHeight) * halfOverlayHeight * 2;
if (_uiType == HEMISPHERE) {
//TODO: Remove immediate mode in favor of VBO
glBegin(GL_QUADS);
//Get new UV coordinates from our magnification window
float newULeft = newMouseX / widgetWidth;
float newURight = (newMouseX + newWidth) / widgetWidth;
float newVBottom = 1.0 - newMouseY / widgetHeight;
float newVTop = 1.0 - (newMouseY - newHeight) / widgetHeight;
glTexCoord2f(magnifyULeft, magnifyVBottom); glVertex3f(leftX, topY, leftZ);
glTexCoord2f(magnifyURight, magnifyVBottom); glVertex3f(rightX, topY, rightZ);
glTexCoord2f(magnifyURight, magnifyVTop); glVertex3f(rightX, bottomY, rightZ);
glTexCoord2f(magnifyULeft, magnifyVTop); glVertex3f(leftX, bottomY, leftZ);
// Project our position onto the hemisphere using the UV coordinates
float lX = sin((newULeft - 0.5f) * textureFov);
float rX = sin((newURight - 0.5f) * textureFov);
float bY = sin((newVBottom - 0.5f) * textureFov);
float tY = sin((newVTop - 0.5f) * textureFov);
float dist;
//Bottom Left
dist = sqrt(lX * lX + bY * bY);
float blZ = sqrt(1.0f - dist * dist);
//Top Left
dist = sqrt(lX * lX + tY * tY);
float tlZ = sqrt(1.0f - dist * dist);
//Bottom Right
dist = sqrt(rX * rX + bY * bY);
float brZ = sqrt(1.0f - dist * dist);
//Top Right
dist = sqrt(rX * rX + tY * tY);
float trZ = sqrt(1.0f - dist * dist);
glEnd();
glBegin(GL_QUADS);
glTexCoord2f(magnifyULeft, magnifyVBottom); glVertex3f(lX, tY, -tlZ);
glTexCoord2f(magnifyURight, magnifyVBottom); glVertex3f(rX, tY, -trZ);
glTexCoord2f(magnifyURight, magnifyVTop); glVertex3f(rX, bY, -brZ);
glTexCoord2f(magnifyULeft, magnifyVTop); glVertex3f(lX, bY, -blZ);
glEnd();
} else {
leftX = sin(leftAngle) * _distance;
rightX = sin(rightAngle) * _distance;
leftZ = -cos(leftAngle) * _distance;
rightZ = -cos(rightAngle) * _distance;
if (_uiType == CURVED_SEMICIRCLE) {
topZ = -cos(topAngle * overlayAspectRatio) * _distance;
bottomZ = -cos(bottomAngle * overlayAspectRatio) * _distance;
} else {
// Don't want to use topZ or bottomZ for SEMICIRCLE
topZ = -99999;
bottomZ = -99999;
}
float bottomY = (1.0 - newMouseY / (float)widgetHeight) * halfOverlayHeight * 2.0f - halfOverlayHeight;
float topY = bottomY + (newHeight / widgetHeight) * halfOverlayHeight * 2;
//TODO: Remove immediate mode in favor of VBO
glBegin(GL_QUADS);
glTexCoord2f(magnifyULeft, magnifyVBottom); glVertex3f(leftX, topY, max(topZ, leftZ));
glTexCoord2f(magnifyURight, magnifyVBottom); glVertex3f(rightX, topY, max(topZ, rightZ));
glTexCoord2f(magnifyURight, magnifyVTop); glVertex3f(rightX, bottomY, max(bottomZ, rightZ));
glTexCoord2f(magnifyULeft, magnifyVTop); glVertex3f(leftX, bottomY, max(bottomZ, leftZ));
glEnd();
}
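The HEMISPHERE branch re-centers each UV coordinate, scales it by textureFov, takes sin() for the x/y position, and recovers depth as z = sqrt(1 - x^2 - y^2), so the magnified quad hugs a unit hemisphere. The projection in isolation (hypothetical helper name; with textureFov = PI / 2.5f the argument to sqrtf stays positive):

    #include <cmath>

    // The magnify-window projection in isolation: map a UV pair in [0,1]^2
    // onto a unit hemisphere in front of the camera (-z is forward). With
    // textureFov = PI / 2.5f, x*x + y*y stays below 1, so sqrtf is safe.
    void uvToHemisphere(float u, float v, float textureFov,
                        float& x, float& y, float& z) {
        x = sinf((u - 0.5f) * textureFov);
        y = sinf((v - 0.5f) * textureFov);
        float dist = sqrtf(x * x + y * y);
        z = -sqrtf(1.0f - dist * dist);
    }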
glDepthMask(GL_FALSE);
glDisable(GL_ALPHA_TEST);
//TODO: Remove immediate mode in favor of VBO
glBegin(GL_QUADS);
// Place the vertices in a semicircle curve around the camera
for (int i = 0; i < numHorizontalVertices-1; i++) {
if (_uiType == HEMISPHERE) {
renderTexturedHemisphere();
} else {
glBegin(GL_QUADS);
// Place the vertices in a semicircle curve around the camera
for (int i = 0; i < numHorizontalVertices - 1; i++) {
for (int j = 0; j < numVerticalVertices - 1; j++) {
// Calculate the X and Z coordinates from the angles and radius from camera
leftX = sin(angleIncrement * i - halfHorizontalAngle) * _distance;
rightX = sin(angleIncrement * (i + 1) - halfHorizontalAngle) * _distance;
leftZ = -cos(angleIncrement * i - halfHorizontalAngle) * _distance;
rightZ = -cos(angleIncrement * (i + 1) - halfHorizontalAngle) * _distance;
// Calculate the X and Z coordinates from the angles and radius from camera
leftX = sin(angleIncrement * i - halfHorizontalAngle) * _distance;
rightX = sin(angleIncrement * (i + 1) - halfHorizontalAngle) * _distance;
leftZ = -cos(angleIncrement * i - halfHorizontalAngle) * _distance;
rightZ = -cos(angleIncrement * (i + 1) - halfHorizontalAngle) * _distance;
if (_uiType == CURVED_SEMICIRCLE) {
topZ = -cos((verticalAngleIncrement * (j + 1) - halfVerticalAngle) * overlayAspectRatio) * _distance;
bottomZ = -cos((verticalAngleIncrement * j - halfVerticalAngle) * overlayAspectRatio) * _distance;
} else {
topZ = -99999;
bottomZ = -99999;
}
glTexCoord2f(quadTexWidth * i, 1); glVertex3f(leftX, halfOverlayHeight, leftZ);
glTexCoord2f(quadTexWidth * (i + 1), 1); glVertex3f(rightX, halfOverlayHeight, rightZ);
glTexCoord2f(quadTexWidth * (i + 1), 0); glVertex3f(rightX, -halfOverlayHeight, rightZ);
glTexCoord2f(quadTexWidth * i, 0); glVertex3f(leftX, -halfOverlayHeight, leftZ);
glTexCoord2f(quadTexWidth * i, (j + 1) * quadTexHeight);
glVertex3f(leftX, (j + 1) * quadTexHeight * overlayHeight - halfOverlayHeight, max(topZ, leftZ));
glTexCoord2f(quadTexWidth * (i + 1), (j + 1) * quadTexHeight);
glVertex3f(rightX, (j + 1) * quadTexHeight * overlayHeight - halfOverlayHeight, max(topZ, rightZ));
glTexCoord2f(quadTexWidth * (i + 1), j * quadTexHeight);
glVertex3f(rightX, j * quadTexHeight * overlayHeight - halfOverlayHeight, max(bottomZ, rightZ));
glTexCoord2f(quadTexWidth * i, j * quadTexHeight);
glVertex3f(leftX, j * quadTexHeight * overlayHeight - halfOverlayHeight, max(bottomZ, leftZ));
}
}
glEnd();
}
glEnd();
glPopMatrix();
glDepthMask(GL_TRUE);
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
@ -462,13 +537,106 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
}
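In the SEMICIRCLE paths every column of quads sits on a circular arc of radius _distance around the camera: x = sin(angle) * _distance, z = -cos(angle) * _distance, with the angle swept symmetrically about the forward axis. A worked check that the sweep keeps each column at constant radius (values illustrative):

    #include <cmath>
    #include <cstdio>

    // Sweep 20 columns across a horizontal angle at radius 0.5 and confirm
    // every vertex stays exactly that distance from the camera at the origin.
    int main() {
        const float distance = 0.5f;
        const float horizontalAngle = 2.0f; // radians
        const int numHorizontalVertices = 20;
        const float angleIncrement = horizontalAngle / (numHorizontalVertices - 1);
        const float halfHorizontalAngle = horizontalAngle / 2.0f;
        for (int i = 0; i < numHorizontalVertices; i++) {
            float x = sinf(angleIncrement * i - halfHorizontalAngle) * distance;
            float z = -cosf(angleIncrement * i - halfHorizontalAngle) * distance;
            printf("column %2d: x = % .3f  z = % .3f  radius = %.3f\n",
                   i, x, z, sqrtf(x * x + z * z)); // radius is always 0.500
        }
        return 0;
    }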
void ApplicationOverlay::renderTexturedHemisphere() {
const int slices = 80;
const int stacks = 80;
static VerticesIndices vbo(0, 0);
int vertices = slices * (stacks - 1) + 1;
int indices = slices * 2 * 3 * (stacks - 2) + slices * 3;
if (vbo.first == 0) {
TextureVertex* vertexData = new TextureVertex[vertices];
TextureVertex* vertex = vertexData;
for (int i = 0; i < stacks - 1; i++) {
float phi = PI_OVER_TWO * (float)i / (float)(stacks - 1);
float z = -sinf(phi), radius = cosf(phi);
for (int j = 0; j < slices; j++) {
float theta = TWO_PI * (float)j / (float)slices;
vertex->position.x = sinf(theta) * radius;
vertex->position.y = cosf(theta) * radius;
vertex->position.z = z;
vertex->uv.x = asin(vertex->position.x) / (textureFov) + 0.5f;
vertex->uv.y = asin(vertex->position.y) / (textureFov) + 0.5f;
vertex++;
}
}
vertex->position.x = 0.0f;
vertex->position.y = 0.0f;
vertex->position.z = -1.0f;
vertex->uv.x = 0.5f;
vertex->uv.y = 0.5f;
vertex++;
glGenBuffers(1, &vbo.first);
glBindBuffer(GL_ARRAY_BUFFER, vbo.first);
const int BYTES_PER_VERTEX = sizeof(TextureVertex);
glBufferData(GL_ARRAY_BUFFER, vertices * BYTES_PER_VERTEX, vertexData, GL_STATIC_DRAW);
delete[] vertexData;
GLushort* indexData = new GLushort[indices];
GLushort* index = indexData;
for (int i = 0; i < stacks - 2; i++) {
GLushort bottom = i * slices;
GLushort top = bottom + slices;
for (int j = 0; j < slices; j++) {
int next = (j + 1) % slices;
*(index++) = bottom + j;
*(index++) = top + next;
*(index++) = top + j;
*(index++) = bottom + j;
*(index++) = bottom + next;
*(index++) = top + next;
}
}
GLushort bottom = (stacks - 2) * slices;
GLushort top = bottom + slices;
for (int i = 0; i < slices; i++) {
*(index++) = bottom + i;
*(index++) = bottom + (i + 1) % slices;
*(index++) = top;
}
glGenBuffers(1, &vbo.second);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo.second);
const int BYTES_PER_INDEX = sizeof(GLushort);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices * BYTES_PER_INDEX, indexData, GL_STATIC_DRAW);
delete[] indexData;
} else {
glBindBuffer(GL_ARRAY_BUFFER, vbo.first);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo.second);
}
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(3, GL_FLOAT, sizeof(TextureVertex), (void*)0);
glTexCoordPointer(2, GL_FLOAT, sizeof(TextureVertex), (void*)12);
glDrawRangeElements(GL_TRIANGLES, 0, vertices - 1, indices, GL_UNSIGNED_SHORT, 0);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
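The buffer sizes in renderTexturedHemisphere() follow from the mesh layout: (stacks - 1) rings of slices vertices plus a single pole vertex, two triangles per quad between adjacent rings, and a fan of slices triangles capping the pole. The arithmetic, spelled out:

    #include <cstdio>

    // Check the buffer sizes used above for slices = stacks = 80.
    int main() {
        const int slices = 80;
        const int stacks = 80;
        int vertices = slices * (stacks - 1) + 1;        // rings + pole = 6321
        int quadIndices = slices * 2 * 3 * (stacks - 2); // two triangles per quad
        int fanIndices = slices * 3;                     // triangle fan at the pole
        // 6321 vertices fits comfortably in GLushort indices (max 65535)
        printf("vertices = %d, indices = %d\n", vertices, quadIndices + fanIndices);
        return 0;
    }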
QOpenGLFramebufferObject* ApplicationOverlay::getFramebufferObject() {
if (!_framebufferObject) {
_framebufferObject = new QOpenGLFramebufferObject(Application::getInstance()->getGLWidget()->size());
glBindTexture(GL_TEXTURE_2D, _framebufferObject->texture());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
GLfloat borderColor[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
glBindTexture(GL_TEXTURE_2D, 0);
}
return _framebufferObject;
}

View file

@ -19,6 +19,8 @@ class QOpenGLFramebufferObject;
class ApplicationOverlay {
public:
enum UIType { HEMISPHERE, SEMICIRCLE, CURVED_SEMICIRCLE };
ApplicationOverlay();
~ApplicationOverlay();
@ -32,14 +34,24 @@ public:
// Setters
void setOculusAngle(float oculusAngle) { _oculusAngle = oculusAngle; }
void setUIType(UIType uiType) { _uiType = uiType; }
private:
// Interleaved vertex data
struct TextureVertex {
glm::vec3 position;
glm::vec2 uv;
};
typedef QPair<GLuint, GLuint> VerticesIndices;
void renderTexturedHemisphere();
ProgramObject _textureProgram;
QOpenGLFramebufferObject* _framebufferObject;
float _trailingAudioLoudness;
float _oculusAngle;
float _distance;
UIType _uiType;
};
#endif // hifi_ApplicationOverlay_h

View file

@ -20,14 +20,15 @@
#include "PositionalAudioRingBuffer.h"
PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type) :
AudioRingBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo) :
AudioRingBuffer(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
_type(type),
_position(0.0f, 0.0f, 0.0f),
_orientation(0.0f, 0.0f, 0.0f, 0.0f),
_willBeAddedToMix(false),
_shouldLoopbackForNode(false),
_shouldOutputStarveDebug(true)
_shouldOutputStarveDebug(true),
_isStereo(isStereo)
{
}
@ -40,6 +41,9 @@ int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
// skip the packet header (includes the source UUID)
int readBytes = numBytesForPacketHeader(packet);
// hop over the channel flag that has already been read in AudioMixerClientData
readBytes += sizeof(quint8);
// read the positional data
readBytes += parsePositionalData(packet.mid(readBytes));
if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) {

View file

@ -24,7 +24,7 @@ public:
Injector
};
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type);
PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false);
~PositionalAudioRingBuffer();
int parseData(const QByteArray& packet);
@ -41,6 +41,8 @@ public:
bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
bool isStereo() const { return _isStereo; }
PositionalAudioRingBuffer::Type getType() const { return _type; }
const glm::vec3& getPosition() const { return _position; }
const glm::quat& getOrientation() const { return _orientation; }
@ -56,6 +58,7 @@ protected:
bool _willBeAddedToMix;
bool _shouldLoopbackForNode;
bool _shouldOutputStarveDebug;
bool _isStereo;
float _nextOutputTrailingLoudness;
};
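The new isStereo flag only changes the capacity passed up to AudioRingBuffer; the rest of the ring is untouched. The shape of that decision, in a minimal sketch (the constants stand in for the NETWORK_BUFFER_LENGTH_* values and are illustrative):

    // Minimal sketch of the constructor's capacity choice; the constants
    // stand in for NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL and
    // NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, and the values are illustrative.
    const int SAMPLES_PER_CHANNEL = 512;
    const int SAMPLES_STEREO = 2 * SAMPLES_PER_CHANNEL;

    struct RingBufferSketch {
        explicit RingBufferSketch(bool isStereo) :
            _sampleCapacity(isStereo ? SAMPLES_STEREO : SAMPLES_PER_CHANNEL),
            _isStereo(isStereo) {
        }
        int _sampleCapacity;
        bool _isStereo;
    };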

View file

@ -92,13 +92,14 @@ int Bitstream::registerMetaObject(const char* className, const QMetaObject* meta
}
// register the streamers for all enumerators
for (int i = 0; i < metaObject->enumeratorCount(); i++) {
QMetaEnum metaEnum = metaObject->enumerator(i);
const TypeStreamer*& streamer = getEnumStreamers()[QPair<QByteArray, QByteArray>(metaEnum.scope(), metaEnum.name())];
if (!streamer) {
getEnumStreamersByName().insert(getEnumName(metaEnum), streamer = new EnumTypeStreamer(metaEnum));
}
}
// temporarily disabled: crashes on Windows
//for (int i = 0; i < metaObject->enumeratorCount(); i++) {
// QMetaEnum metaEnum = metaObject->enumerator(i);
// const TypeStreamer*& streamer = getEnumStreamers()[QPair<QByteArray, QByteArray>(metaEnum.scope(), metaEnum.name())];
// if (!streamer) {
// getEnumStreamersByName().insert(getEnumName(metaEnum), streamer = new EnumTypeStreamer(metaEnum));
// }
//}
return 0;
}

View file

@ -47,6 +47,10 @@ int packArithmeticallyCodedValue(int value, char* destination) {
PacketVersion versionForPacketType(PacketType type) {
switch (type) {
case PacketTypeMicrophoneAudioNoEcho:
case PacketTypeMicrophoneAudioWithEcho:
case PacketTypeSilentAudioFrame:
return 1;
case PacketTypeAvatarData:
return 3;
case PacketTypeAvatarIdentity: