merge upstream/master into andrew/ragdoll

This commit is contained in:
Andrew Meadows 2014-08-07 14:37:10 -07:00
commit bb33266635
43 changed files with 1119 additions and 309 deletions

View file

@ -104,7 +104,7 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, A
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
PositionalAudioStream* stream = i.value();
if (stream->popFrames(1)) {
if (stream->popFrames(1, true) > 0) {
// this is a ring buffer that is ready to go
// calculate the trailing avg loudness for the next frame

View file

@ -30,7 +30,7 @@ if (APPLE)
find_library(VISAGE_CORE_LIBRARY NAME vscore PATH_SUFFIXES lib HINTS ${VISAGE_SEARCH_DIRS})
find_library(VISAGE_VISION_LIBRARY NAME vsvision PATH_SUFFIXES lib HINTS ${VISAGE_SEARCH_DIRS})
find_library(VISAGE_OPENCV_LIBRARY NAME OpenCV PATH_SUFFIXES dependencies/OpenCV_MacOSX/lib ${VISAGE_SEARCH_DIRS})
find_library(VISAGE_OPENCV_LIBRARY NAME OpenCV PATH_SUFFIXES dependencies/OpenCV_MacOSX/lib HINTS ${VISAGE_SEARCH_DIRS})
elseif (WIN32)
find_path(VISAGE_XML_INCLUDE_DIR libxml/xmlreader.h PATH_SUFFIXES dependencies/libxml2/include HINTS ${VISAGE_SEARCH_DIRS})

View file

@ -752,7 +752,7 @@ function calculateVoxelFromIntersection(intersection, operation) {
highlightAt.z = z + zFightingSizeAdjust;
voxelSize -= 2 * zFightingSizeAdjust;
if (wantAddAdjust) {
resultVoxel.y += voxelSize;
resultVoxel.y += resultVoxel.s;
}
resultVoxel.bottomRight = {x: highlightAt.x, y: highlightAt.y, z: highlightAt.z };
@ -782,7 +782,7 @@ function calculateVoxelFromIntersection(intersection, operation) {
highlightAt.z = z + voxelSize + zFightingSizeAdjust;
voxelSize -= 2 * zFightingSizeAdjust;
if (wantAddAdjust) {
resultVoxel.z += voxelSize;
resultVoxel.z += resultVoxel.s;
}
resultVoxel.bottomLeft = {x: highlightAt.x, y: highlightAt.y, z: highlightAt.z };

View file

@ -89,7 +89,12 @@ var sittingDownAnimation = function(deltaTime) {
var pos = { x: startPosition.x - 0.3 * factor, y: startPosition.y - 0.5 * factor, z: startPosition.z};
MyAvatar.position = pos;
}
} else {
Script.update.disconnect(sittingDownAnimation);
if (seat.model) {
MyAvatar.setModelReferential(seat.model.id);
}
}
}
var standingUpAnimation = function(deltaTime) {
@ -103,7 +108,10 @@ var standingUpAnimation = function(deltaTime) {
var pos = { x: startPosition.x + 0.3 * (passedTime/animationLenght), y: startPosition.y + 0.5 * (passedTime/animationLenght), z: startPosition.z};
MyAvatar.position = pos;
}
} else {
Script.update.disconnect(standingUpAnimation);
}
}
var goToSeatAnimation = function(deltaTime) {
@ -147,7 +155,8 @@ function standUp() {
print("standUp sitting status: " + Settings.getValue(sittingSettingsHandle, false));
passedTime = 0.0;
startPosition = MyAvatar.position;
try{
MyAvatar.clearReferential();
try{
Script.update.disconnect(sittingDownAnimation);
} catch (e){}
Script.update.connect(standingUpAnimation);
@ -197,8 +206,10 @@ Controller.mousePressEvent.connect(function(event) {
var clickedOverlay = Overlays.getOverlayAtPoint({x: event.x, y: event.y});
if (clickedOverlay == sitDownButton) {
seat.model = null;
sitDown();
} else if (clickedOverlay == standUpButton) {
seat.model = null;
standUp();
} else {
var pickRay = Camera.computePickRay(event.x, event.y);
@ -214,6 +225,7 @@ Controller.mousePressEvent.connect(function(event) {
model.properties.sittingPoints[i].indicator.position,
model.properties.sittingPoints[i].indicator.scale / 2)) {
clickedOnSeat = true;
seat.model = model;
seat.position = model.properties.sittingPoints[i].indicator.position;
seat.rotation = model.properties.sittingPoints[i].indicator.orientation;
}
@ -355,6 +367,7 @@ Script.update.connect(update);
Controller.keyPressEvent.connect(keyPressEvent);
Script.scriptEnding.connect(function() {
MyAvatar.clearReferential();
for (var i = 0; i < pose.length; i++){
MyAvatar.clearJointData(pose[i].joint);
}

View file

@ -14,18 +14,26 @@
// the height texture
uniform sampler2D heightMap;
// the distance between height points in texture space
uniform float heightScale;
// the interpolated normal
varying vec4 normal;
void main(void) {
// transform and store the normal for interpolation
normal = normalize(gl_ModelViewMatrix * vec4(0.0, 1.0, 0.0, 0.0));
vec2 heightCoord = gl_MultiTexCoord0.st;
float deltaX = texture2D(heightMap, heightCoord - vec2(heightScale, 0.0)).r -
texture2D(heightMap, heightCoord + vec2(heightScale, 0.0)).r;
float deltaZ = texture2D(heightMap, heightCoord - vec2(0.0, heightScale)).r -
texture2D(heightMap, heightCoord + vec2(0.0, heightScale)).r;
normal = normalize(gl_ModelViewMatrix * vec4(deltaX, heightScale, deltaZ, 0.0));
// pass along the texture coordinates
gl_TexCoord[0] = gl_MultiTexCoord0;
// add the height to the position
float height = texture2D(heightMap, gl_MultiTexCoord0.st).r;
float height = texture2D(heightMap, heightCoord).r;
gl_Position = gl_ModelViewProjectionMatrix * (gl_Vertex + vec4(0.0, height, 0.0, 0.0));
// the zero height should be invisible

View file

@ -2139,12 +2139,6 @@ void Application::update(float deltaTime) {
_prioVR.update(deltaTime);
}
{
PerformanceTimer perfTimer("myAvatar");
updateMyAvatarLookAtPosition();
updateMyAvatar(deltaTime); // Sample hardware, update view frustum if needed, and send avatar data to mixer/nodes
}
// Dispatch input events
_controllerScriptingInterface.updateInputControllers();
@ -2176,6 +2170,12 @@ void Application::update(float deltaTime) {
PerformanceTimer perfTimer("overlays");
_overlays.update(deltaTime);
}
{
PerformanceTimer perfTimer("myAvatar");
updateMyAvatarLookAtPosition();
updateMyAvatar(deltaTime); // Sample hardware, update view frustum if needed, and send avatar data to mixer/nodes
}
{
PerformanceTimer perfTimer("emitSimulating");

View file

@ -54,6 +54,8 @@ static const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.
// Mute icon configuration
static const int MUTE_ICON_SIZE = 24;
static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 100;
Audio::Audio(QObject* parent) :
AbstractAudioInterface(parent),
@ -64,19 +66,14 @@ Audio::Audio(QObject* parent) :
_audioOutput(NULL),
_desiredOutputFormat(),
_outputFormat(),
_outputDevice(NULL),
_outputFrameSize(0),
_numOutputCallbackBytes(0),
_loopbackAudioOutput(NULL),
_loopbackOutputDevice(NULL),
_proceduralAudioOutput(NULL),
_proceduralOutputDevice(NULL),
// NOTE: Be very careful making changes to the initializers of these ring buffers. There is a known problem with some
// Mac audio devices that slowly introduce additional delay in the audio device because they play out audio slightly
// slower than real time (or at least the desired sample rate). If you increase the size of the ring buffer, then
// this delay will slowly add up and the longer someone runs, the more delayed their audio will be.
_inputRingBuffer(0),
_receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, 0, 0, true),
_receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, true, 0, 0, true),
_isStereoInput(false),
_averagedLatency(0.0),
_lastInputLoudness(0),
@ -115,13 +112,15 @@ Audio::Audio(QObject* parent) :
_inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_lastSentAudioPacket(0),
_packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS)
_packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS),
_audioOutputIODevice(*this)
{
// clear the array of locally injected samples
memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
// Create the noise sample array
_noiseSampleFrames = new float[NUMBER_OF_NOISE_SAMPLE_FRAMES];
connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedAudioStreamSamples, Qt::DirectConnection);
}
void Audio::init(QGLWidget *parent) {
@ -312,7 +311,7 @@ bool adjustedFormatForAudioDevice(const QAudioDeviceInfo& audioDevice,
}
}
void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
void linearResampling(const int16_t* sourceSamples, int16_t* destinationSamples,
unsigned int numSourceSamples, unsigned int numDestinationSamples,
const QAudioFormat& sourceAudioFormat, const QAudioFormat& destinationAudioFormat) {
if (sourceAudioFormat == destinationAudioFormat) {
@ -723,21 +722,92 @@ void Audio::handleAudioInput() {
}
delete[] inputAudioSamples;
}
}
if (_receivedAudioStream.getPacketsReceived() > 0) {
pushAudioToOutput();
// Convert one chunk of received network-format samples into device-format
// samples for playout. Invoked (via a DirectConnection to
// MixedProcessedAudioStream::processSamples) whenever the received stream has
// decoded samples ready.
//   inputBuffer  - int16 samples at the desired (network) output format
//   outputBuffer - resized here and filled with int16 samples resampled to
//                  the actual device output format
void Audio::processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
const int numNetworkOutputSamples = inputBuffer.size() / sizeof(int16_t);
// Scale the sample count by the ratio of device rate*channels to
// network rate*channels to size the resampled output.
const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
/ (_desiredOutputFormat.sampleRate() * _desiredOutputFormat.channelCount());
outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
// Points at whichever buffer (spatial-processing scratch buffer or the raw
// input) feeds the resampler below.
const int16_t* receivedSamples;
if (_processSpatialAudio) {
unsigned int sampleTime = _spatialAudioStart;
QByteArray buffer = inputBuffer;
// Accumulate direct transmission of audio from sender to receiver
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
}
// Send audio off for spatial processing
emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
// copy the samples we'll resample from the spatial audio ring buffer - this also
// pushes the read pointer of the spatial audio ring buffer forwards
_spatialAudioRingBuffer.readSamples(_outputProcessingBuffer, numNetworkOutputSamples);
// Advance the start point for the next packet of audio to arrive
_spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
receivedSamples = _outputProcessingBuffer;
} else {
// copy the samples we'll resample from the ring buffer - this also
// pushes the read pointer of the ring buffer forwards
//receivedAudioStreamPopOutput.readSamples(receivedSamples, numNetworkOutputSamples);
// Spatial processing disabled: resample straight out of the input buffer.
receivedSamples = reinterpret_cast<const int16_t*>(inputBuffer.data());
}
// copy the packet from the RB to the output
linearResampling(receivedSamples,
(int16_t*)outputBuffer.data(),
numNetworkOutputSamples,
numDeviceOutputSamples,
_desiredOutputFormat, _outputFormat);
// Feed the (pre-resampling) samples to the on-screen audio scope, one
// network frame at a time, splitting left/right channels.
if (_scopeEnabled && !_scopeEnabledPause) {
unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
const int16_t* samples = receivedSamples;
for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
unsigned int audioChannel = 0;
addBufferToScope(
_scopeOutputLeft,
_scopeOutputOffset,
samples, audioChannel, numAudioChannels);
audioChannel = 1;
addBufferToScope(
_scopeOutputRight,
_scopeOutputOffset,
samples, audioChannel, numAudioChannels);
// Wrap the scope write offset so the display buffer is circular.
_scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
_scopeOutputOffset %= _samplesPerScope;
samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
}
}
}
void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
if (_audioOutput) {
// Audio output must exist and be correctly set up if we're going to process received audio
processReceivedAudio(audioByteArray);
_receivedAudioStream.parseData(audioByteArray);
}
Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());
}
void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
int numBytesPacketHeader = numBytesForPacketHeader(packet);
@ -901,119 +971,6 @@ void Audio::toggleStereoInput() {
}
}
void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
// parse audio data
_receivedAudioStream.parseData(audioByteArray);
// This call has been moved to handleAudioInput. handleAudioInput is called at a much more regular interval
// than processReceivedAudio since handleAudioInput does not experience network-related jitter.
// This way, we reduce the jitter of the frames being pushed to the audio output, allowing us to use a reduced
// buffer size for it, which reduces latency.
//pushAudioToOutput();
}
void Audio::pushAudioToOutput() {
if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
// the audio output has no samples to play. set the downstream audio to starved so that it
// refills to its desired size before pushing frames
_receivedAudioStream.setToStarved();
}
float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
* (_desiredOutputFormat.channelCount() / (float)_outputFormat.channelCount());
int numFramesToPush;
if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) {
numFramesToPush = _receivedAudioStream.getFramesAvailable();
} else {
// make sure to push a whole number of frames to the audio output
int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio) / _receivedAudioStream.getNumFrameSamples();
numFramesToPush = std::min(_receivedAudioStream.getFramesAvailable(), numFramesAudioOutputRoomFor);
}
// if there is data in the received stream and room in the audio output, decide what to do
if (numFramesToPush > 0 && _receivedAudioStream.popFrames(numFramesToPush, false)) {
int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
QByteArray outputBuffer;
outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
AudioRingBuffer::ConstIterator receivedAudioStreamPopOutput = _receivedAudioStream.getLastPopOutput();
int16_t* receivedSamples = new int16_t[numNetworkOutputSamples];
if (_processSpatialAudio) {
unsigned int sampleTime = _spatialAudioStart;
QByteArray buffer;
buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
receivedAudioStreamPopOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
// Accumulate direct transmission of audio from sender to receiver
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
}
// Send audio off for spatial processing
emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
// copy the samples we'll resample from the spatial audio ring buffer - this also
// pushes the read pointer of the spatial audio ring buffer forwards
_spatialAudioRingBuffer.readSamples(receivedSamples, numNetworkOutputSamples);
// Advance the start point for the next packet of audio to arrive
_spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
} else {
// copy the samples we'll resample from the ring buffer - this also
// pushes the read pointer of the ring buffer forwards
receivedAudioStreamPopOutput.readSamples(receivedSamples, numNetworkOutputSamples);
}
// copy the packet from the RB to the output
linearResampling(receivedSamples,
(int16_t*)outputBuffer.data(),
numNetworkOutputSamples,
numDeviceOutputSamples,
_desiredOutputFormat, _outputFormat);
if (_outputDevice) {
_outputDevice->write(outputBuffer);
}
if (_scopeEnabled && !_scopeEnabledPause) {
unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
int16_t* samples = receivedSamples;
for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
unsigned int audioChannel = 0;
addBufferToScope(
_scopeOutputLeft,
_scopeOutputOffset,
samples, audioChannel, numAudioChannels);
audioChannel = 1;
addBufferToScope(
_scopeOutputRight,
_scopeOutputOffset,
samples, audioChannel, numAudioChannels);
_scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
_scopeOutputOffset %= _samplesPerScope;
samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
}
}
delete[] receivedSamples;
}
}
void Audio::processProceduralAudio(int16_t* monoInput, int numSamples) {
// zero out the locally injected audio in preparation for audio procedural sounds
@ -1514,11 +1471,11 @@ void Audio::renderScope(int width, int height) {
if (!_scopeEnabled)
return;
static const float backgroundColor[4] = { 0.2f, 0.2f, 0.2f, 0.6f };
static const float backgroundColor[4] = { 0.4f, 0.4f, 0.4f, 0.6f };
static const float gridColor[4] = { 0.3f, 0.3f, 0.3f, 0.6f };
static const float inputColor[4] = { 0.3f, .7f, 0.3f, 0.6f };
static const float outputLeftColor[4] = { 0.7f, .3f, 0.3f, 0.6f };
static const float outputRightColor[4] = { 0.3f, .3f, 0.7f, 0.6f };
static const float inputColor[4] = { 0.3f, 1.0f, 0.3f, 1.0f };
static const float outputLeftColor[4] = { 1.0f, 0.3f, 0.3f, 1.0f };
static const float outputRightColor[4] = { 0.3f, 0.3f, 1.0f, 1.0f };
static const int gridRows = 2;
int gridCols = _framesPerScope;
@ -1631,6 +1588,12 @@ void Audio::renderLineStrip(const float* color, int x, int y, int n, int offset,
}
void Audio::outputFormatChanged() {
int outputFormatChannelCountTimesSampleRate = _outputFormat.channelCount() * _outputFormat.sampleRate();
_outputFrameSize = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * outputFormatChannelCountTimesSampleRate / _desiredOutputFormat.sampleRate();
_receivedAudioStream.outputFormatChanged(outputFormatChannelCountTimesSampleRate);
}
bool Audio::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {
bool supportedFormat = false;
@ -1681,7 +1644,6 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
// cleanup any previously initialized device
if (_audioOutput) {
_audioOutput->stop();
_outputDevice = NULL;
delete _audioOutput;
_audioOutput = NULL;
@ -1703,13 +1665,17 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
if (adjustedFormatForAudioDevice(outputDeviceInfo, _desiredOutputFormat, _outputFormat)) {
qDebug() << "The format to be used for audio output is" << _outputFormat;
const int AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 10;
outputFormatChanged();
const int AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 3;
// setup our general output device for audio-mixer audio
_audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
_audioOutput->setBufferSize(AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS));
qDebug() << "Ring Buffer capacity in frames: " << AUDIO_OUTPUT_BUFFER_SIZE_FRAMES;
_outputDevice = _audioOutput->start();
_audioOutput->setBufferSize(AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * _outputFrameSize * sizeof(int16_t));
qDebug() << "Ring Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize;
_audioOutputIODevice.start();
_audioOutput->start(&_audioOutputIODevice);
// setup a loopback audio output device
_loopbackAudioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
@ -1779,3 +1745,21 @@ float Audio::getInputRingBufferMsecsAvailable() const {
float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
return msecsInInputRingBuffer;
}
// QIODevice pull-mode callback: QAudioOutput calls this when it needs more
// audio. Pops decoded samples from the parent's received-audio stream into
// 'data'; writes silence when no samples are available so playback keeps
// running. Returns the number of bytes written.
qint64 Audio::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
    MixedProcessedAudioStream& receivedAudioStream = _parent._receivedAudioStream;
    int samplesRequested = maxSize / sizeof(int16_t);
    int samplesPopped = receivedAudioStream.popSamples(samplesRequested, false);
    if (samplesPopped > 0) {
        // Copy the just-popped samples out of the stream's ring buffer.
        AudioRingBuffer::ConstIterator lastPopOutput = receivedAudioStream.getLastPopOutput();
        lastPopOutput.readSamples((int16_t*)data, samplesPopped);
        return samplesPopped * sizeof(int16_t);
    }
    // Nothing ready: hand the device a full buffer of silence.
    memset(data, 0, maxSize);
    return maxSize;
}

View file

@ -33,7 +33,7 @@
#include <AbstractAudioInterface.h>
#include <StdDev.h>
#include "MixedAudioStream.h"
#include "MixedProcessedAudioStream.h"
static const int NUM_AUDIO_CHANNELS = 2;
@ -45,6 +45,20 @@ class QIODevice;
class Audio : public AbstractAudioInterface {
Q_OBJECT
public:
// Read-only QIODevice adaptor that lets QAudioOutput pull mixed audio
// directly from the owning Audio object (see readData in Audio.cpp).
class AudioOutputIODevice : public QIODevice {
public:
AudioOutputIODevice(Audio& parent) : _parent(parent) {};
// Open for reading so QAudioOutput can start pulling samples.
void start() { open(QIODevice::ReadOnly); }
void stop() { close(); }
// Pulls samples from _parent's received audio stream; defined in Audio.cpp.
qint64 readData(char * data, qint64 maxSize);
// Device is read-only; writes are rejected.
// NOTE(review): Qt documents writeData as returning -1 on error; returning 0
// here may cause a caller to spin -- confirm against QIODevice docs.
qint64 writeData(const char * data, qint64 maxSize) { return 0; }
private:
// Non-owning back-reference to the Audio object whose stream we read.
Audio& _parent;
};
// setup for audio I/O
Audio(QObject* parent = 0);
@ -94,6 +108,7 @@ public slots:
void addReceivedAudioToStream(const QByteArray& audioByteArray);
void parseAudioStreamStatsPacket(const QByteArray& packet);
void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
void processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
void handleAudioInput();
void reset();
void resetStats();
@ -133,7 +148,10 @@ signals:
void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format);
void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
private:
void outputFormatChanged();
private:
QByteArray firstInputFrame;
@ -146,14 +164,15 @@ private:
QAudioOutput* _audioOutput;
QAudioFormat _desiredOutputFormat;
QAudioFormat _outputFormat;
QIODevice* _outputDevice;
int _outputFrameSize;
int16_t _outputProcessingBuffer[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO];
int _numOutputCallbackBytes;
QAudioOutput* _loopbackAudioOutput;
QIODevice* _loopbackOutputDevice;
QAudioOutput* _proceduralAudioOutput;
QIODevice* _proceduralOutputDevice;
AudioRingBuffer _inputRingBuffer;
MixedAudioStream _receivedAudioStream;
MixedProcessedAudioStream _receivedAudioStream;
bool _isStereoInput;
QString _inputAudioDeviceName;
@ -211,12 +230,6 @@ private:
// Add sounds that we want the user to not hear themselves, by adding on top of mic input signal
void addProceduralSounds(int16_t* monoInput, int numSamples);
// Process received audio
void processReceivedAudio(const QByteArray& audioByteArray);
// Pushes frames from the output ringbuffer to the audio output device
void pushAudioToOutput();
bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo);
bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo);
@ -282,6 +295,8 @@ private:
quint64 _lastSentAudioPacket;
MovingMinMaxAvg<quint64> _packetSentTimeGaps;
AudioOutputIODevice _audioOutputIODevice;
};

View file

@ -606,8 +606,6 @@ Menu::Menu() :
appInstance->getAudio(),
SLOT(toggleStatsShowInjectedStreams()));
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, false);
addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
Qt::CTRL | Qt::SHIFT | Qt::Key_V,
this,

View file

@ -353,7 +353,6 @@ namespace MenuOption {
const QString DisableActivityLogger = "Disable Activity Logger";
const QString DisableAutoAdjustLOD = "Disable Automatically Adjusting LOD";
const QString DisableNackPackets = "Disable NACK Packets";
const QString DisableQAudioOutputOverflowCheck = "Disable Audio Output Device Overflow Check";
const QString DisplayFrustum = "Display Frustum";
const QString DisplayHands = "Display Hands";
const QString DisplayHandTargets = "Display Hand Targets";

View file

@ -359,9 +359,9 @@ void HeightfieldBuffer::render() {
int nextLineIndex = (i + 1) * sizeWithSkirt;
for (int j = 0; j < rows; j++) {
*index++ = lineIndex + j;
*index++ = lineIndex + j + 1;
*index++ = nextLineIndex + j + 1;
*index++ = nextLineIndex + j;
*index++ = nextLineIndex + j + 1;
*index++ = lineIndex + j + 1;
}
}
@ -388,6 +388,9 @@ void HeightfieldBuffer::render() {
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, _colorTextureID);
DefaultMetavoxelRendererImplementation::getHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getHeightScaleLocation(), 1.0f / _heightSize);
glDrawRangeElements(GL_QUADS, 0, vertexCount - 1, indexCount, GL_UNSIGNED_INT, 0);
glBindTexture(GL_TEXTURE_2D, 0);
@ -405,6 +408,7 @@ QHash<int, HeightfieldBuffer::BufferPair> HeightfieldBuffer::_bufferPairs;
void HeightfieldPreview::render(const glm::vec3& translation, float scale) const {
glDisable(GL_BLEND);
glEnable(GL_CULL_FACE);
glEnable(GL_ALPHA_TEST);
glAlphaFunc(GL_EQUAL, 0.0f);
@ -431,6 +435,7 @@ void HeightfieldPreview::render(const glm::vec3& translation, float scale) const
glDisableClientState(GL_VERTEX_ARRAY);
glDisable(GL_ALPHA_TEST);
glDisable(GL_CULL_FACE);
glEnable(GL_BLEND);
}
@ -468,6 +473,7 @@ void DefaultMetavoxelRendererImplementation::init() {
_heightfieldProgram.bind();
_heightfieldProgram.setUniformValue("heightMap", 0);
_heightfieldProgram.setUniformValue("diffuseMap", 1);
_heightScaleLocation = _heightfieldProgram.uniformLocation("heightScale");
_heightfieldProgram.release();
}
}
@ -737,6 +743,7 @@ void DefaultMetavoxelRendererImplementation::render(MetavoxelData& data, Metavox
_pointProgram.release();
glEnable(GL_CULL_FACE);
glEnable(GL_ALPHA_TEST);
glAlphaFunc(GL_EQUAL, 0.0f);
@ -756,12 +763,14 @@ void DefaultMetavoxelRendererImplementation::render(MetavoxelData& data, Metavox
glDisableClientState(GL_VERTEX_ARRAY);
glDisable(GL_ALPHA_TEST);
glDisable(GL_CULL_FACE);
glEnable(GL_BLEND);
}
ProgramObject DefaultMetavoxelRendererImplementation::_pointProgram;
int DefaultMetavoxelRendererImplementation::_pointScaleLocation;
ProgramObject DefaultMetavoxelRendererImplementation::_heightfieldProgram;
int DefaultMetavoxelRendererImplementation::_heightScaleLocation;
static void enableClipPlane(GLenum plane, float x, float y, float z, float w) {
GLdouble coefficients[] = { x, y, z, w };

View file

@ -191,6 +191,7 @@ public:
static void init();
static ProgramObject& getHeightfieldProgram() { return _heightfieldProgram; }
static int getHeightScaleLocation() { return _heightScaleLocation; }
Q_INVOKABLE DefaultMetavoxelRendererImplementation();
@ -204,6 +205,7 @@ private:
static int _pointScaleLocation;
static ProgramObject _heightfieldProgram;
static int _heightScaleLocation;
};
/// Base class for spanner renderers; provides clipping.

View file

@ -27,6 +27,7 @@
#include "Hand.h"
#include "Head.h"
#include "Menu.h"
#include "ModelReferential.h"
#include "Physics.h"
#include "world.h"
#include "devices/OculusManager.h"
@ -102,6 +103,31 @@ float Avatar::getLODDistance() const {
void Avatar::simulate(float deltaTime) {
PerformanceTimer perfTimer("simulate");
// update the avatar's position according to its referential
if (_referential) {
if (_referential->hasExtraData()) {
ModelTree* tree = Application::getInstance()->getModels()->getTree();
switch (_referential->type()) {
case Referential::MODEL:
_referential = new ModelReferential(_referential,
tree,
this);
break;
case Referential::JOINT:
_referential = new JointReferential(_referential,
tree,
this);
break;
default:
qDebug() << "[WARNING] Avatar::simulate(): Unknown referential type.";
break;
}
}
_referential->update();
}
if (_scale != _targetScale) {
setScale(_targetScale);
}
@ -218,6 +244,9 @@ static TextRenderer* textRenderer(TextRendererType type) {
}
void Avatar::render(const glm::vec3& cameraPosition, RenderMode renderMode) {
if (_referential) {
_referential->update();
}
if (glm::distance(Application::getInstance()->getAvatar()->getPosition(),
_position) < 10.0f) {
@ -268,7 +297,7 @@ void Avatar::render(const glm::vec3& cameraPosition, RenderMode renderMode) {
float boundingRadius = getBillboardSize();
ViewFrustum* frustum = (renderMode == Avatar::SHADOW_RENDER_MODE) ?
Application::getInstance()->getShadowViewFrustum() : Application::getInstance()->getViewFrustum();
if (frustum->sphereInFrustum(_position, boundingRadius) == ViewFrustum::OUTSIDE) {
if (frustum->sphereInFrustum(getPosition(), boundingRadius) == ViewFrustum::OUTSIDE) {
return;
}

View file

@ -209,7 +209,7 @@ protected:
virtual void renderAttachments(RenderMode renderMode);
virtual void updateJointMappings();
private:
bool _initialized;

View file

@ -41,7 +41,7 @@ void AvatarManager::init() {
}
void AvatarManager::updateOtherAvatars(float deltaTime) {
if (_avatarHash.size() < 2) {
if (_avatarHash.size() < 2 && _avatarFades.isEmpty()) {
return;
}
bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);

View file

@ -0,0 +1,189 @@
//
// ModelReferential.cpp
//
//
// Created by Clement on 7/30/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <AvatarData.h>
#include "ModelTree.h"
#include "../renderer/Model.h"
#include "ModelReferential.h"
// Rebuild a model-anchored referential from a generic (network-decoded)
// Referential: copies the avatar-relative transform, decodes the model ID
// from the extra-data blob, and -- if the model exists in the tree --
// captures its current transform as the reference frame and applies it.
ModelReferential::ModelReferential(Referential* referential, ModelTree* tree, AvatarData* avatar) :
Referential(MODEL, avatar),
_tree(tree) {
// Copy the avatar's transform relative to the reference model.
_translation = referential->getTranslation();
_rotation = referential->getRotation();
_scale = referential->getScale();
// Decode _modelID from the serialized extra data carried by the source.
unpackExtraData(reinterpret_cast<unsigned char*>(referential->getExtraData().data()),
referential->getExtraData().size());
if (!isValid()) {
qDebug() << "ModelReferential::copyConstructor(): Not Valid";
return;
}
const ModelItem* item = _tree->findModelByID(_modelID);
if (item != NULL) {
// Snapshot the model's transform as the reference frame.
_refScale = item->getRadius();
_refRotation = item->getModelRotation();
// Scale by TREE_SCALE -- presumably tree positions are normalized; confirm.
_refPosition = item->getPosition() * (float)TREE_SCALE;
update();
}
}
// Create a referential anchoring 'avatar' to the model with 'modelID':
// snapshots the model's transform as the reference frame and derives the
// avatar's transform relative to it. Marks the referential invalid if the
// model cannot be found.
ModelReferential::ModelReferential(uint32_t modelID, ModelTree* tree, AvatarData* avatar) :
Referential(MODEL, avatar),
_modelID(modelID),
_tree(tree)
{
const ModelItem* item = _tree->findModelByID(_modelID);
if (!isValid() || item == NULL) {
qDebug() << "ModelReferential::constructor(): Not Valid";
_isValid = false;
return;
}
// Reference frame: the model's current scale, rotation and (scaled) position.
_refScale = item->getRadius();
_refRotation = item->getModelRotation();
_refPosition = item->getPosition() * (float)TREE_SCALE;
// Express the avatar's current transform in the model's frame.
glm::quat refInvRot = glm::inverse(_refRotation);
_scale = _avatar->getTargetScale() / _refScale;
_rotation = refInvRot * _avatar->getOrientation();
_translation = refInvRot * (avatar->getPosition() - _refPosition) / _refScale;
}
// Re-apply the stored avatar-relative transform whenever the reference
// model's scale, rotation, or position has changed since the last update.
void ModelReferential::update() {
const ModelItem* item = _tree->findModelByID(_modelID);
if (!isValid() || item == NULL || _avatar == NULL) {
return;
}
// Track whether scale/rotation changed so position is re-derived too.
bool somethingChanged = false;
if (item->getRadius() != _refScale) {
_refScale = item->getRadius();
_avatar->setTargetScale(_refScale * _scale, true);
somethingChanged = true;
}
if (item->getModelRotation() != _refRotation) {
_refRotation = item->getModelRotation();
_avatar->setOrientation(_refRotation * _rotation, true);
somethingChanged = true;
}
if (item->getPosition() != _refPosition || somethingChanged) {
// NOTE(review): _refPosition is stored unscaled here, but the constructors
// store it pre-multiplied by TREE_SCALE -- confirm the intended convention.
_refPosition = item->getPosition();
_avatar->setPosition(_refPosition * (float)TREE_SCALE + _refRotation * (_translation * _refScale), true);
}
}
// Serialize the referential's extra data (just the model ID) into
// destinationBuffer. Returns the number of bytes written.
int ModelReferential::packExtraData(unsigned char* destinationBuffer) const {
memcpy(destinationBuffer, &_modelID, sizeof(_modelID));
return sizeof(_modelID);
}
int ModelReferential::unpackExtraData(const unsigned char *sourceBuffer, int size) {
memcpy(&_modelID, sourceBuffer, sizeof(_modelID));
return sizeof(_modelID);
}
// Rebuild a joint-anchored referential from a generic (network-decoded)
// Referential. The base-class constructor copies the relative transform and
// decodes the extra data; here we refresh the reference frame from the
// joint's world transform when the model and joint index are usable.
JointReferential::JointReferential(Referential* referential, ModelTree* tree, AvatarData* avatar) :
    ModelReferential(referential, tree, avatar)
{
    _type = JOINT;
    if (!isValid()) {
        qDebug() << "JointReferential::copyConstructor(): Not Valid";
        return;
    }
    const ModelItem* item = _tree->findModelByID(_modelID);
    const Model* model = getModel(item);
    // BUG FIX: the original condition was inverted (!isValid() || model == NULL
    // || _jointIndex >= ...), so the branch dereferenced 'item' and 'model'
    // exactly when they were NULL / out of range. Use the positive guard, as
    // JointReferential::update() does.
    if (item != NULL && model != NULL && _jointIndex < model->getJointStateCount()) {
        _refScale = item->getRadius();
        model->getJointRotationInWorldFrame(_jointIndex, _refRotation);
        model->getJointPositionInWorldFrame(_jointIndex, _refPosition);
    }
    update();
}
// Create a referential anchoring 'avatar' to joint 'jointIndex' of the model
// with 'modelID': snapshots the joint's world transform as the reference
// frame and derives the avatar's transform relative to it. Marks the
// referential invalid if the model/joint cannot be resolved.
JointReferential::JointReferential(uint32_t jointIndex, uint32_t modelID, ModelTree* tree, AvatarData* avatar) :
ModelReferential(modelID, tree, avatar),
_jointIndex(jointIndex)
{
_type = JOINT;
const ModelItem* item = _tree->findModelByID(_modelID);
const Model* model = getModel(item);
if (!isValid() || model == NULL || _jointIndex >= model->getJointStateCount()) {
qDebug() << "JointReferential::constructor(): Not Valid";
_isValid = false;
return;
}
// Reference frame: model scale plus the joint's world-frame transform.
_refScale = item->getRadius();
model->getJointRotationInWorldFrame(_jointIndex, _refRotation);
model->getJointPositionInWorldFrame(_jointIndex, _refPosition);
// Express the avatar's current transform in the joint's frame.
glm::quat refInvRot = glm::inverse(_refRotation);
_scale = _avatar->getTargetScale() / _refScale;
_rotation = refInvRot * _avatar->getOrientation();
_translation = refInvRot * (avatar->getPosition() - _refPosition) / _refScale;
}
void JointReferential::update() {
    // Re-read the referenced joint and push any transform change onto the avatar.
    const ModelItem* item = _tree->findModelByID(_modelID);
    const Model* model = getModel(item);
    if (!isValid() || model == NULL || _jointIndex >= model->getJointStateCount()) {
        return;
    }

    bool referenceMoved = false;
    if (item->getRadius() != _refScale) {
        // Model resized: rescale the avatar proportionally.
        _refScale = item->getRadius();
        _avatar->setTargetScale(_refScale * _scale, true);
        referenceMoved = true;
    }
    if (item->getModelRotation() != _refRotation) {
        // Model rotated: track the joint's new world rotation.
        model->getJointRotationInWorldFrame(_jointIndex, _refRotation);
        _avatar->setOrientation(_refRotation * _rotation, true);
        referenceMoved = true;
    }
    if (referenceMoved || item->getPosition() != _refPosition) {
        // Reposition from the joint's world position plus the stored offset.
        model->getJointPositionInWorldFrame(_jointIndex, _refPosition);
        _avatar->setPosition(_refPosition + _refRotation * (_translation * _refScale), true);
    }
}
const Model* JointReferential::getModel(const ModelItem* item) {
    // Resolve the renderable Model for this item through the tree's FBX
    // service; NULL when either the item or the service is unavailable.
    ModelItemFBXService* fbxService = _tree->getFBXService();
    if (item == NULL || fbxService == NULL) {
        return NULL;
    }
    return fbxService->getModelForModelItem(*item);
}
int JointReferential::packExtraData(unsigned char* destinationBuffer) const {
    // Extra data layout: model ID (written by the base class), then joint index.
    unsigned char* cursor = destinationBuffer;
    cursor += ModelReferential::packExtraData(cursor);
    memcpy(cursor, &_jointIndex, sizeof(_jointIndex));
    cursor += sizeof(_jointIndex);
    return cursor - destinationBuffer;
}
int JointReferential::unpackExtraData(const unsigned char *sourceBuffer, int size) {
    // Extra data layout: model ID (read by the base class), then joint index.
    // Guard against a truncated packet: reading past `size` bytes would be an
    // out-of-bounds read. Returning the expected byte count without reading
    // makes the caller's (bytesRead == expectedSize) check fail and marks the
    // referential invalid, matching the original's observable outcome.
    const int expectedBytes = (int)(sizeof(uint32_t) + sizeof(_jointIndex));
    if (size < expectedBytes) {
        return expectedBytes;
    }
    const unsigned char* startPosition = sourceBuffer;
    sourceBuffer += ModelReferential::unpackExtraData(sourceBuffer, size);
    memcpy(&_jointIndex, sourceBuffer, sizeof(_jointIndex));
    sourceBuffer += sizeof(_jointIndex);
    return sourceBuffer - startPosition;
}

View file

@ -0,0 +1,48 @@
//
// ModelReferential.h
//
//
// Created by Clement on 7/30/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_ModelReferential_h
#define hifi_ModelReferential_h

#include <Referential.h>

class ModelTree;
class Model;

/// Referential that pins an avatar to a model in the model tree: the avatar's
/// position, orientation, and scale are maintained relative to the model's
/// transform and re-applied by update() whenever the model moves.
class ModelReferential : public Referential {
public:
    /// Rebuilds a model referential from a generic, wire-decoded referential.
    ModelReferential(Referential* ref, ModelTree* tree, AvatarData* avatar);
    /// Creates a referential attached to the model with the given ID.
    ModelReferential(uint32_t modelID, ModelTree* tree, AvatarData* avatar);
    /// Re-reads the model's transform and moves the avatar accordingly.
    virtual void update();

protected:
    // Extra wire data for this type is just _modelID.
    virtual int packExtraData(unsigned char* destinationBuffer) const;
    virtual int unpackExtraData(const unsigned char* sourceBuffer, int size);

    uint32_t _modelID;   // ID of the model this referential follows
    ModelTree* _tree;    // tree used for model lookups (lifetime managed elsewhere)
};

/// Referential that pins an avatar to a specific joint of a model.
class JointReferential : public ModelReferential {
public:
    JointReferential(Referential* ref, ModelTree* tree, AvatarData* avatar);
    JointReferential(uint32_t jointIndex, uint32_t modelID, ModelTree* tree, AvatarData* avatar);
    virtual void update();

protected:
    /// Resolves the renderable Model for an item via the tree's FBX service (may return NULL).
    const Model* getModel(const ModelItem* item);
    // Extra wire data for this type is _modelID followed by _jointIndex.
    virtual int packExtraData(unsigned char* destinationBuffer) const;
    virtual int unpackExtraData(const unsigned char* sourceBuffer, int size);

    uint32_t _jointIndex;   // index of the tracked joint within the model's skeleton
};

#endif // hifi_ModelReferential_h

View file

@ -32,6 +32,7 @@
#include "Audio.h"
#include "Environment.h"
#include "Menu.h"
#include "ModelReferential.h"
#include "MyAvatar.h"
#include "Physics.h"
#include "devices/Faceshift.h"
@ -108,6 +109,10 @@ void MyAvatar::reset() {
}
void MyAvatar::update(float deltaTime) {
if (_referential) {
_referential->update();
}
Head* head = getHead();
head->relaxLean(deltaTime);
updateFromTrackers(deltaTime);
@ -443,6 +448,32 @@ glm::vec3 MyAvatar::getRightPalmPosition() {
return rightHandPosition;
}
void MyAvatar::clearReferential() {
    // Detach the avatar from any model/joint it was following.
    changeReferential(NULL);
}
bool MyAvatar::setModelReferential(int id) {
ModelTree* tree = Application::getInstance()->getModels()->getTree();
changeReferential(new ModelReferential(id, tree, this));
if (_referential->isValid()) {
return true;
} else {
changeReferential(NULL);
return false;
}
}
bool MyAvatar::setJointReferential(int id, int jointIndex) {
    // Attach this avatar to joint `jointIndex` of model `id`; returns whether
    // the attach succeeded.
    // BUG FIX: the original tested `!_referential->isValid()` and returned
    // true — i.e. it reported success exactly when the referential was
    // invalid, and kept the invalid referential attached. Fixed to match
    // setModelReferential().
    ModelTree* tree = Application::getInstance()->getModels()->getTree();
    changeReferential(new JointReferential(jointIndex, id, tree, this));
    if (_referential->isValid()) {
        return true;
    } else {
        changeReferential(NULL);
        return false;
    }
}
void MyAvatar::setLocalGravity(glm::vec3 gravity) {
_motionBehaviors |= AVATAR_MOTION_OBEY_LOCAL_GRAVITY;
// Environmental and Local gravities are incompatible. Since Local is being set here

View file

@ -18,6 +18,8 @@
#include "Avatar.h"
class ModelItemID;
enum AvatarHandState
{
HAND_STATE_NULL = 0,
@ -149,6 +151,10 @@ public slots:
glm::vec3 getLeftPalmPosition();
glm::vec3 getRightPalmPosition();
void clearReferential();
bool setModelReferential(int id);
bool setJointReferential(int id, int jointIndex);
signals:
void transformChanged();

View file

@ -76,6 +76,10 @@ const FBXGeometry* ModelTreeRenderer::getGeometryForModel(const ModelItem& model
return result;
}
const Model* ModelTreeRenderer::getModelForModelItem(const ModelItem& modelItem) {
    // Const-access wrapper around getModel(), used by the ModelItemFBXService
    // interface so callers outside the renderer only get read access.
    return getModel(modelItem);
}
Model* ModelTreeRenderer::getModel(const ModelItem& modelItem) {
Model* model = NULL;

View file

@ -51,7 +51,7 @@ public:
virtual void render(RenderMode renderMode = DEFAULT_RENDER_MODE);
virtual const FBXGeometry* getGeometryForModel(const ModelItem& modelItem);
virtual const Model* getModelForModelItem(const ModelItem& modelItem);
/// clears the tree
virtual void clear();

View file

@ -930,6 +930,13 @@ void HeightfieldTool::render() {
ImportHeightfieldTool::ImportHeightfieldTool(MetavoxelEditor* editor) :
HeightfieldTool(editor, "Import Heightfield") {
_form->addRow("Block Size:", _blockSize = new QSpinBox());
_blockSize->setPrefix("2^");
_blockSize->setMinimum(1);
_blockSize->setValue(5);
connect(_blockSize, static_cast<void (QSpinBox::*)(int)>(&QSpinBox::valueChanged), this,
&ImportHeightfieldTool::updatePreview);
_form->addRow("Height:", _height = new QPushButton());
connect(_height, &QAbstractButton::clicked, this, &ImportHeightfieldTool::selectHeightFile);
_form->addRow("Color:", _color = new QPushButton());
@ -989,23 +996,22 @@ void ImportHeightfieldTool::selectColorFile() {
updatePreview();
}
const int BLOCK_SIZE = 32;
const int BLOCK_ADVANCEMENT = BLOCK_SIZE - 1;
void ImportHeightfieldTool::updatePreview() {
QVector<BufferDataPointer> buffers;
if (_heightImage.width() > 0 && _heightImage.height() > 0) {
float z = 0.0f;
for (int i = 0; i < _heightImage.height(); i += BLOCK_ADVANCEMENT, z++) {
int blockSize = pow(2.0, _blockSize->value());
int blockAdvancement = blockSize - 1;
for (int i = 0; i < _heightImage.height(); i += blockAdvancement, z++) {
float x = 0.0f;
for (int j = 0; j < _heightImage.width(); j += BLOCK_ADVANCEMENT, x++) {
QByteArray height(BLOCK_SIZE * BLOCK_SIZE, 0);
int rows = qMin(BLOCK_SIZE, _heightImage.height() - i);
int columns = qMin(BLOCK_SIZE, _heightImage.width() - j);
for (int j = 0; j < _heightImage.width(); j += blockAdvancement, x++) {
QByteArray height(blockSize * blockSize, 0);
int rows = qMin(blockSize, _heightImage.height() - i);
int columns = qMin(blockSize, _heightImage.width() - j);
const int BYTES_PER_COLOR = 3;
for (int y = 0; y < rows; y++) {
uchar* src = _heightImage.scanLine(i + y) + j * BYTES_PER_COLOR;
char* dest = height.data() + y * BLOCK_SIZE;
char* dest = height.data() + y * blockSize;
for (int x = 0; x < columns; x++) {
*dest++ = *src;
src += BYTES_PER_COLOR;
@ -1014,11 +1020,11 @@ void ImportHeightfieldTool::updatePreview() {
QByteArray color;
if (!_colorImage.isNull()) {
color = QByteArray(BLOCK_SIZE * BLOCK_SIZE * BYTES_PER_COLOR, 0);
rows = qMax(0, qMin(BLOCK_SIZE, _colorImage.height() - i));
columns = qMax(0, qMin(BLOCK_SIZE, _colorImage.width() - j));
color = QByteArray(blockSize * blockSize * BYTES_PER_COLOR, 0);
rows = qMax(0, qMin(blockSize, _colorImage.height() - i));
columns = qMax(0, qMin(blockSize, _colorImage.width() - j));
for (int y = 0; y < rows; y++) {
memcpy(color.data() + y * BLOCK_SIZE * BYTES_PER_COLOR,
memcpy(color.data() + y * blockSize * BYTES_PER_COLOR,
_colorImage.scanLine(i + y) + j * BYTES_PER_COLOR, columns * BYTES_PER_COLOR);
}
}

View file

@ -267,10 +267,11 @@ private slots:
void selectHeightFile();
void selectColorFile();
void updatePreview();
private:
void updatePreview();
QSpinBox* _blockSize;
QPushButton* _height;
QPushButton* _color;

View file

@ -70,7 +70,12 @@ void InboundAudioStream::clearBuffer() {
_currentJitterBufferFrames = 0;
}
int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
    // Default implementation: the payload is raw 16-bit samples; copy them
    // straight into the ring buffer and report the bytes consumed.
    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
}
int InboundAudioStream::parseData(const QByteArray& packet) {
PacketType packetType = packetTypeForPacket(packet);
QUuid senderUUID = uuidFromPacketHeader(packet);
@ -82,7 +87,9 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
// parse sequence number and track it
quint16 sequence = *(reinterpret_cast<const quint16*>(sequenceAt));
readBytes += sizeof(quint16);
SequenceNumberStats::ArrivalInfo arrivalInfo = frameReceivedUpdateNetworkStats(sequence, senderUUID);
SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID);
frameReceivedUpdateTimingStats();
// TODO: handle generalized silent packet here?????
@ -130,32 +137,71 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
return readBytes;
}
bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) {
int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples();
int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped) {
int samplesPopped = 0;
int samplesAvailable = _ringBuffer.samplesAvailable();
if (_isStarved) {
// we're still refilling; don't pop
_consecutiveNotMixedCount++;
_lastPopSucceeded = false;
} else {
if (_ringBuffer.samplesAvailable() >= numSamplesRequested) {
// we have enough samples to pop, so we're good to mix
_lastPopOutput = _ringBuffer.nextOutput();
_ringBuffer.shiftReadPosition(numSamplesRequested);
framesAvailableChanged();
_hasStarted = true;
_lastPopSucceeded = true;
if (samplesAvailable >= maxSamples) {
// we have enough samples to pop, so we're good to pop
popSamplesNoCheck(maxSamples);
samplesPopped = maxSamples;
} else if (!allOrNothing && samplesAvailable > 0) {
// we don't have the requested number of samples, but we do have some
// samples available, so pop all those (except in all-or-nothing mode)
popSamplesNoCheck(samplesAvailable);
samplesPopped = samplesAvailable;
} else {
// we don't have enough samples, so set this stream to starve
// if starveOnFail is true
if (starveOnFail) {
starved();
// we can't pop any samples. set this stream to starved if needed
if (starveIfNoSamplesPopped) {
setToStarved();
_consecutiveNotMixedCount++;
}
_lastPopSucceeded = false;
}
}
return _lastPopSucceeded;
return samplesPopped;
}
// Pops up to maxFrames whole frames from the ring buffer and returns how many
// were popped. While the stream is starved (refilling its jitter buffer) no
// frames are popped. With allOrNothing set, a partial pop is refused.
// starveIfNoFramesPopped controls whether a failed pop marks the stream starved.
int InboundAudioStream::popFrames(int maxFrames, bool allOrNothing, bool starveIfNoFramesPopped) {
    int framesPopped = 0;
    int framesAvailable = _ringBuffer.framesAvailable();
    if (_isStarved) {
        // we're still refilling; don't pop
        _consecutiveNotMixedCount++;
        _lastPopSucceeded = false;
    } else {
        if (framesAvailable >= maxFrames) {
            // we have enough frames to pop, so we're good to pop
            popSamplesNoCheck(maxFrames * _ringBuffer.getNumFrameSamples());
            framesPopped = maxFrames;
        } else if (!allOrNothing && framesAvailable > 0) {
            // we don't have the requested number of frames, but we do have some
            // frames available, so pop all those (except in all-or-nothing mode)
            popSamplesNoCheck(framesAvailable * _ringBuffer.getNumFrameSamples());
            framesPopped = framesAvailable;
        } else {
            // we can't pop any frames. set this stream to starved if needed
            if (starveIfNoFramesPopped) {
                setToStarved();
                _consecutiveNotMixedCount = 1;
            }
            _lastPopSucceeded = false;
        }
    }
    return framesPopped;
}
// Advances the read position by `samples` without any availability check —
// the caller must already have verified that many samples exist. Records the
// pop output iterator for consumers and marks the stream started/successful.
void InboundAudioStream::popSamplesNoCheck(int samples) {
    _lastPopOutput = _ringBuffer.nextOutput();
    _ringBuffer.shiftReadPosition(samples);
    framesAvailableChanged();

    _hasStarted = true;
    _lastPopSucceeded = true;
}
void InboundAudioStream::framesAvailableChanged() {
@ -168,16 +214,12 @@ void InboundAudioStream::framesAvailableChanged() {
}
void InboundAudioStream::setToStarved() {
starved();
if (_ringBuffer.framesAvailable() >= _desiredJitterBufferFrames) {
_isStarved = false;
}
}
void InboundAudioStream::starved() {
_isStarved = true;
_consecutiveNotMixedCount = 0;
_starveCount++;
// if we have more than the desired frames when setToStarved() is called, then we'll immediately
// be considered refilled. in that case, there's no need to set _isStarved to true.
_isStarved = (_ringBuffer.framesAvailable() < _desiredJitterBufferFrames);
}
void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
@ -204,9 +246,7 @@ int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
return glm::clamp(desired, MIN_FRAMES_DESIRED, MAX_FRAMES_DESIRED);
}
SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID) {
// track the sequence number we received
SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequenceNumber, senderUUID);
void InboundAudioStream::frameReceivedUpdateTimingStats() {
// update our timegap stats and desired jitter buffer frames if necessary
// discard the first few packets we receive since they usually have gaps that aren't represensative of normal jitter
@ -243,8 +283,6 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS
}
}
_lastFrameReceivedTime = now;
return arrivalInfo;
}
int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) {

View file

@ -63,8 +63,8 @@ public:
virtual int parseData(const QByteArray& packet);
bool popFrames(int numFrames, bool starveOnFail = true);
int popFrames(int maxFrames, bool allOrNothing, bool starveIfNoFramesPopped = true);
int popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped = true);
bool lastPopSucceeded() const { return _lastPopSucceeded; };
const AudioRingBuffer::ConstIterator& getLastPopOutput() const { return _lastPopOutput; }
@ -111,13 +111,12 @@ public:
int getPacketsReceived() const { return _incomingSequenceNumberStats.getReceived(); }
private:
void starved();
SequenceNumberStats::ArrivalInfo frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID);
void frameReceivedUpdateTimingStats();
int clampDesiredJitterBufferFramesValue(int desired) const;
int writeSamplesForDroppedPackets(int numSamples);
void popSamplesNoCheck(int samples);
void framesAvailableChanged();
protected:
@ -126,11 +125,12 @@ protected:
InboundAudioStream& operator= (const InboundAudioStream&);
/// parses the info between the seq num and the audio data in the network packet and calculates
/// how many audio samples this packet contains
/// how many audio samples this packet contains (used when filling in samples for dropped packets).
virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0;
/// parses the audio data in the network packet
virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0;
/// parses the audio data in the network packet.
/// default implementation assumes packet contains raw audio samples after stream properties
virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
int writeDroppableSilentSamples(int numSilentSamples);

View file

@ -58,10 +58,6 @@ int InjectedAudioStream::parseStreamProperties(PacketType type, const QByteArray
return packetStream.device()->pos();
}
int InjectedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
}
AudioStreamStats InjectedAudioStream::getAudioStreamStats() const {
AudioStreamStats streamStats = PositionalAudioStream::getAudioStreamStats();
streamStats._streamIdentifier = _streamIdentifier;

View file

@ -32,7 +32,6 @@ private:
AudioStreamStats getAudioStreamStats() const;
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
const QUuid _streamIdentifier;
float _radius;

View file

@ -1,3 +1,13 @@
//
// MixedAudioStream.cpp
// libraries/audio/src
//
// Created by Yixin Wang on 8/4/14.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "MixedAudioStream.h"
@ -11,7 +21,3 @@ int MixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& p
numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
return 0;
}
int MixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
}

View file

@ -2,7 +2,7 @@
// MixedAudioStream.h
// libraries/audio/src
//
// Created by Stephen Birarda on 6/5/13.
// Created by Yixin Wang on 8/4/14.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
@ -23,7 +23,6 @@ public:
protected:
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
};
#endif // hifi_MixedAudioStream_h

View file

@ -0,0 +1,45 @@
//
// MixedProcessedAudioStream.cpp
// libraries/audio/src
//
// Created by Yixin Wang on 8/4/14.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "MixedProcessedAudioStream.h"
MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
    : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc),
    _outputFormatChannelsTimesSampleRate(0)
{
    // BUG FIX: _outputFormatChannelsTimesSampleRate was left uninitialized;
    // parseStreamProperties() reads it, so a packet arriving before
    // outputFormatChanged() is called would use an indeterminate value.
    // Zero-initialize it here (outputFormatChanged() sets the real value).
}
// Called when the audio output device format changes; resizes the ring buffer
// so one "frame" matches the device's per-network-frame sample count.
void MixedProcessedAudioStream::outputFormatChanged(int outputFormatChannelCountTimesSampleRate) {
    _outputFormatChannelsTimesSampleRate = outputFormatChannelCountTimesSampleRate;
    int deviceOutputFrameSize = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * _outputFormatChannelsTimesSampleRate / SAMPLE_RATE;
    _ringBuffer.resizeForFrameSize(deviceOutputFrameSize);
}
// Computes numAudioSamples for this packet and returns the number of
// stream-property bytes consumed (none for mixed audio).
int MixedProcessedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
    // mixed audio packets do not have any info between the seq num and the audio data.
    int numNetworkSamples = packetAfterSeqNum.size() / sizeof(int16_t);

    // since numAudioSamples is used to know how many samples to add for each dropped packet before this one,
    // we want to set it to the number of device audio samples since this stream contains device audio samples, not network samples.
    const int STEREO_DIVIDER = 2;
    numAudioSamples = numNetworkSamples * _outputFormatChannelsTimesSampleRate / (STEREO_DIVIDER * SAMPLE_RATE);

    return 0;
}
// Runs the network samples through the processSamples signal (resampling to
// the device format is done by whoever is connected to it) and buffers the
// processed result. Returns the number of network payload bytes consumed.
int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
    QByteArray outputBuffer;
    emit processSamples(packetAfterStreamProperties, outputBuffer);

    _ringBuffer.writeData(outputBuffer.data(), outputBuffer.size());

    return packetAfterStreamProperties.size();
}

View file

@ -0,0 +1,37 @@
//
// MixedProcessedAudioStream.h
// libraries/audio/src
//
// Created by Yixin Wang on 8/4/14.
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_MixedProcessedAudioStream_h
#define hifi_MixedProcessedAudioStream_h
#include "InboundAudioStream.h"
class MixedProcessedAudioStream : public InboundAudioStream {
Q_OBJECT
public:
MixedProcessedAudioStream (int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);
signals:
void processSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
public:
void outputFormatChanged(int outputFormatChannelCountTimesSampleRate);
protected:
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
private:
int _outputFormatChannelsTimesSampleRate;
};
#endif // hifi_MixedProcessedAudioStream_h

View file

@ -36,6 +36,7 @@ using namespace std;
AvatarData::AvatarData() :
_sessionUUID(),
_handPosition(0,0,0),
_referential(NULL),
_bodyYaw(-90.f),
_bodyPitch(0.0f),
_bodyRoll(0.0f),
@ -62,6 +63,59 @@ AvatarData::AvatarData() :
AvatarData::~AvatarData() {
delete _headData;
delete _handData;
delete _referential;
}
const glm::vec3& AvatarData::getPosition() {
    // If attached to a referential, let it refresh _position first so the
    // returned value tracks the referenced object.
    if (_referential) {
        _referential->update();
    }
    return _position;
}
void AvatarData::setPosition(const glm::vec3 position, bool overideReferential) {
    // While a referential is attached it owns the position; external sets are
    // ignored unless the caller explicitly overrides.
    if (!_referential || overideReferential) {
        _position = position;
    }
}
glm::quat AvatarData::getOrientation() const {
    // If attached to a referential, refresh body yaw/pitch/roll from it
    // before composing the quaternion.
    if (_referential) {
        _referential->update();
    }

    return glm::quat(glm::radians(glm::vec3(_bodyPitch, _bodyYaw, _bodyRoll)));
}
void AvatarData::setOrientation(const glm::quat& orientation, bool overideReferential) {
    // While a referential is attached it owns the orientation; external sets
    // are ignored unless the caller explicitly overrides.
    if (!_referential || overideReferential) {
        glm::vec3 eulerAngles = glm::degrees(safeEulerAngles(orientation));
        _bodyPitch = eulerAngles.x;
        _bodyYaw = eulerAngles.y;
        _bodyRoll = eulerAngles.z;
    }
}
float AvatarData::getTargetScale() const {
    // If attached to a referential, refresh _targetScale from it first.
    if (_referential) {
        _referential->update();
    }

    return _targetScale;
}
void AvatarData::setTargetScale(float targetScale, bool overideReferential) {
    // While a referential is attached it owns the scale; external sets are
    // ignored unless the caller explicitly overrides.
    if (!_referential || overideReferential) {
        _targetScale = targetScale;
    }
}
void AvatarData::setClampedTargetScale(float targetScale, bool overideReferential) {
    // Clamp to the allowed avatar scale range, then delegate (the referential
    // override semantics live in setTargetScale()).
    targetScale =  glm::clamp(targetScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE);

    setTargetScale(targetScale, overideReferential);
    qDebug() << "Changed scale to " << _targetScale;
}
glm::vec3 AvatarData::getHandPosition() const {
@ -135,11 +189,21 @@ QByteArray AvatarData::toByteArray() {
// hand state
setSemiNibbleAt(bitItems,HAND_STATE_START_BIT,_handState);
// faceshift state
if (_headData->_isFaceshiftConnected) { setAtBit(bitItems, IS_FACESHIFT_CONNECTED); }
if (_headData->_isFaceshiftConnected) {
setAtBit(bitItems, IS_FACESHIFT_CONNECTED);
}
if (_isChatCirclingEnabled) {
setAtBit(bitItems, IS_CHAT_CIRCLING_ENABLED);
}
if (_referential != NULL && _referential->isValid()) {
setAtBit(bitItems, HAS_REFERENTIAL);
}
*destinationBuffer++ = bitItems;
// Add referential
if (_referential != NULL && _referential->isValid()) {
destinationBuffer += _referential->packReferential(destinationBuffer);
}
// If it is connected, pack up the data
if (_headData->_isFaceshiftConnected) {
@ -370,18 +434,32 @@ int AvatarData::parseDataAtOffset(const QByteArray& packet, int offset) {
} // 1 + chatMessageSize bytes
{ // bitFlags and face data
unsigned char bitItems = 0;
bitItems = (unsigned char)*sourceBuffer++;
unsigned char bitItems = *sourceBuffer++;
// key state, stored as a semi-nibble in the bitItems
_keyState = (KeyState)getSemiNibbleAt(bitItems,KEY_STATE_START_BIT);
// hand state, stored as a semi-nibble in the bitItems
_handState = getSemiNibbleAt(bitItems,HAND_STATE_START_BIT);
_headData->_isFaceshiftConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED);
_isChatCirclingEnabled = oneAtBit(bitItems, IS_CHAT_CIRCLING_ENABLED);
bool hasReferential = oneAtBit(bitItems, HAS_REFERENTIAL);
// Referential
if (hasReferential) {
Referential* ref = new Referential(sourceBuffer, this);
if (_referential == NULL ||
ref->version() != _referential->version()) {
changeReferential(ref);
} else {
delete ref;
}
_referential->update();
} else if (_referential != NULL) {
changeReferential(NULL);
}
if (_headData->_isFaceshiftConnected) {
float leftEyeBlink, rightEyeBlink, averageLoudness, browAudioLift;
minPossibleSize += sizeof(leftEyeBlink) + sizeof(rightEyeBlink) + sizeof(averageLoudness) + sizeof(browAudioLift);
@ -503,6 +581,15 @@ int AvatarData::parseDataAtOffset(const QByteArray& packet, int offset) {
return sourceBuffer - startPosition;
}
bool AvatarData::hasReferential() {
    // True when the avatar is currently attached to any referential.
    return _referential != NULL;
}
void AvatarData::changeReferential(Referential *ref) {
    // Takes ownership of ref (may be NULL to detach); frees the previous
    // referential — delete on NULL is a no-op.
    delete _referential;
    _referential = ref;
}
void AvatarData::setJointData(int index, const glm::quat& rotation) {
if (index == -1) {
return;
@ -803,21 +890,6 @@ void AvatarData::setJointMappingsFromNetworkReply() {
networkReply->deleteLater();
}
void AvatarData::setClampedTargetScale(float targetScale) {
targetScale = glm::clamp(targetScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE);
_targetScale = targetScale;
qDebug() << "Changed scale to " << _targetScale;
}
void AvatarData::setOrientation(const glm::quat& orientation) {
glm::vec3 eulerAngles = glm::degrees(safeEulerAngles(orientation));
_bodyPitch = eulerAngles.x;
_bodyYaw = eulerAngles.y;
_bodyRoll = eulerAngles.z;
}
void AvatarData::sendIdentityPacket() {
QByteArray identityPacket = byteArrayWithPopulatedHeader(PacketTypeAvatarIdentity);
identityPacket.append(identityByteArray());

View file

@ -49,6 +49,7 @@ typedef unsigned long long quint64;
#include <Node.h>
#include "Referential.h"
#include "HeadData.h"
#include "HandData.h"
@ -80,7 +81,8 @@ const quint32 AVATAR_MOTION_SCRIPTABLE_BITS =
const int KEY_STATE_START_BIT = 0; // 1st and 2nd bits
const int HAND_STATE_START_BIT = 2; // 3rd and 4th bits
const int IS_FACESHIFT_CONNECTED = 4; // 5th bit
const int IS_CHAT_CIRCLING_ENABLED = 5;
const int IS_CHAT_CIRCLING_ENABLED = 5; // 6th bit
const int HAS_REFERENTIAL = 6; // 7th bit
static const float MAX_AVATAR_SCALE = 1000.f;
static const float MIN_AVATAR_SCALE = .005f;
@ -141,8 +143,8 @@ public:
const QUuid& getSessionUUID() { return _sessionUUID; }
const glm::vec3& getPosition() const { return _position; }
void setPosition(const glm::vec3 position) { _position = position; }
const glm::vec3& getPosition();
void setPosition(const glm::vec3 position, bool overideReferential = false);
glm::vec3 getHandPosition() const;
void setHandPosition(const glm::vec3& handPosition);
@ -165,8 +167,8 @@ public:
float getBodyRoll() const { return _bodyRoll; }
void setBodyRoll(float bodyRoll) { _bodyRoll = bodyRoll; }
glm::quat getOrientation() const { return glm::quat(glm::radians(glm::vec3(_bodyPitch, _bodyYaw, _bodyRoll))); }
void setOrientation(const glm::quat& orientation);
glm::quat getOrientation() const;
void setOrientation(const glm::quat& orientation, bool overideReferential = false);
glm::quat getHeadOrientation() const { return _headData->getOrientation(); }
void setHeadOrientation(const glm::quat& orientation) { _headData->setOrientation(orientation); }
@ -188,9 +190,9 @@ public:
void setAudioAverageLoudness(float value) { _headData->setAudioAverageLoudness(value); }
// Scale
float getTargetScale() const { return _targetScale; }
void setTargetScale(float targetScale) { _targetScale = targetScale; }
void setClampedTargetScale(float targetScale);
float getTargetScale() const;
void setTargetScale(float targetScale, bool overideReferential = false);
void setClampedTargetScale(float targetScale, bool overideReferential = false);
// Hand State
Q_INVOKABLE void setHandState(char s) { _handState = s; }
@ -280,6 +282,8 @@ public:
QElapsedTimer& getLastUpdateTimer() { return _lastUpdateTimer; }
virtual float getBoundingRadius() const { return 1.f; }
const Referential* getReferential() const { return _referential; }
public slots:
void sendIdentityPacket();
@ -287,10 +291,14 @@ public slots:
void setBillboardFromNetworkReply();
void setJointMappingsFromNetworkReply();
void setSessionUUID(const QUuid& id) { _sessionUUID = id; }
bool hasReferential();
protected:
QUuid _sessionUUID;
glm::vec3 _position;
glm::vec3 _handPosition;
Referential* _referential;
// Body rotation
float _bodyYaw; // degrees
@ -340,6 +348,7 @@ protected:
/// Loads the joint indices, names from the FST file (if any)
virtual void updateJointMappings();
void changeReferential(Referential* ref);
private:
// privatize the copy constructor and assignment operator so they cannot be called

View file

@ -0,0 +1,113 @@
//
// Referential.cpp
//
//
// Created by Clement on 7/30/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <SharedUtil.h>
#include "AvatarData.h"
#include "Referential.h"
Referential::Referential(Type type, AvatarData* avatar) :
    _type(type),
    _version(0),
    _isValid(true),
    _avatar(avatar)
{
    // A referential is meaningless without an avatar to drive.
    if (_avatar == NULL) {
        _isValid = false;
        return;
    }
    // Start one version ahead of any referential the avatar already carries,
    // so receivers can tell this one is newer.
    if (_avatar->hasReferential()) {
        _version = _avatar->getReferential()->version() + 1;
    }
}
// Decodes a referential from a network buffer. _isValid is set by
// unpackReferential() based on whether the extra data matched this type.
Referential::Referential(const unsigned char*& sourceBuffer, AvatarData* avatar) :
    _isValid(false),
    _avatar(avatar)
{
    // Since we can't return how many bytes have been read,
    // we take a reference to the pointer as argument and increment the pointer ourselves.
    sourceBuffer += unpackReferential(sourceBuffer);
    // The actual unpacking to the right referential type happens in Avatar::simulate()
    // If subclassed, make sure to add a case there to unpack the new referential type correctly
}
Referential::~Referential() {
    // No owned resources to release.
}
// Serializes the full referential: common fields (pack()), a one-byte extra
// data size, then the type-specific extra data. Returns total bytes written.
int Referential::packReferential(unsigned char* destinationBuffer) const {
    const unsigned char* startPosition = destinationBuffer;
    destinationBuffer += pack(destinationBuffer);

    unsigned char* sizePosition = destinationBuffer++; // Save a spot for the extra data size
    // NOTE(review): the size is a single (signed) char, which caps extra data
    // at 127 bytes — confirm all referential types stay under that limit.
    char size = packExtraData(destinationBuffer);
    *sizePosition = size; // write extra data size in saved spot here
    destinationBuffer += size;
    return destinationBuffer - startPosition;
}
// Deserializes a full referential: common fields, one-byte extra data size,
// then the type-specific extra data. Sets _isValid based on whether the
// extra data was fully consumed. Returns total bytes read.
int Referential::unpackReferential(const unsigned char* sourceBuffer) {
    const unsigned char* startPosition = sourceBuffer;
    sourceBuffer += unpack(sourceBuffer);
    char expectedSize = *sourceBuffer++;
    char bytesRead = unpackExtraData(sourceBuffer, expectedSize);
    _isValid = (bytesRead == expectedSize);
    if (!_isValid) {
        // Will occur if the new instance unpacking is of the wrong type
        qDebug() << "[ERROR] Referential extra data overflow";
    }
    sourceBuffer += expectedSize;
    return sourceBuffer - startPosition;
}
int Referential::pack(unsigned char* destinationBuffer) const {
    // Wire layout: one type byte, the version, then the relative transform —
    // fixed-point translation, quantized rotation, fixed-point scale.
    unsigned char* cursor = destinationBuffer;
    *cursor++ = (unsigned char)_type;
    memcpy(cursor, &_version, sizeof(_version));
    cursor += sizeof(_version);

    cursor += packFloatVec3ToSignedTwoByteFixed(cursor, _translation, 0);
    cursor += packOrientationQuatToBytes(cursor, _rotation);
    cursor += packFloatScalarToSignedTwoByteFixed(cursor, _scale, 0);
    return cursor - destinationBuffer;
}
// Mirror of pack(): reads type byte, version, then the fixed-point/quantized
// relative transform. Returns bytes read.
int Referential::unpack(const unsigned char* sourceBuffer) {
    const unsigned char* startPosition = sourceBuffer;
    _type = (Type)*sourceBuffer++;
    // Coerce unrecognized type values (e.g. from a newer peer) to UNKNOWN.
    if (_type < 0 || _type >= NUM_TYPE) {
        _type = UNKNOWN;
    }
    memcpy(&_version, sourceBuffer, sizeof(_version));
    sourceBuffer += sizeof(_version);

    sourceBuffer += unpackFloatVec3FromSignedTwoByteFixed(sourceBuffer, _translation, 0);
    sourceBuffer += unpackOrientationQuatFromBytes(sourceBuffer, _rotation);
    sourceBuffer += unpackFloatScalarFromSignedTwoByteFixed((const int16_t*) sourceBuffer, &_scale, 0);
    return sourceBuffer - startPosition;
}
// Base-class fallback: the base can't interpret subclass payloads, so it
// replays the raw bytes captured at unpack time. Returns the bytes written.
int Referential::packExtraData(unsigned char *destinationBuffer) const {
    int byteCount = _extraDataBuffer.size();
    memcpy(destinationBuffer, _extraDataBuffer.data(), byteCount);
    return byteCount;
}
int Referential::unpackExtraData(const unsigned char* sourceBuffer, int size) {
_extraDataBuffer.clear();
_extraDataBuffer.setRawData(reinterpret_cast<const char*>(sourceBuffer), size);
return size;
}

View file

@ -0,0 +1,75 @@
//
// Referential.h
//
//
// Created by Clement on 7/30/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_Referential_h
#define hifi_Referential_h
#include <glm/gtx/quaternion.hpp>
#include <glm/vec3.hpp>
class AvatarData;
/// Stores and enforces the relative position of an avatar to a given referential (ie. model, joint, ...)
class Referential {
public:
// Wire-format tag identifying which subclass packed the extra data.
enum Type {
UNKNOWN,
MODEL,
JOINT,
AVATAR,
NUM_TYPE
};
// Unpacks a referential from a network buffer; sourceBuffer is passed by
// reference and advanced past the bytes consumed (no byte count returned).
Referential(const unsigned char*& sourceBuffer, AvatarData* avatar);
virtual ~Referential();
Type type() const { return _type; }
quint8 version() const { return _version; }
// False when the subclass did not consume exactly the declared extra-data
// size during unpacking (e.g. payload from a different referential type).
bool isValid() const { return _isValid; }
bool hasExtraData() const { return !_extraDataBuffer.isEmpty(); }
glm::vec3 getTranslation() const { return _translation; }
glm::quat getRotation() const { return _rotation; }
float getScale() const {return _scale; }
QByteArray getExtraData() const { return _extraDataBuffer; }
// Hook for subclasses to refresh state from the referential; base is a no-op.
virtual void update() {}
// Serializes/deserializes base data, a one-byte extra-data size, then the
// subclass extra data; both return the number of bytes written/consumed.
int packReferential(unsigned char* destinationBuffer) const;
int unpackReferential(const unsigned char* sourceBuffer);
protected:
Referential(Type type, AvatarData* avatar);
// packs the base class data
int pack(unsigned char* destinationBuffer) const;
int unpack(const unsigned char* sourceBuffer);
// virtual functions that pack the extra data of subclasses (needs to be reimplemented in subclass)
virtual int packExtraData(unsigned char* destinationBuffer) const;
virtual int unpackExtraData(const unsigned char* sourceBuffer, int size);
Type _type;
quint8 _version;
bool _isValid;
AvatarData* _avatar;
QByteArray _extraDataBuffer; // raw subclass payload, replayed on pack
glm::vec3 _refPosition; // reference-frame transform
glm::quat _refRotation;
float _refScale;
glm::vec3 _translation; // avatar transform relative to the referential
glm::quat _rotation;
float _scale;
};
#endif // hifi_Referential_h

View file

@ -242,6 +242,30 @@ bool Attribute::metavoxelRootsEqual(const MetavoxelNode& firstRoot, const Metavo
return firstRoot.deepEquals(this, secondRoot, minimum, size, lod);
}
// Builds a new root twice the old root's size in each dimension (see header
// doc). Each old child subtree — or the old leaf value — is re-parented into
// the opposite-corner octant of a fresh child, which keeps the existing data
// centered in the enlarged volume.
MetavoxelNode* Attribute::expandMetavoxelRoot(const MetavoxelNode& root) {
AttributePointer attribute(this);
MetavoxelNode* newParent = new MetavoxelNode(attribute);
for (int i = 0; i < MetavoxelNode::CHILD_COUNT; i++) {
MetavoxelNode* newChild = new MetavoxelNode(attribute);
newParent->setChild(i, newChild);
int index = MetavoxelNode::getOppositeChildIndex(i);
if (root.isLeaf()) {
// Leaf root: replicate its value into the kept octant.
newChild->setChild(index, new MetavoxelNode(root.getAttributeValue(attribute)));
} else {
// Reuse the existing subtree; bump its refcount since it gains a new parent.
MetavoxelNode* grandchild = root.getChild(i);
grandchild->incrementReferenceCount();
newChild->setChild(index, grandchild);
}
// Fill the remaining seven octants with default-valued nodes.
for (int j = 1; j < MetavoxelNode::CHILD_COUNT; j++) {
MetavoxelNode* newGrandchild = new MetavoxelNode(attribute);
newChild->setChild((index + j) % MetavoxelNode::CHILD_COUNT, newGrandchild);
}
newChild->mergeChildren(attribute);
}
newParent->mergeChildren(attribute);
return newParent;
}
FloatAttribute::FloatAttribute(const QString& name, float defaultValue) :
SimpleInlineAttribute<float>(name, defaultValue) {
}
@ -559,9 +583,6 @@ bool HeightfieldAttribute::merge(void*& parent, void* children[], bool postRead)
}
const QByteArray& childContents = child->getContents();
int childSize = glm::sqrt((float)childContents.size());
if (childSize != size) {
continue; // TODO: handle differently-sized children
}
const int INDEX_MASK = 1;
int xIndex = i & INDEX_MASK;
const int Y_SHIFT = 1;
@ -576,13 +597,33 @@ bool HeightfieldAttribute::merge(void*& parent, void* children[], bool postRead)
char* dest = contents.data() + (zIndex * halfSize * size) + (xIndex * halfSize);
uchar* src = (uchar*)childContents.data();
int childSizePlusOne = childSize + 1;
for (int z = 0; z < halfSize; z++) {
for (char* end = dest + halfSize; dest != end; src += 2) {
int max = qMax(qMax(src[0], src[1]), qMax(src[childSize], src[childSizePlusOne]));
*dest++ = (max == 0) ? 0 : (yOffset + (max >> 1));
if (childSize == size) {
// simple case: one destination value for four child values
for (int z = 0; z < halfSize; z++) {
for (char* end = dest + halfSize; dest != end; src += 2) {
int max = qMax(qMax(src[0], src[1]), qMax(src[childSize], src[childSizePlusOne]));
*dest++ = (max == 0) ? 0 : (yOffset + (max >> 1));
}
dest += halfSize;
src += childSize;
}
} else {
// more complex: N destination values for four child values
int halfChildSize = childSize / 2;
int destPerSrc = size / childSize;
for (int z = 0; z < halfChildSize; z++) {
for (uchar* end = src + childSize; src != end; src += 2) {
int max = qMax(qMax(src[0], src[1]), qMax(src[childSize], src[childSizePlusOne]));
memset(dest, (max == 0) ? 0 : (yOffset + (max >> 1)), destPerSrc);
dest += destPerSrc;
}
dest += halfSize;
for (int j = 1; j < destPerSrc; j++) {
memcpy(dest, dest - size, halfSize);
dest += size;
}
src += childSize;
}
dest += halfSize;
src += childSize;
}
}
*(HeightfieldDataPointer*)&parent = HeightfieldDataPointer(new HeightfieldData(contents));
@ -638,9 +679,6 @@ bool HeightfieldColorAttribute::merge(void*& parent, void* children[], bool post
}
const QByteArray& childContents = child->getContents();
int childSize = glm::sqrt(childContents.size() / (float)BYTES_PER_PIXEL);
if (childSize != size) {
continue; // TODO: handle differently-sized children
}
const int INDEX_MASK = 1;
int xIndex = i & INDEX_MASK;
const int Y_SHIFT = 1;
@ -653,7 +691,8 @@ bool HeightfieldColorAttribute::merge(void*& parent, void* children[], bool post
char* dest = contents.data() + ((zIndex * halfSize * size) + (xIndex * halfSize)) * BYTES_PER_PIXEL;
uchar* src = (uchar*)childContents.data();
int childStride = childSize * BYTES_PER_PIXEL;
int halfStride = halfSize * BYTES_PER_PIXEL;
int stride = size * BYTES_PER_PIXEL;
int halfStride = stride / 2;
int childStep = 2 * BYTES_PER_PIXEL;
int redOffset3 = childStride + BYTES_PER_PIXEL;
int greenOffset1 = BYTES_PER_PIXEL + 1;
@ -662,14 +701,38 @@ bool HeightfieldColorAttribute::merge(void*& parent, void* children[], bool post
int blueOffset1 = BYTES_PER_PIXEL + 2;
int blueOffset2 = childStride + 2;
int blueOffset3 = childStride + BYTES_PER_PIXEL + 2;
for (int z = 0; z < halfSize; z++) {
for (char* end = dest + halfSize * BYTES_PER_PIXEL; dest != end; src += childStep) {
*dest++ = ((int)src[0] + (int)src[BYTES_PER_PIXEL] + (int)src[childStride] + (int)src[redOffset3]) >> 2;
*dest++ = ((int)src[1] + (int)src[greenOffset1] + (int)src[greenOffset2] + (int)src[greenOffset3]) >> 2;
*dest++ = ((int)src[2] + (int)src[blueOffset1] + (int)src[blueOffset2] + (int)src[blueOffset3]) >> 2;
if (childSize == size) {
// simple case: one destination value for four child values
for (int z = 0; z < halfSize; z++) {
for (char* end = dest + halfSize * BYTES_PER_PIXEL; dest != end; src += childStep) {
*dest++ = ((int)src[0] + (int)src[BYTES_PER_PIXEL] + (int)src[childStride] + (int)src[redOffset3]) >> 2;
*dest++ = ((int)src[1] + (int)src[greenOffset1] + (int)src[greenOffset2] + (int)src[greenOffset3]) >> 2;
*dest++ = ((int)src[2] + (int)src[blueOffset1] + (int)src[blueOffset2] + (int)src[blueOffset3]) >> 2;
}
dest += halfStride;
src += childStride;
}
} else {
// more complex: N destination values for four child values
int halfChildSize = childSize / 2;
int destPerSrc = size / childSize;
for (int z = 0; z < halfChildSize; z++) {
for (uchar* end = src + childSize * BYTES_PER_PIXEL; src != end; src += childStep) {
*dest++ = ((int)src[0] + (int)src[BYTES_PER_PIXEL] + (int)src[childStride] + (int)src[redOffset3]) >> 2;
*dest++ = ((int)src[1] + (int)src[greenOffset1] + (int)src[greenOffset2] + (int)src[greenOffset3]) >> 2;
*dest++ = ((int)src[2] + (int)src[blueOffset1] + (int)src[blueOffset2] + (int)src[blueOffset3]) >> 2;
for (int j = 1; j < destPerSrc; j++) {
memcpy(dest, dest - BYTES_PER_PIXEL, BYTES_PER_PIXEL);
dest += BYTES_PER_PIXEL;
}
}
dest += halfStride;
for (int j = 1; j < destPerSrc; j++) {
memcpy(dest, dest - stride, halfStride);
dest += stride;
}
src += childStride;
}
dest += halfStride;
src += childStride;
}
}
*(HeightfieldDataPointer*)&parent = HeightfieldDataPointer(new HeightfieldData(contents));
@ -770,6 +833,29 @@ bool SharedObjectSetAttribute::deepEqual(void* first, void* second) const {
return setsEqual(decodeInline<SharedObjectSet>(first), decodeInline<SharedObjectSet>(second));
}
// Set-attribute variant of root expansion (see Attribute::expandMetavoxelRoot):
// new children start from the old root's value rather than the default, and a
// leaf root needs no further subdivision.
MetavoxelNode* SharedObjectSetAttribute::expandMetavoxelRoot(const MetavoxelNode& root) {
AttributePointer attribute(this);
MetavoxelNode* newParent = new MetavoxelNode(attribute);
for (int i = 0; i < MetavoxelNode::CHILD_COUNT; i++) {
MetavoxelNode* newChild = new MetavoxelNode(root.getAttributeValue(attribute));
newParent->setChild(i, newChild);
if (root.isLeaf()) {
// Leaf root: the child already carries the root's value.
continue;
}
// Re-parent the old subtree into the opposite-corner octant to keep the
// existing data centered; bump its refcount for the new parent.
MetavoxelNode* grandchild = root.getChild(i);
grandchild->incrementReferenceCount();
int index = MetavoxelNode::getOppositeChildIndex(i);
newChild->setChild(index, grandchild);
// Fill the remaining seven octants with default-valued nodes.
for (int j = 1; j < MetavoxelNode::CHILD_COUNT; j++) {
MetavoxelNode* newGrandchild = new MetavoxelNode(attribute);
newChild->setChild((index + j) % MetavoxelNode::CHILD_COUNT, newGrandchild);
}
newChild->mergeChildren(attribute);
}
newParent->mergeChildren(attribute);
return newParent;
}
bool SharedObjectSetAttribute::merge(void*& parent, void* children[], bool postRead) const {
for (int i = 0; i < MERGE_COUNT; i++) {
if (!decodeInline<SharedObjectSet>(children[i]).isEmpty()) {

View file

@ -238,6 +238,10 @@ public:
virtual bool metavoxelRootsEqual(const MetavoxelNode& firstRoot, const MetavoxelNode& secondRoot,
const glm::vec3& minimum, float size, const MetavoxelLOD& lod);
/// Expands the specified root, doubling its size in each dimension.
/// \return a new node representing the result
virtual MetavoxelNode* expandMetavoxelRoot(const MetavoxelNode& root);
/// Merges the value of a parent and its children.
/// \param postRead whether or not the merge is happening after a read
/// \return whether or not the children and parent values are all equal
@ -511,6 +515,8 @@ public:
virtual bool deepEqual(void* first, void* second) const;
virtual MetavoxelNode* expandMetavoxelRoot(const MetavoxelNode& root);
virtual bool merge(void*& parent, void* children[], bool postRead = false) const;
virtual AttributeValue inherit(const AttributeValue& parentValue) const;

View file

@ -512,33 +512,11 @@ void MetavoxelData::set(const glm::vec3& minimum, const MetavoxelData& data, boo
}
}
static int getOppositeIndex(int index) {
return index ^ MAXIMUM_FLAG_MASK;
}
void MetavoxelData::expand() {
for (QHash<AttributePointer, MetavoxelNode*>::iterator it = _roots.begin(); it != _roots.end(); it++) {
MetavoxelNode* newParent = new MetavoxelNode(it.key());
for (int i = 0; i < MetavoxelNode::CHILD_COUNT; i++) {
MetavoxelNode* newChild = new MetavoxelNode(it.key());
newParent->setChild(i, newChild);
int index = getOppositeIndex(i);
if (it.value()->isLeaf()) {
newChild->setChild(index, new MetavoxelNode(it.value()->getAttributeValue(it.key())));
} else {
MetavoxelNode* grandchild = it.value()->getChild(i);
grandchild->incrementReferenceCount();
newChild->setChild(index, grandchild);
}
for (int j = 1; j < MetavoxelNode::CHILD_COUNT; j++) {
MetavoxelNode* newGrandchild = new MetavoxelNode(it.key());
newChild->setChild((index + j) % MetavoxelNode::CHILD_COUNT, newGrandchild);
}
newChild->mergeChildren(it.key());
}
newParent->mergeChildren(it.key());
MetavoxelNode* newNode = it.key()->expandMetavoxelRoot(*it.value());
it.value()->decrementReferenceCount(it.key());
it.value() = newParent;
it.value() = newNode;
}
_size *= 2.0f;
}
@ -823,6 +801,10 @@ void MetavoxelStreamState::setMinimum(const glm::vec3& lastMinimum, int index) {
minimum = getNextMinimum(lastMinimum, size, index);
}
// XORs the child index with MAXIMUM_FLAG_MASK, yielding the index of the
// diagonally opposite octant (used when recentering data on root expansion).
int MetavoxelNode::getOppositeChildIndex(int index) {
return index ^ MAXIMUM_FLAG_MASK;
}
MetavoxelNode::MetavoxelNode(const AttributeValue& attributeValue, const MetavoxelNode* copyChildren) :
_referenceCount(1) {

View file

@ -197,6 +197,8 @@ public:
static const int CHILD_COUNT = 8;
static int getOppositeChildIndex(int index);
MetavoxelNode(const AttributeValue& attributeValue, const MetavoxelNode* copyChildren = NULL);
MetavoxelNode(const AttributePointer& attribute, const MetavoxelNode* copy);

View file

@ -179,7 +179,6 @@ void ModelItemIDfromScriptValue(const QScriptValue &object, ModelItemID& propert
/// ModelItem class - this is the actual model item class.
class ModelItem {
public:
ModelItem();

View file

@ -15,6 +15,8 @@
#include <Octree.h>
#include "ModelTreeElement.h"
class Model;
class NewlyCreatedModelHook {
public:
virtual void modelCreated(const ModelItem& newModel, const SharedNodePointer& senderNode) = 0;
@ -23,6 +25,7 @@ public:
class ModelItemFBXService {
public:
virtual const FBXGeometry* getGeometryForModel(const ModelItem& modelItem) = 0;
virtual const Model* getModelForModelItem(const ModelItem& modelItem) = 0;
};
class ModelTree : public Octree {
@ -80,6 +83,7 @@ public:
void processEraseMessage(const QByteArray& dataByteArray, const SharedNodePointer& sourceNode);
void handleAddModelResponse(const QByteArray& packet);
ModelItemFBXService* getFBXService() const { return _fbxService; }
void setFBXService(ModelItemFBXService* service) { _fbxService = service; }
const FBXGeometry* getGeometryForModel(const ModelItem& modelItem) {
return _fbxService ? _fbxService->getGeometryForModel(modelItem) : NULL;

View file

@ -167,7 +167,7 @@ bool oneAtBit(unsigned char byte, int bitIndex) {
}
void setAtBit(unsigned char& byte, int bitIndex) {
byte += (1 << (7 - bitIndex));
byte |= (1 << (7 - bitIndex));
}
void clearAtBit(unsigned char& byte, int bitIndex) {
@ -176,7 +176,7 @@ void clearAtBit(unsigned char& byte, int bitIndex) {
}
}
int getSemiNibbleAt(unsigned char& byte, int bitIndex) {
int getSemiNibbleAt(unsigned char byte, int bitIndex) {
return (byte >> (6 - bitIndex) & 3); // semi-nibbles store 00, 01, 10, or 11
}
@ -207,7 +207,7 @@ bool isBetween(int64_t value, int64_t max, int64_t min) {
void setSemiNibbleAt(unsigned char& byte, int bitIndex, int value) {
//assert(value <= 3 && value >= 0);
byte += ((value & 3) << (6 - bitIndex)); // semi-nibbles store 00, 01, 10, or 11
byte |= ((value & 3) << (6 - bitIndex)); // semi-nibbles store 00, 01, 10, or 11
}
bool isInEnvironment(const char* environment) {
@ -496,7 +496,7 @@ int packFloatScalarToSignedTwoByteFixed(unsigned char* buffer, float scalar, int
return sizeof(uint16_t);
}
int unpackFloatScalarFromSignedTwoByteFixed(int16_t* byteFixedPointer, float* destinationPointer, int radix) {
int unpackFloatScalarFromSignedTwoByteFixed(const int16_t* byteFixedPointer, float* destinationPointer, int radix) {
*destinationPointer = *byteFixedPointer / (float)(1 << radix);
return sizeof(int16_t);
}

View file

@ -82,7 +82,7 @@ int numberOfOnes(unsigned char byte);
bool oneAtBit(unsigned char byte, int bitIndex);
void setAtBit(unsigned char& byte, int bitIndex);
void clearAtBit(unsigned char& byte, int bitIndex);
int getSemiNibbleAt(unsigned char& byte, int bitIndex);
int getSemiNibbleAt(unsigned char byte, int bitIndex);
void setSemiNibbleAt(unsigned char& byte, int bitIndex, int value);
int getNthBit(unsigned char byte, int ordinal); /// determines the bit placement 0-7 of the ordinal set bit