diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index 25c917f788..34aef00950 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -74,6 +74,10 @@ const glm::vec3 START_LOCATION(4.f, 0.f, 5.f); // Where one's own agent begin const int IDLE_SIMULATE_MSECS = 16; // How often should call simulate and other stuff // in the idle loop? (60 FPS is default) +const int STARTUP_JITTER_SAMPLES = PACKET_LENGTH_SAMPLES_PER_CHANNEL / 2; + // Startup optimistically with small jitter buffer that + // will start playback on the second received audio packet. + // customized canvas that simply forwards requests/events to the singleton application class GLCanvas : public QGLWidget { protected: @@ -182,7 +186,7 @@ Application::Application(int& argc, char** argv, timeval &startup_time) : _oculusProgram(0), _oculusDistortionScale(1.25), #ifndef _WIN32 - _audio(&_audioScope), + _audio(&_audioScope, STARTUP_JITTER_SAMPLES), #endif _stopNetworkReceiveThread(false), _packetCount(0), @@ -564,6 +568,7 @@ void Application::keyPressEvent(QKeyEvent* event) { case Qt::Key_Space: resetSensors(); + _audio.reset(); break; case Qt::Key_G: @@ -961,7 +966,13 @@ void Application::editPreferences() { QDoubleSpinBox* leanScale = new QDoubleSpinBox(); leanScale->setValue(_myAvatar.getLeanScale()); form->addRow("Lean Scale:", leanScale); - + + QSpinBox* audioJitterBufferSamples = new QSpinBox(); + audioJitterBufferSamples->setMaximum(10000); + audioJitterBufferSamples->setMinimum(-10000); + audioJitterBufferSamples->setValue(_audioJitterBufferSamples); + form->addRow("Audio Jitter Buffer Samples (0 for automatic):", audioJitterBufferSamples); + QDialogButtonBox* buttons = new QDialogButtonBox(QDialogButtonBox::Ok | QDialogButtonBox::Cancel); dialog.connect(buttons, SIGNAL(accepted()), SLOT(accept())); dialog.connect(buttons, SIGNAL(rejected()), SLOT(reject())); @@ -976,6 +987,10 @@ void Application::editPreferences() { _audio.setIsCancellingEcho( 
audioEchoCancellation->isChecked() ); _headCameraPitchYawScale = headCameraPitchYawScale->value(); _myAvatar.setLeanScale(leanScale->value()); + _audioJitterBufferSamples = audioJitterBufferSamples->value(); + if (!shouldDynamicallySetJitterBuffer()) { + _audio.setJitterBufferSamples(_audioJitterBufferSamples); + } } void Application::pair() { @@ -1428,6 +1443,7 @@ void Application::initMenu() { debugMenu->addAction("Wants Res-In", this, SLOT(setWantsResIn(bool)))->setCheckable(true); debugMenu->addAction("Wants Monochrome", this, SLOT(setWantsMonochrome(bool)))->setCheckable(true); debugMenu->addAction("Wants View Delta Sending", this, SLOT(setWantsDelta(bool)))->setCheckable(true); + (_shouldLowPassFilter = debugMenu->addAction("Test: LowPass filter"))->setCheckable(true); debugMenu->addAction("Wants Occlusion Culling", this, SLOT(setWantsOcclusionCulling(bool)))->setCheckable(true); QMenu* settingsMenu = menuBar->addMenu("Settings"); @@ -1508,6 +1524,12 @@ void Application::init() { gettimeofday(&_lastTimeIdle, NULL); loadSettings(); + if (!shouldDynamicallySetJitterBuffer()) { + _audio.setJitterBufferSamples(_audioJitterBufferSamples); + } + + printLog("Loaded settings.\n"); + sendAvatarVoxelURLMessage(_myAvatar.getVoxels()->getVoxelURL()); } @@ -2763,7 +2785,7 @@ void Application::loadSettings(QSettings* settings) { } _headCameraPitchYawScale = loadSetting(settings, "headCameraPitchYawScale", 0.0f); - + _audioJitterBufferSamples = loadSetting(settings, "audioJitterBufferSamples", 0); settings->beginGroup("View Frustum Offset Camera"); // in case settings is corrupt or missing loadSetting() will check for NaN _viewFrustumOffsetYaw = loadSetting(settings, "viewFrustumOffsetYaw" , 0.0f); @@ -2787,6 +2809,7 @@ void Application::saveSettings(QSettings* settings) { } settings->setValue("headCameraPitchYawScale", _headCameraPitchYawScale); + settings->setValue("audioJitterBufferSamples", _audioJitterBufferSamples); settings->beginGroup("View Frustum Offset Camera"); 
settings->setValue("viewFrustumOffsetYaw", _viewFrustumOffsetYaw); settings->setValue("viewFrustumOffsetPitch", _viewFrustumOffsetPitch); diff --git a/interface/src/Application.h b/interface/src/Application.h index 63d8689751..f84ff6c20a 100644 --- a/interface/src/Application.h +++ b/interface/src/Application.h @@ -84,6 +84,9 @@ public: SerialInterface* getSerialHeadSensor() { return &_serialHeadSensor; } Webcam* getWebcam() { return &_webcam; } bool shouldEchoAudio() { return _echoAudioMode->isChecked(); } + bool shouldLowPassFilter() { return _shouldLowPassFilter->isChecked(); } + + bool shouldDynamicallySetJitterBuffer() { return _audioJitterBufferSamples == 0; } QNetworkAccessManager* getNetworkAccessManager() { return _networkAccessManager; } @@ -186,6 +189,7 @@ private: QAction* _lookingInMirror; // Are we currently rendering one's own head as if in mirror? QAction* _echoAudioMode; // Are we asking the mixer to echo back our audio? + QAction* _shouldLowPassFilter; // Use test lowpass filter QAction* _gyroLook; // Whether to allow the gyro data from head to move your view QAction* _renderAvatarBalls; // Switch between voxels and joints/balls for avatar render QAction* _mouseLook; // Whether the have the mouse near edge of screen move your view @@ -270,6 +274,8 @@ private: int _headMouseX, _headMouseY; float _headCameraPitchYawScale; + int _audioJitterBufferSamples; // Number of extra samples to wait before starting audio playback + HandControl _handControl; int _mouseX; diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index 533b6e9e3b..fd72826dfb 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -26,18 +26,14 @@ #include "Util.h" #include "Log.h" +// Uncomment the following definition to test audio device latency by copying output to input +//#define TEST_AUDIO_LOOPBACK +//#define SHOW_AUDIO_DEBUG + #define VISUALIZE_ECHO_CANCELLATION -static const int NUM_AUDIO_CHANNELS = 2; - -static const int PACKET_LENGTH_BYTES = 1024; 
-static const int PACKET_LENGTH_BYTES_PER_CHANNEL = PACKET_LENGTH_BYTES / 2; -static const int PACKET_LENGTH_SAMPLES = PACKET_LENGTH_BYTES / sizeof(int16_t); -static const int PACKET_LENGTH_SAMPLES_PER_CHANNEL = PACKET_LENGTH_SAMPLES / 2; - static const int PHASE_DELAY_AT_90 = 20; static const float AMPLITUDE_RATIO_AT_90 = 0.5; - static const int MIN_FLANGE_EFFECT_THRESHOLD = 600; static const int MAX_FLANGE_EFFECT_THRESHOLD = 1500; static const float FLANGE_BASE_RATE = 4; @@ -156,31 +152,53 @@ inline void Audio::performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* o AudioRingBuffer* ringBuffer = &_ringBuffer; - // if we've been reset, and there isn't any new packets yet - // just play some silence + // if there is anything in the ring buffer, decide what to do: if (ringBuffer->getEndOfLastWrite()) { - - if (!ringBuffer->isStarted() && ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES) { -// printLog("Held back, buffer has %d of %d samples required.\n", -// ringBuffer->diffLastWriteNextOutput(), PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES); - } else if (ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES) { + if (!ringBuffer->isStarted() && ringBuffer->diffLastWriteNextOutput() < (PACKET_LENGTH_SAMPLES + _jitterBufferSamples * (ringBuffer->isStereo() ? 2 : 1))) { + // + // If not enough audio has arrived to start playback, keep waiting + // +#ifdef SHOW_AUDIO_DEBUG + printLog("%i,%i,%i,%i\n", + _packetsReceivedThisPlayback, + ringBuffer->diffLastWriteNextOutput(), + PACKET_LENGTH_SAMPLES, + _jitterBufferSamples); +#endif + } else if (ringBuffer->isStarted() && (ringBuffer->diffLastWriteNextOutput() + < PACKET_LENGTH_SAMPLES * (ringBuffer->isStereo() ? 2 : 1))) { + // + // If we have started and now have run out of audio to send to the audio device, + // this means we've starved and should restart. 
+ // ringBuffer->setStarted(false); _numStarves++; _packetsReceivedThisPlayback = 0; - - // printLog("Starved #%d\n", starve_counter); - _wasStarved = 10; // Frames to render the indication that the system was starved. + _wasStarved = 10; // Frames for which to render the indication that the system was starved. +#ifdef SHOW_AUDIO_DEBUG + printLog("Starved, remaining samples = %.0f\n", + ringBuffer->diffLastWriteNextOutput()); +#endif + } else { + // + // We are either already playing back, or we have enough audio to start playing back. + // if (!ringBuffer->isStarted()) { ringBuffer->setStarted(true); - // printLog("starting playback %3.1f msecs delayed \n", (usecTimestampNow() - usecTimestamp(&firstPlaybackTimer))/1000.0); - } else { - // printLog("pushing buffer\n"); +#ifdef SHOW_AUDIO_DEBUG + printLog("starting playback %0.1f msecs delayed, jitter = %d, pkts recvd: %d \n", + (usecTimestampNow() - usecTimestamp(&_firstPacketReceivedTime))/1000.0, + _jitterBufferSamples, + _packetsReceivedThisPlayback); +#endif } + + // // play whatever we have in the audio buffer - + // // if we haven't fired off the flange effect, check if we should // TODO: lastMeasuredHeadYaw is now relative to body - check if this still works. 
@@ -241,9 +259,13 @@ inline void Audio::performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* o } } } - +#ifndef TEST_AUDIO_LOOPBACK outputLeft[s] = leftSample; outputRight[s] = rightSample; +#else + outputLeft[s] = inputLeft[s]; + outputRight[s] = inputLeft[s]; +#endif } ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES); @@ -300,22 +322,25 @@ static void outputPortAudioError(PaError error) { } } -Audio::Audio(Oscilloscope* scope) : +void Audio::reset() { + _packetsReceivedThisPlayback = 0; + _ringBuffer.reset(); +} + +Audio::Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples) : _stream(NULL), _ringBuffer(true), _scope(scope), _averagedLatency(0.0), _measuredJitter(0), -// _jitterBufferLengthMsecs(12.0), -// _jitterBufferSamples(_jitterBufferLengthMsecs * -// NUM_AUDIO_CHANNELS * (SAMPLE_RATE / 1000.0)), + _jitterBufferSamples(initialJitterBufferSamples), _wasStarved(0), _numStarves(0), _lastInputLoudness(0), _lastVelocity(0), _lastAcceleration(0), _totalPacketsReceived(0), - _firstPlaybackTime(), + _firstPacketReceivedTime(), _packetsReceivedThisPlayback(0), _isCancellingEcho(false), _echoDelay(0), @@ -332,15 +357,37 @@ Audio::Audio(Oscilloscope* scope) : _flangeWeight(0.0f) { outputPortAudioError(Pa_Initialize()); - outputPortAudioError(Pa_OpenDefaultStream(&_stream, - 2, - 2, - (paInt16 | paNonInterleaved), - SAMPLE_RATE, - BUFFER_LENGTH_SAMPLES_PER_CHANNEL, - audioCallback, - (void*) this)); + + // NOTE: Portaudio documentation is unclear as to whether it is safe to specify the + // number of frames per buffer explicitly versus setting this value to zero. + // Possible source of latency that we need to investigate further. 
+ // + unsigned long FRAMES_PER_BUFFER = BUFFER_LENGTH_SAMPLES_PER_CHANNEL; + + // Manually initialize the portaudio stream to ask for minimum latency + PaStreamParameters inputParameters, outputParameters; + + inputParameters.device = Pa_GetDefaultInputDevice(); + inputParameters.channelCount = 2; // Stereo input + inputParameters.sampleFormat = (paInt16 | paNonInterleaved); + inputParameters.suggestedLatency = Pa_GetDeviceInfo( inputParameters.device )->defaultLowInputLatency; + inputParameters.hostApiSpecificStreamInfo = NULL; + outputParameters.device = Pa_GetDefaultOutputDevice(); + outputParameters.channelCount = 2; // Stereo output + outputParameters.sampleFormat = (paInt16 | paNonInterleaved); + outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultLowOutputLatency; + outputParameters.hostApiSpecificStreamInfo = NULL; + + outputPortAudioError(Pa_OpenStream(&_stream, + &inputParameters, + &outputParameters, + SAMPLE_RATE, + FRAMES_PER_BUFFER, + paNoFlag, + audioCallback, + (void*) this)); + if (! 
_stream) { return; } @@ -381,6 +428,15 @@ Audio::Audio(Oscilloscope* scope) : // start the stream now that sources are good to go outputPortAudioError(Pa_StartStream(_stream)); + + // Uncomment these lines to see the system-reported latency + //printLog("Default low input, output latency (secs): %0.4f, %0.4f\n", + // Pa_GetDeviceInfo(Pa_GetDefaultInputDevice())->defaultLowInputLatency, + // Pa_GetDeviceInfo(Pa_GetDefaultOutputDevice())->defaultLowOutputLatency); + + const PaStreamInfo* streamInfo = Pa_GetStreamInfo(_stream); + printLog("Started audio with reported latency msecs In/Out: %.0f, %.0f\n", streamInfo->inputLatency * 1000.f, + streamInfo->outputLatency * 1000.f); gettimeofday(&_lastReceiveTime, NULL); } @@ -399,6 +455,7 @@ Audio::~Audio() { void Audio::addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes) { const int NUM_INITIAL_PACKETS_DISCARD = 3; + const int STANDARD_DEVIATION_SAMPLE_COUNT = 500; timeval currentReceiveTime; gettimeofday(&currentReceiveTime, NULL); @@ -411,9 +468,18 @@ void Audio::addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBy _stdev.addValue(timeDiff); } - if (_stdev.getSamples() > 500) { + if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) { _measuredJitter = _stdev.getStDev(); _stdev.reset(); + // Set jitter buffer to be a multiple of the measured standard deviation + const int MAX_JITTER_BUFFER_SAMPLES = RING_BUFFER_LENGTH_SAMPLES / 2; + const float NUM_STANDARD_DEVIATIONS = 3.f; + if (Application::getInstance()->shouldDynamicallySetJitterBuffer()) { + float newJitterBufferSamples = (NUM_STANDARD_DEVIATIONS * _measuredJitter) + / 1000.f + * SAMPLE_RATE; + setJitterBufferSamples(glm::clamp((int)newJitterBufferSamples, 0, MAX_JITTER_BUFFER_SAMPLES)); + } } if (!_ringBuffer.isStarted()) { @@ -421,9 +487,10 @@ void Audio::addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBy } if (_packetsReceivedThisPlayback == 1) { - gettimeofday(&_firstPlaybackTime, NULL); + 
gettimeofday(&_firstPacketReceivedTime, NULL); } + //printf("Got audio packet %d\n", _packetsReceivedThisPlayback); _ringBuffer.parseData((unsigned char*) receivedData, PACKET_LENGTH_BYTES + sizeof(PACKET_HEADER)); _lastReceiveTime = currentReceiveTime; @@ -447,7 +514,7 @@ void Audio::render(int screenWidth, int screenHeight) { glVertex2f(currentX, topY); glVertex2f(currentX, bottomY); - for (int i = 0; i < RING_BUFFER_LENGTH_FRAMES; i++) { + for (int i = 0; i < RING_BUFFER_LENGTH_FRAMES / 2; i++) { glVertex2f(currentX, halfY); glVertex2f(currentX + frameWidth, halfY); currentX += frameWidth; @@ -500,29 +567,60 @@ void Audio::render(int screenWidth, int screenHeight) { char out[40]; sprintf(out, "%3.0f\n", _averagedLatency); - drawtext(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 10, topY - 10, 0.10, 0, 1, 0, out, 1,1,0); - //drawtext(startX + 0, topY-10, 0.08, 0, 1, 0, out, 1,1,0); + drawtext(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 10, topY - 9, 0.10, 0, 1, 0, out, 1,1,0); - // Show a Cyan bar with the most recently measured jitter stdev + // Show a red bar with the 'start' point of one frame plus the jitter buffer - int jitterPels = _measuredJitter / ((1000.0f * PACKET_LENGTH_SAMPLES / SAMPLE_RATE)) * frameWidth; - - glColor3f(0,1,1); + glColor3f(1, 0, 0); + int jitterBufferPels = (1.f + (float)getJitterBufferSamples() / (float)PACKET_LENGTH_SAMPLES_PER_CHANNEL) * frameWidth; + sprintf(out, "%.0f\n", getJitterBufferSamples() / SAMPLE_RATE * 1000.f); + drawtext(startX + jitterBufferPels - 5, topY - 9, 0.10, 0, 1, 0, out, 1, 0, 0); + sprintf(out, "j %.1f\n", _measuredJitter); + if (Application::getInstance()->shouldDynamicallySetJitterBuffer()) { + drawtext(startX + jitterBufferPels - 5, bottomY + 12, 0.10, 0, 1, 0, out, 1, 0, 0); + } else { + drawtext(startX, bottomY + 12, 0.10, 0, 1, 0, out, 1, 0, 0); + } + glBegin(GL_QUADS); - glVertex2f(startX + jitterPels - 2, topY - 2); - glVertex2f(startX + jitterPels + 2, topY - 
2); - glVertex2f(startX + jitterPels + 2, bottomY + 2); - glVertex2f(startX + jitterPels - 2, bottomY + 2); + glVertex2f(startX + jitterBufferPels - 2, topY - 2); + glVertex2f(startX + jitterBufferPels + 2, topY - 2); + glVertex2f(startX + jitterBufferPels + 2, bottomY + 2); + glVertex2f(startX + jitterBufferPels - 2, bottomY + 2); glEnd(); - - sprintf(out,"%3.1f\n", _measuredJitter); - drawtext(startX + jitterPels - 5, topY-10, 0.10, 0, 1, 0, out, 0,1,1); - - sprintf(out, "%3.1fms\n", JITTER_BUFFER_LENGTH_MSECS); - drawtext(startX - 10, bottomY + 15, 0.1, 0, 1, 0, out, 1, 0, 0); + } } +// +// Very Simple LowPass filter which works by averaging a bunch of samples with a moving window +// +//#define lowpass 1 +void Audio::lowPassFilter(int16_t* inputBuffer) { + static int16_t outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL]; + for (int i = 2; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2; i++) { +#ifdef lowpass + outputBuffer[i] = (int16_t)(0.125f * (float)inputBuffer[i - 2] + + 0.25f * (float)inputBuffer[i - 1] + + 0.25f * (float)inputBuffer[i] + + 0.25f * (float)inputBuffer[i + 1] + + 0.125f * (float)inputBuffer[i + 2] ); +#else + outputBuffer[i] = (int16_t)(0.125f * -(float)inputBuffer[i - 2] + + 0.25f * -(float)inputBuffer[i - 1] + + 1.75f * (float)inputBuffer[i] + + 0.25f * -(float)inputBuffer[i + 1] + + 0.125f * -(float)inputBuffer[i + 2] ); + +#endif + } + outputBuffer[0] = inputBuffer[0]; + outputBuffer[1] = inputBuffer[1]; + outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2] = inputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2]; + outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 1] = inputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 1]; + memcpy(inputBuffer, outputBuffer, BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t)); +} + // Take a pointer to the acquired microphone input samples and add procedural sounds void Audio::addProceduralSounds(int16_t* inputBuffer, int numSamples) { const float MAX_AUDIBLE_VELOCITY = 6.0; diff --git a/interface/src/Audio.h 
b/interface/src/Audio.h index ad27b5e711..1e2c69616f 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -20,12 +20,20 @@ #include "Oscilloscope.h" #include "Avatar.h" +static const int NUM_AUDIO_CHANNELS = 2; + +static const int PACKET_LENGTH_BYTES = 1024; +static const int PACKET_LENGTH_BYTES_PER_CHANNEL = PACKET_LENGTH_BYTES / 2; +static const int PACKET_LENGTH_SAMPLES = PACKET_LENGTH_BYTES / sizeof(int16_t); +static const int PACKET_LENGTH_SAMPLES_PER_CHANNEL = PACKET_LENGTH_SAMPLES / 2; + class Audio { public: // initializes audio I/O - Audio(Oscilloscope* scope); + Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples); ~Audio(); + void reset(); void render(int screenWidth, int screenHeight); void addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes); @@ -34,7 +42,14 @@ public: void setLastAcceleration(glm::vec3 lastAcceleration) { _lastAcceleration = lastAcceleration; }; void setLastVelocity(glm::vec3 lastVelocity) { _lastVelocity = lastVelocity; }; - + + void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; }; + int getJitterBufferSamples() { return _jitterBufferSamples; }; + + void lowPassFilter(int16_t* inputBuffer); + + void startEchoTest(); + void renderEchoCompare(); void setIsCancellingEcho(bool enabled); bool isCancellingEcho() const; @@ -45,6 +60,7 @@ public: // The results of the analysis are written to the log. 
bool eventuallyAnalyzePing(); + private: PaStream* _stream; AudioRingBuffer _ringBuffer; @@ -54,15 +70,14 @@ private: timeval _lastReceiveTime; float _averagedLatency; float _measuredJitter; -// float _jitterBufferLengthMsecs; // currently unused -// short _jitterBufferSamples; // currently unsused + int16_t _jitterBufferSamples; int _wasStarved; int _numStarves; float _lastInputLoudness; glm::vec3 _lastVelocity; glm::vec3 _lastAcceleration; int _totalPacketsReceived; - timeval _firstPlaybackTime; + timeval _firstPacketReceivedTime; int _packetsReceivedThisPlayback; // Echo cancellation volatile bool _isCancellingEcho; @@ -101,6 +116,7 @@ private: // Determines round trip time of the audio system. Called from 'eventuallyAnalyzePing'. inline void analyzePing(); + // Add sounds that we want the user to not hear themselves, by adding on top of mic input signal void addProceduralSounds(int16_t* inputBuffer, int numSamples); diff --git a/interface/src/Avatar.cpp b/interface/src/Avatar.cpp index dc8e1dbc40..80cc7d0c09 100644 --- a/interface/src/Avatar.cpp +++ b/interface/src/Avatar.cpp @@ -462,7 +462,16 @@ void Avatar::simulate(float deltaTime, Transmitter* transmitter) { } // update balls - if (_balls) { _balls->simulate(deltaTime); } + if (_balls) { + _balls->moveOrigin(_position); + glm::vec3 lookAt = _head.getLookAtPosition(); + if (glm::length(lookAt) > EPSILON) { + _balls->moveOrigin(lookAt); + } else { + _balls->moveOrigin(_position); + } + _balls->simulate(deltaTime); + } // update torso rotation based on head lean _skeleton.joint[AVATAR_JOINT_TORSO].rotation = glm::quat(glm::radians(glm::vec3( @@ -997,7 +1006,6 @@ void Avatar::render(bool lookingInMirror, bool renderAvatarBalls) { // Render the balls if (_balls) { glPushMatrix(); - glTranslatef(_position.x, _position.y, _position.z); _balls->render(); glPopMatrix(); } diff --git a/interface/src/Balls.cpp b/interface/src/Balls.cpp index 5f88ff512f..49932c84ce 100644 --- a/interface/src/Balls.cpp +++ 
b/interface/src/Balls.cpp @@ -7,36 +7,55 @@ // A cloud of spring-mass spheres to simulate the avatar body/skin. Each ball // connects to as many as 4 neighbors, and executes motion according to a damped // spring, while responding physically to other avatars. -// +// +#include +#include "Util.h" +#include "sharedUtil.h" +#include "world.h" +#include "InterfaceConfig.h" #include "Balls.h" +const float INITIAL_AREA = 0.2f; +const float BALL_RADIUS = 0.025f; +const glm::vec3 INITIAL_COLOR(0.62f, 0.74f, 0.91f); + Balls::Balls(int numberOfBalls) { _numberOfBalls = numberOfBalls; _balls = new Ball[_numberOfBalls]; for (unsigned int i = 0; i < _numberOfBalls; ++i) { - _balls[i].position = glm::vec3(1.0 + randFloat() * 0.5, - 0.5 + randFloat() * 0.5, - 1.0 + randFloat() * 0.5); - _balls[i].radius = 0.02 + randFloat() * 0.06; + _balls[i].position = randVector() * INITIAL_AREA; + _balls[i].targetPosition = _balls[i].position; + _balls[i].velocity = glm::vec3(0, 0, 0); + _balls[i].radius = BALL_RADIUS; for (unsigned int j = 0; j < NUMBER_SPRINGS; ++j) { - _balls[i].links[j] = rand() % (numberOfBalls + 1); - if (_balls[i].links[j]-1 == i) { _balls[i].links[j] = 0; } - _balls[i].springLength[j] = 0.5; - } + _balls[i].links[j] = NULL; + } + } + _color = INITIAL_COLOR; + _origin = glm::vec3(0, 0, 0); +} + +void Balls::moveOrigin(const glm::vec3& newOrigin) { + glm::vec3 delta = newOrigin - _origin; + if (glm::length(delta) > EPSILON) { + _origin = newOrigin; + for (unsigned int i = 0; i < _numberOfBalls; ++i) { + _balls[i].targetPosition += delta; + } } } -const bool RENDER_SPRINGS = true; +const bool RENDER_SPRINGS = false; void Balls::render() { // Render Balls NOTE: This needs to become something other that GlutSpheres! 
- glColor3f(0.62,0.74,0.91); + glColor3fv(&_color.x); for (unsigned int i = 0; i < _numberOfBalls; ++i) { glPushMatrix(); glTranslatef(_balls[i].position.x, _balls[i].position.y, _balls[i].position.z); - glutSolidSphere(_balls[i].radius, 15, 15); + glutSolidSphere(_balls[i].radius, 8, 8); glPopMatrix(); } @@ -71,18 +90,22 @@ void Balls::simulate(float deltaTime) { // Move particles _balls[i].position += _balls[i].velocity * deltaTime; + _balls[i].targetPosition += _balls[i].velocity * deltaTime; // Drag: decay velocity _balls[i].velocity *= (1.f - CONSTANT_VELOCITY_DAMPING * deltaTime); // Add noise - _balls[i].velocity += glm::vec3((randFloat() - 0.5) * NOISE_SCALE, - (randFloat() - 0.5) * NOISE_SCALE, - (randFloat() - 0.5) * NOISE_SCALE); + _balls[i].velocity += randVector() * NOISE_SCALE; + // Approach target position + for (unsigned int i = 0; i < _numberOfBalls; ++i) { + _balls[i].position += randFloat() * deltaTime * (_balls[i].targetPosition - _balls[i].position); + } + // Spring Force - + /* for (unsigned int j = 0; j < NUMBER_SPRINGS; ++j) { if(_balls[i].links[j] > 0) { float separation = glm::distance(_balls[i].position, @@ -96,7 +119,7 @@ void Balls::simulate(float deltaTime) { //_balls[i].velocity *= (1.f - SPRING_DAMPING*deltaTime); } - } + } */ diff --git a/interface/src/Balls.h b/interface/src/Balls.h index 653854a0c6..440f340307 100644 --- a/interface/src/Balls.h +++ b/interface/src/Balls.h @@ -9,12 +9,6 @@ #ifndef hifi_Balls_h #define hifi_Balls_h -#include -#include "Util.h" -#include "world.h" -#include "InterfaceConfig.h" - - const int NUMBER_SPRINGS = 4; class Balls { @@ -24,14 +18,19 @@ public: void simulate(float deltaTime); void render(); + void setColor(const glm::vec3& c) { _color = c; }; + void moveOrigin(const glm::vec3& newOrigin); + private: struct Ball { - glm::vec3 position, velocity; + glm::vec3 position, targetPosition, velocity; int links[NUMBER_SPRINGS]; float springLength[NUMBER_SPRINGS]; float radius; } *_balls; int 
_numberOfBalls; + glm::vec3 _origin; + glm::vec3 _color; }; #endif diff --git a/libraries/audio/src/AudioRingBuffer.cpp b/libraries/audio/src/AudioRingBuffer.cpp index 19af07d600..2d396439aa 100644 --- a/libraries/audio/src/AudioRingBuffer.cpp +++ b/libraries/audio/src/AudioRingBuffer.cpp @@ -27,6 +27,12 @@ AudioRingBuffer::~AudioRingBuffer() { delete[] _buffer; } +void AudioRingBuffer::reset() { + _endOfLastWrite = _buffer; + _nextOutput = _buffer; + _isStarted = false; +} + int AudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) { return parseAudioSamples(sourceBuffer + sizeof(PACKET_HEADER_MIXED_AUDIO), numBytes - sizeof(PACKET_HEADER_MIXED_AUDIO)); } diff --git a/libraries/audio/src/AudioRingBuffer.h b/libraries/audio/src/AudioRingBuffer.h index 4ef8c4ead5..4a86e1242b 100644 --- a/libraries/audio/src/AudioRingBuffer.h +++ b/libraries/audio/src/AudioRingBuffer.h @@ -22,7 +22,7 @@ const int BUFFER_LENGTH_BYTES_STEREO = 1024; const int BUFFER_LENGTH_BYTES_PER_CHANNEL = 512; const int BUFFER_LENGTH_SAMPLES_PER_CHANNEL = BUFFER_LENGTH_BYTES_PER_CHANNEL / sizeof(int16_t); -const short RING_BUFFER_LENGTH_FRAMES = 10; +const short RING_BUFFER_LENGTH_FRAMES = 20; const short RING_BUFFER_LENGTH_SAMPLES = RING_BUFFER_LENGTH_FRAMES * BUFFER_LENGTH_SAMPLES_PER_CHANNEL; class AudioRingBuffer : public AgentData { @@ -30,6 +30,7 @@ public: AudioRingBuffer(bool isStereo); ~AudioRingBuffer(); + void reset(); int parseData(unsigned char* sourceBuffer, int numBytes); int parseAudioSamples(unsigned char* sourceBuffer, int numBytes); @@ -44,8 +45,11 @@ public: bool isStarted() const { return _isStarted; } void setStarted(bool isStarted) { _isStarted = isStarted; } - + int diffLastWriteNextOutput() const; + + bool isStereo() const { return _isStereo; } + protected: // disallow copying of AudioRingBuffer objects AudioRingBuffer(const AudioRingBuffer&);