Mirror of https://github.com/Armored-Dragon/overte.git (synced 2025-03-11 16:13:16 +01:00)

Commit 002f8c736f (parent b637408b54): Added adjustable audio jitter buffer (in preferences), and simple lowPassFilter (for pert testing compared to LPF)

5 changed files with 81 additions and 28 deletions
Application.cpp

@@ -155,7 +155,7 @@ Application::Application(int& argc, char** argv, timeval &startup_time) :
     _oculusProgram(0),
     _oculusDistortionScale(1.25),
 #ifndef _WIN32
-    _audio(&_audioScope),
+    _audio(&_audioScope, 0),
 #endif
     _stopNetworkReceiveThread(false),
     _packetCount(0),
@@ -889,7 +889,13 @@ void Application::editPreferences() {
     QDoubleSpinBox* leanScale = new QDoubleSpinBox();
     leanScale->setValue(_myAvatar.getLeanScale());
     form->addRow("Lean Scale:", leanScale);
 
+    QSpinBox* audioJitterBufferSamples = new QSpinBox();
+    audioJitterBufferSamples->setMaximum(10000);
+    audioJitterBufferSamples->setMinimum(-10000);
+    audioJitterBufferSamples->setValue(_audioJitterBufferSamples);
+    form->addRow("Audio Jitter Buffer Samples:", audioJitterBufferSamples);
+
     QDialogButtonBox* buttons = new QDialogButtonBox(QDialogButtonBox::Ok | QDialogButtonBox::Cancel);
     dialog.connect(buttons, SIGNAL(accepted()), SLOT(accept()));
     dialog.connect(buttons, SIGNAL(rejected()), SLOT(reject()));
@@ -903,7 +909,9 @@ void Application::editPreferences() {
     sendAvatarVoxelURLMessage(url);
 
     _headCameraPitchYawScale = headCameraPitchYawScale->value();
+    _audioJitterBufferSamples = audioJitterBufferSamples->value();
     _myAvatar.setLeanScale(leanScale->value());
+    _audio.setJitterBufferSamples(audioJitterBufferSamples->value());
 }
 
 void Application::pair() {
@@ -1341,6 +1349,8 @@ void Application::initMenu() {
     debugMenu->addAction("Wants Res-In", this, SLOT(setWantsResIn(bool)))->setCheckable(true);
     debugMenu->addAction("Wants Monochrome", this, SLOT(setWantsMonochrome(bool)))->setCheckable(true);
     debugMenu->addAction("Wants View Delta Sending", this, SLOT(setWantsDelta(bool)))->setCheckable(true);
+    (_shouldLowPassFilter = debugMenu->addAction("Test: LowPass filter"))->setCheckable(true);
+
 
     QMenu* settingsMenu = menuBar->addMenu("Settings");
     (_settingsAutosave = settingsMenu->addAction("Autosave"))->setCheckable(true);
@@ -1420,6 +1430,9 @@ void Application::init() {
     gettimeofday(&_lastTimeIdle, NULL);
 
     loadSettings();
+    _audio.setJitterBufferSamples(_audioJitterBufferSamples);
+    printLog("Loaded settings.\n");
+
 
     sendAvatarVoxelURLMessage(_myAvatar.getVoxels()->getVoxelURL());
 }
@@ -2661,7 +2674,7 @@ void Application::loadSettings(QSettings* settings) {
     }
 
     _headCameraPitchYawScale = loadSetting(settings, "headCameraPitchYawScale", 0.0f);
+    _audioJitterBufferSamples = loadSetting(settings, "audioJitterBufferSamples", 0);
     settings->beginGroup("View Frustum Offset Camera");
     // in case settings is corrupt or missing loadSetting() will check for NaN
     _viewFrustumOffsetYaw = loadSetting(settings, "viewFrustumOffsetYaw" , 0.0f);
@@ -2682,6 +2695,7 @@ void Application::saveSettings(QSettings* settings) {
     }
 
     settings->setValue("headCameraPitchYawScale", _headCameraPitchYawScale);
+    settings->setValue("audioJitterBufferSamples", _audioJitterBufferSamples);
     settings->beginGroup("View Frustum Offset Camera");
     settings->setValue("viewFrustumOffsetYaw", _viewFrustumOffsetYaw);
     settings->setValue("viewFrustumOffsetPitch", _viewFrustumOffsetPitch);
Application.h

@@ -76,6 +76,7 @@ public:
     QSettings* getSettings() { return _settings; }
     Environment* getEnvironment() { return &_environment; }
     bool shouldEchoAudio() { return _echoAudioMode->isChecked(); }
+    bool shouldLowPassFilter() { return _shouldLowPassFilter->isChecked(); }
 
     QNetworkAccessManager* getNetworkAccessManager() { return _networkAccessManager; }
 
@@ -176,6 +177,7 @@ private:
 
     QAction* _lookingInMirror;     // Are we currently rendering one's own head as if in mirror?
     QAction* _echoAudioMode;       // Are we asking the mixer to echo back our audio?
+    QAction* _shouldLowPassFilter; // Use test lowpass filter
     QAction* _gyroLook;            // Whether to allow the gyro data from head to move your view
     QAction* _renderAvatarBalls;   // Switch between voxels and joints/balls for avatar render
     QAction* _mouseLook;           // Whether the have the mouse near edge of screen move your view
@@ -257,6 +259,8 @@ private:
     int _headMouseX, _headMouseY;
     float _headCameraPitchYawScale;
 
+    int _audioJitterBufferSamples; // Number of extra samples to wait before starting audio playback
+
     HandControl _handControl;
 
     int _mouseX;
Audio.cpp

@@ -25,6 +25,9 @@
 #include "Util.h"
 #include "Log.h"
 
+// Uncomment the following definition to test audio device latency by copying output to input
+//#define TEST_AUDIO_LOOPBACK
+
 const int NUM_AUDIO_CHANNELS = 2;
 
 const int PACKET_LENGTH_BYTES = 1024;
@@ -41,13 +44,8 @@ const float FLANGE_BASE_RATE = 4;
 const float MAX_FLANGE_SAMPLE_WEIGHT = 0.50;
 const float MIN_FLANGE_INTENSITY = 0.25;
 
-const float JITTER_BUFFER_LENGTH_MSECS = 12;
-const short JITTER_BUFFER_SAMPLES = JITTER_BUFFER_LENGTH_MSECS *
-                                    NUM_AUDIO_CHANNELS * (SAMPLE_RATE / 1000.0);
-
 const float AUDIO_CALLBACK_MSECS = (float)BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
-
 
 const int AGENT_LOOPBACK_MODIFIER = 307;
 
 int numStarves = 0;
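For context on the removed constants, here is a minimal sketch (not part of the commit) of what the old compile-time default worked out to. SAMPLE_RATE is defined elsewhere in the audio headers; the 22050 Hz figure used below is an assumption for illustration only.

    // Sketch only: SAMPLE_RATE of 22050 Hz is an assumed value; NUM_AUDIO_CHANNELS is 2 as above.
    const float SAMPLE_RATE_ASSUMED = 22050.0f;
    const float OLD_JITTER_BUFFER_LENGTH_MSECS = 12.0f;
    const int oldDefaultJitterSamples =
        (int)(OLD_JITTER_BUFFER_LENGTH_MSECS * NUM_AUDIO_CHANNELS * (SAMPLE_RATE_ASSUMED / 1000.0f));
    // ~529 samples under that assumption; the new _jitterBufferSamples member defaults to 0
    // and is set from the preferences dialog instead of a fixed constant.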
@@ -84,6 +82,13 @@ int audioCallback (const void* inputBuffer,
     int16_t* outputLeft = ((int16_t**) outputBuffer)[0];
     int16_t* outputRight = ((int16_t**) outputBuffer)[1];
 
+    // LowPass filter test
+    if (Application::getInstance()->shouldLowPassFilter()) {
+        parentAudio->lowPassFilter(inputLeft);
+        memcpy(parentAudio->_echoInputSamples, inputLeft,
+               PACKET_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
+    }
+
     // Add Procedural effects to input samples
     parentAudio->addProceduralSounds(inputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
 
@@ -168,9 +173,10 @@ int audioCallback (const void* inputBuffer,
 
     if (ringBuffer->getEndOfLastWrite()) {
 
-        if (!ringBuffer->isStarted() && ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES) {
+        if (!ringBuffer->isStarted() && ringBuffer->diffLastWriteNextOutput() < (PACKET_LENGTH_SAMPLES + parentAudio->_jitterBufferSamples)) {
             // printLog("Held back, buffer has %d of %d samples required.\n",
-            //          ringBuffer->diffLastWriteNextOutput(), PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES);
+            //          ringBuffer->diffLastWriteNextOutput(),
+            //          PACKET_LENGTH_SAMPLES + parentAudio->_jitterBufferSamples);
         } else if (ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES) {
             ringBuffer->setStarted(false);
 
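The changed condition holds playback back until the ring buffer has accumulated PACKET_LENGTH_SAMPLES plus the user-set jitter buffer, so the preference value translates directly into extra startup delay. A rough helper, assuming the same interleaved stereo sample counting as the removed constant (channel count and sample rate are assumptions here, not values taken from the commit):

    // Sketch only: converts a jitter-buffer sample count back into milliseconds of added hold-back.
    float jitterBufferDelayMsecs(int jitterBufferSamples, int numChannels, float sampleRate) {
        return jitterBufferSamples / (numChannels * sampleRate / 1000.0f);
    }
    // e.g. jitterBufferDelayMsecs(500, 2, 22050.0f) is roughly 11.3 ms of additional delay.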
@@ -182,7 +188,9 @@ int audioCallback (const void* inputBuffer,
         } else {
             if (!ringBuffer->isStarted()) {
                 ringBuffer->setStarted(true);
-                // printLog("starting playback %3.1f msecs delayed \n", (usecTimestampNow() - usecTimestamp(&firstPlaybackTimer))/1000.0);
+                //printLog("starting playback %3.1f msecs delayed, jitter buffer = %d \n",
+                //         (usecTimestampNow() - usecTimestamp(&parentAudio->_firstPlaybackTime))/1000.0,
+                //         parentAudio->_jitterBufferSamples);
             } else {
                 // printLog("pushing buffer\n");
             }
@@ -248,9 +256,13 @@ int audioCallback (const void* inputBuffer,
                 }
             }
         }
+#ifndef TEST_AUDIO_LOOPBACK
         outputLeft[s] = leftSample;
         outputRight[s] = rightSample;
+#else
+        outputLeft[s] = inputLeft[s];
+        outputRight[s] = inputLeft[s];
+#endif
     }
     ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES);
 
@@ -282,15 +294,13 @@ void outputPortAudioError(PaError error) {
     }
 }
 
-Audio::Audio(Oscilloscope* scope) :
+Audio::Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples) :
     _stream(NULL),
     _ringBuffer(true),
     _scope(scope),
     _averagedLatency(0.0),
     _measuredJitter(0),
-    _jitterBufferLengthMsecs(12.0),
-    _jitterBufferSamples(_jitterBufferLengthMsecs *
-                         NUM_AUDIO_CHANNELS * (SAMPLE_RATE / 1000.0)),
+    _jitterBufferSamples(initialJitterBufferSamples),
    _wasStarved(0),
    _lastInputLoudness(0),
    _lastVelocity(0),
@@ -304,12 +314,20 @@ Audio::Audio(Oscilloscope* scope) :
     _isGatheringEchoOutputFrames(false)
 {
     outputPortAudioError(Pa_Initialize());
+
+    // NOTE: Portaudio documentation is unclear as to whether it is safe to specify the
+    // number of frames per buffer explicitly versus setting this value to zero.
+    // Possible source of latency that we need to investigate further.
+    //
+    unsigned long FRAMES_PER_BUFFER = BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
+    //unsigned long FRAMES_PER_BUFFER = 0;
+
     outputPortAudioError(Pa_OpenDefaultStream(&_stream,
                                               2,
                                               2,
                                               (paInt16 | paNonInterleaved),
                                               SAMPLE_RATE,
-                                              BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
+                                              FRAMES_PER_BUFFER,
                                               audioCallback,
                                               (void*) this));
 
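The commented-out alternative (FRAMES_PER_BUFFER = 0) corresponds to letting PortAudio choose the callback buffer size itself; PortAudio names that value paFramesPerBufferUnspecified. A sketch of that variant, for comparison only and not what the commit ships:

    // Sketch only: same call as above, but letting PortAudio pick the buffer size.
    // paFramesPerBufferUnspecified is PortAudio's named constant for 0.
    outputPortAudioError(Pa_OpenDefaultStream(&_stream,
                                              2, 2,
                                              (paInt16 | paNonInterleaved),
                                              SAMPLE_RATE,
                                              paFramesPerBufferUnspecified,
                                              audioCallback,
                                              (void*) this));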
@@ -533,9 +551,28 @@ void Audio::render(int screenWidth, int screenHeight) {
         sprintf(out,"%3.1f\n", _measuredJitter);
         drawtext(startX + jitterPels - 5, topY-10, 0.10, 0, 1, 0, out, 0,1,1);
 
-        sprintf(out, "%3.1fms\n", JITTER_BUFFER_LENGTH_MSECS);
-        drawtext(startX - 10, bottomY + 15, 0.1, 0, 1, 0, out, 1, 0, 0);
+        //sprintf(out, "%3.1fms\n", JITTER_BUFFER_LENGTH_MSECS);
+        //drawtext(startX - 10, bottomY + 15, 0.1, 0, 1, 0, out, 1, 0, 0);
     }
 }
 
+//
+//  Very Simple LowPass filter which works by averaging a bunch of samples with a moving window
+//
+void Audio::lowPassFilter(int16_t* inputBuffer) {
+    static int16_t outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL];
+    for (int i = 2; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2; i++) {
+        outputBuffer[i] = (int16_t)(0.125f * (float)inputBuffer[i - 2] +
+                                    0.25f * (float)inputBuffer[i - 1] +
+                                    0.25f * (float)inputBuffer[i] +
+                                    0.25f * (float)inputBuffer[i + 1] +
+                                    0.125f * (float)inputBuffer[i + 2] );
+    }
+    outputBuffer[0] = inputBuffer[0];
+    outputBuffer[1] = inputBuffer[1];
+    outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2] = inputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2];
+    outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 1] = inputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 1];
+    memcpy(inputBuffer, outputBuffer, BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
+}
+
 #endif
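The five filter weights (0.125, 0.25, 0.25, 0.25, 0.125) sum to 1.0, so the moving-window average preserves overall level while smoothing high-frequency content; the first and last two samples are passed through untouched. A small standalone check that a constant (DC) signal passes through unchanged, assuming a BUFFER_LENGTH_SAMPLES_PER_CHANNEL of 512 (an illustrative value, not taken from the commit):

    // Standalone sketch only; buffer length of 512 is an assumption for illustration.
    #include <stdint.h>
    #include <string.h>
    #include <assert.h>

    const int BUFFER_LENGTH_SAMPLES_PER_CHANNEL = 512;

    // Same 5-tap moving-window average as Audio::lowPassFilter above.
    void lowPassFilter(int16_t* inputBuffer) {
        static int16_t outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL];
        for (int i = 2; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2; i++) {
            outputBuffer[i] = (int16_t)(0.125f * inputBuffer[i - 2] + 0.25f * inputBuffer[i - 1] +
                                        0.25f * inputBuffer[i] + 0.25f * inputBuffer[i + 1] +
                                        0.125f * inputBuffer[i + 2]);
        }
        outputBuffer[0] = inputBuffer[0];
        outputBuffer[1] = inputBuffer[1];
        outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2] = inputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2];
        outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 1] = inputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 1];
        memcpy(inputBuffer, outputBuffer, BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
    }

    int main() {
        int16_t samples[BUFFER_LENGTH_SAMPLES_PER_CHANNEL];
        for (int i = 0; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
            samples[i] = 1000; // constant (DC) input
        }
        lowPassFilter(samples);
        for (int i = 0; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
            assert(samples[i] == 1000); // unity-gain kernel leaves a constant signal unchanged
        }
        return 0;
    }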
Audio.h

@@ -19,7 +19,7 @@
 class Audio {
 public:
     // initializes audio I/O
-    Audio(Oscilloscope* scope);
+    Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples);
     ~Audio();
 
     void render(int screenWidth, int screenHeight);
@@ -29,10 +29,11 @@ public:
     void setLastAcceleration(glm::vec3 lastAcceleration) { _lastAcceleration = lastAcceleration; };
     void setLastVelocity(glm::vec3 lastVelocity) { _lastVelocity = lastVelocity; };
 
-    void addProceduralSounds(int16_t* inputBuffer, int numSamples);
-    void analyzeEcho(int16_t* inputBuffer, int16_t* outputBuffer, int numSamples);
-
+    void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; };
+
+    void addProceduralSounds(int16_t* inputBuffer, int numSamples);
+    void lowPassFilter(int16_t* inputBuffer);
+    void analyzeEcho(int16_t* inputBuffer, int16_t* outputBuffer, int numSamples);
     void addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes);
 
     void startEchoTest();
@@ -46,8 +47,7 @@ private:
     timeval _lastReceiveTime;
     float _averagedLatency;
     float _measuredJitter;
-    float _jitterBufferLengthMsecs;
-    short _jitterBufferSamples;
+    int16_t _jitterBufferSamples;
     int _wasStarved;
     float _lastInputLoudness;
     glm::vec3 _lastVelocity;
Head.cpp

@@ -485,9 +485,7 @@ void Head::renderEyeBalls() {
     _irisProgram->bind();
     glBindTexture(GL_TEXTURE_2D, _irisTextureID);
     glEnable(GL_TEXTURE_2D);
 
-    glm::vec3 front = getFrontDirection();
-
     // render left iris
     glPushMatrix(); {
         glTranslatef(_leftEyePosition.x, _leftEyePosition.y, _leftEyePosition.z); //translate to eyeball position