Mirror of https://github.com/lubosz/overte.git, synced 2025-04-23 20:34:07 +02:00

Commit 7115975c41: Merge branch 'master' of git://github.com/worklist/hifi into 19165

Conflicts:
    interface/src/Oscilloscope.cpp

37 changed files with 853 additions and 433 deletions
@@ -11,7 +11,4 @@ setup_hifi_project(${TARGET_NAME})
# link the shared hifi library
include(${MACRO_DIR}/LinkHifiLibrary.cmake)
link_hifi_library(shared ${TARGET_NAME} ${ROOT_DIR})

# link the threads library
find_package(Threads REQUIRED)
target_link_libraries(${TARGET_NAME} ${CMAKE_THREAD_LIBS_INIT})
link_hifi_library(audio ${TARGET_NAME} ${ROOT_DIR})

@@ -12,7 +12,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <pthread.h>
#include <errno.h>
#include <fstream>
#include <limits>

@@ -17,4 +17,5 @@ include_glm(${TARGET_NAME} ${ROOT_DIR})
# link the required hifi libraries
include(${MACRO_DIR}/LinkHifiLibrary.cmake)
link_hifi_library(shared ${TARGET_NAME} ${ROOT_DIR})
link_hifi_library(avatars ${TARGET_NAME} ${ROOT_DIR})
link_hifi_library(avatars ${TARGET_NAME} ${ROOT_DIR})
link_hifi_library(audio ${TARGET_NAME} ${ROOT_DIR})

@@ -15,6 +15,7 @@
#include <PacketHeaders.h>
#include <AgentList.h>
#include <AvatarData.h>
#include <AudioInjectionManager.h>
#include <AudioInjector.h>

const int EVE_AGENT_LISTEN_PORT = 55441;

@@ -34,6 +35,7 @@ const int HAND_TIMER_SLEEP_ITERATIONS = 50;
const float EVE_PELVIS_HEIGHT = 0.565925f;

const float AUDIO_INJECT_PROXIMITY = 0.4f;
const int EVE_VOLUME_BYTE = 190;

bool stopReceiveAgentDataThread;
@@ -106,20 +108,19 @@ int main(int argc, const char* argv[]) {
// put her hand out so somebody can shake it
eve.setHandPosition(glm::vec3(eve.getPosition()[0] - 0.2,
0.5,
eve.getPosition()[2] + 0.1));
eve.getPosition()[2] + 0.1));

// prepare the audio injection manager by giving it a handle to our agent socket
AudioInjectionManager::setInjectorSocket(agentList->getAgentSocket());

// read eve's audio data
AudioInjector eveAudioInjector("/etc/highfidelity/eve/resources/eve.raw");

// lower Eve's volume by setting the attentuation modifier (this is a value out of 255)
eveAudioInjector.setAttenuationModifier(190);

// pass the agentList UDPSocket pointer to the audio injector
eveAudioInjector.setInjectorSocket(agentList->getAgentSocket());
eveAudioInjector.setVolume(EVE_VOLUME_BYTE);

// set the position of the audio injector
float injectorPosition[3];
memcpy(injectorPosition, &eve.getPosition(), sizeof(injectorPosition));
eveAudioInjector.setPosition(injectorPosition);
eveAudioInjector.setPosition(eve.getPosition());

// register the callback for agent data creation
agentList->linkedDataCreateCallback = createAvatarDataForAgent;

@@ -165,15 +166,11 @@ int main(int argc, const char* argv[]) {
Agent* audioMixer = AgentList::getInstance()->soloAgentOfType(AGENT_TYPE_AUDIO_MIXER);

if (audioMixer) {
// until the audio mixer is setup for ping-reply, activate the public socket if it's not active
if (!audioMixer->getActiveSocket()) {
audioMixer->activatePublicSocket();
}

eveAudioInjector.setDestinationSocket(audioMixer->getActiveSocket());
// update the destination socket for the AIM, in case the mixer has changed
AudioInjectionManager::setDestinationSocket(*audioMixer->getPublicSocket());

// we have an active audio mixer we can send data to
eveAudioInjector.threadInjectionOfAudio();
AudioInjectionManager::threadInjector(&eveAudioInjector);
}
}
}
@@ -3,15 +3,19 @@ cmake_minimum_required(VERSION 2.8)
set(ROOT_DIR ..)
set(MACRO_DIR ${ROOT_DIR}/cmake/macros)

# setup for find modules
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../cmake/modules/")

set(TARGET_NAME injector)

include(${MACRO_DIR}/SetupHifiProject.cmake)
setup_hifi_project(${TARGET_NAME})

# set up the external glm library
include(${MACRO_DIR}/IncludeGLM.cmake)
include_glm(${TARGET_NAME} ${ROOT_DIR})

# link the shared hifi library
include(${MACRO_DIR}/LinkHifiLibrary.cmake)
link_hifi_library(shared ${TARGET_NAME} ${ROOT_DIR})

# link the threads library
find_package(Threads REQUIRED)
target_link_libraries(${TARGET_NAME} ${CMAKE_THREAD_LIBS_INIT})
link_hifi_library(audio ${TARGET_NAME} ${ROOT_DIR})
@@ -6,7 +6,6 @@
// Copyright (c) 2013 Leonardo Murillo. All rights reserved.
//

#include <iostream>
#include <stdio.h>
#include <stdlib.h>

@@ -15,15 +14,17 @@
#include <string.h>
#include <sstream>

#include <SharedUtil.h>
#include <PacketHeaders.h>
#include <UDPSocket.h>
#include <AudioInjector.h>
#include <AudioInjectionManager.h>

char EC2_WEST_AUDIO_SERVER[] = "54.241.92.53";
const int AUDIO_UDP_LISTEN_PORT = 55443;

const int DEFAULT_INJECTOR_VOLUME = 0xFF;

// Command line parameter defaults
bool loopAudio = true;
float sleepIntervalMin = 1.00;

@@ -31,7 +32,7 @@ float sleepIntervalMax = 2.00;
char *sourceAudioFile = NULL;
const char *allowedParameters = ":rb::t::c::a::f:";
float floatArguments[4] = {0.0f, 0.0f, 0.0f, 0.0f};
unsigned char attenuationModifier = 255;
unsigned char volume = DEFAULT_INJECTOR_VOLUME;

void usage(void)
{

@@ -82,7 +83,7 @@ bool processParameters(int parameterCount, char* parameterData[])
break;
}
case 'a':
::attenuationModifier = atoi(optarg);
::volume = atoi(optarg);
std::cout << "[DEBUG] Attenuation modifier: " << optarg << std::endl;
break;
default:

@@ -112,19 +113,17 @@ int main(int argc, char* argv[]) {
exit(-1);
} else {
AudioInjector injector(sourceAudioFile);
injector.setInjectorSocket(&streamSocket);
injector.setDestinationSocket((sockaddr*) &mixerSocket);

injector.setPosition(::floatArguments);
injector.setPosition(glm::vec3(::floatArguments[0], ::floatArguments[1], ::floatArguments[2]));
injector.setBearing(*(::floatArguments + 3));
injector.setAttenuationModifier(::attenuationModifier);
injector.setVolume(::volume);

float delay = 0;
int usecDelay = 0;

while (true) {
injector.injectAudio();

injector.injectAudio(&streamSocket, (sockaddr*) &mixerSocket);

if (!::loopAudio) {
delay = randFloatInRange(::sleepIntervalMin, ::sleepIntervalMax);
usecDelay = delay * 1000 * 1000;
@@ -71,6 +71,7 @@ include(${MACRO_DIR}/LinkHifiLibrary.cmake)
link_hifi_library(shared ${TARGET_NAME} ${ROOT_DIR})
link_hifi_library(voxels ${TARGET_NAME} ${ROOT_DIR})
link_hifi_library(avatars ${TARGET_NAME} ${ROOT_DIR})
link_hifi_library(audio ${TARGET_NAME} ${ROOT_DIR})

# find required libraries
find_package(GLM REQUIRED)
interface/resources/shaders/perlin_modulate.frag (new file, 63 lines)

@@ -0,0 +1,63 @@
#version 120

//
// perlin_modulate.frag
// fragment shader
//
// Created by Andrzej Kapolka on 5/15/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//

// the texture containing our permutations and normals
uniform sampler2D permutationNormalTexture;

// the noise frequency
const float frequency = 1024.0;

// the noise amplitude
const float amplitude = 0.1;

// the position in model space
varying vec3 position;

// returns the gradient at a single corner of our sampling cube
vec3 grad(vec3 location) {
    float p1 = texture2D(permutationNormalTexture, vec2(location.x / 256.0, 0.25)).r;
    float p2 = texture2D(permutationNormalTexture, vec2(p1 + location.y / 256.0, 0.25)).r;
    return texture2D(permutationNormalTexture, vec2(p2 + location.z / 256.0, 0.75)).xyz * 2.0 - vec3(1.0, 1.0, 1.0);
}

// returns the perlin noise value for the specified location
float perlin(vec3 location) {
    vec3 floors = floor(location);
    vec3 ceils = ceil(location);
    vec3 fff = grad(floors);
    vec3 ffc = grad(vec3(floors.x, floors.y, ceils.z));
    vec3 fcf = grad(vec3(floors.x, ceils.y, floors.z));
    vec3 fcc = grad(vec3(floors.x, ceils.y, ceils.z));
    vec3 cff = grad(vec3(ceils.x, floors.y, floors.z));
    vec3 cfc = grad(vec3(ceils.x, floors.y, ceils.z));
    vec3 ccf = grad(vec3(ceils.x, ceils.y, floors.z));
    vec3 ccc = grad(ceils);
    vec3 ffracts = fract(location);
    vec3 cfracts = ffracts - vec3(1.0, 1.0, 1.0);
    vec3 params = ffracts*ffracts*(3.0 - 2.0*ffracts);

    float fffv = dot(fff, ffracts);
    float ffcv = dot(ffc, vec3(ffracts.x, ffracts.y, cfracts.z));
    float fcfv = dot(fcf, vec3(ffracts.x, cfracts.y, ffracts.z));
    float fccv = dot(fcc, vec3(ffracts.x, cfracts.y, cfracts.z));
    float cffv = dot(cff, vec3(cfracts.x, ffracts.y, ffracts.z));
    float cfcv = dot(cfc, vec3(cfracts.x, ffracts.y, cfracts.z));
    float ccfv = dot(ccf, vec3(cfracts.x, cfracts.y, ffracts.z));
    float cccv = dot(ccc, cfracts);

    return mix(
        mix(mix(fffv, cffv, params.x), mix(fcfv, ccfv, params.x), params.y),
        mix(mix(ffcv, cfcv, params.x), mix(fccv, cccv, params.x), params.y),
        params.z);
}

void main(void) {
    gl_FragColor = vec4(gl_Color.rgb * (1.0 + amplitude*(perlin(position * frequency) - 1.0)), 1.0);
}
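A note on the new fragment shader above: it evaluates classic gradient (Perlin) noise by hashing each corner of the surrounding lattice cell to a gradient vector (grad), taking the dot product of each gradient with the offset to that corner, and blending the eight corner values with the smoothstep fade f*f*(3 - 2*f). The sketch below is a CPU-side restatement of that final blend for readers who find the nested mix() calls hard to follow; the helper names are hypothetical and not part of this commit.

    // Sketch only: mirrors the nested mix() calls at the end of perlin().
    static float lerpf(float a, float b, float t) { return a + (b - a) * t; }

    // tx, ty, tz are the faded fractional coordinates ("params" in the shader);
    // fffv .. cccv are the eight corner dot products computed above them.
    static float blendCorners(float fffv, float cffv, float fcfv, float ccfv,
                              float ffcv, float cfcv, float fccv, float cccv,
                              float tx, float ty, float tz) {
        float bottom = lerpf(lerpf(fffv, cffv, tx), lerpf(fcfv, ccfv, tx), ty);
        float top    = lerpf(lerpf(ffcv, cfcv, tx), lerpf(fccv, cccv, tx), ty);
        return lerpf(bottom, top, tz);
    }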
interface/resources/shaders/perlin_modulate.vert (new file, 20 lines)

@@ -0,0 +1,20 @@
#version 120

//
// perlin_modulate.vert
// vertex shader
//
// Created by Andrzej Kapolka on 5/15/13.
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
//

// the position in model space
varying vec3 position;

void main(void) {
    position = gl_Vertex.xyz;
    vec4 normal = normalize(gl_ModelViewMatrix * vec4(gl_Normal, 0.0));
    gl_FrontColor = gl_Color * (gl_LightModel.ambient + gl_LightSource[0].ambient +
        gl_LightSource[0].diffuse * max(0.0, dot(normal, gl_LightSource[0].position)));
    gl_Position = ftransform();
}
@@ -56,16 +56,9 @@ static char STAR_CACHE_FILE[] = "cachedStars.txt";
const glm::vec3 START_LOCATION(6.1f, 0, 1.4f); // Where one's own agent begins in the world
// (will be overwritten if avatar data file is found)

const int IDLE_SIMULATE_MSECS = 16; // How often should call simulate and other stuff
// in the idle loop? (60 FPS is default)
const int IDLE_SIMULATE_MSECS = 16; // How often should call simulate and other stuff
// in the idle loop? (60 FPS is default)

const bool USING_MOUSE_VIEW_SHIFT = false;
const float MOUSE_VIEW_SHIFT_RATE = 40.0f;
const float MOUSE_VIEW_SHIFT_YAW_MARGIN = (float)(1200 * 0.2f);
const float MOUSE_VIEW_SHIFT_PITCH_MARGIN = (float)(800 * 0.2f);
const float MOUSE_VIEW_SHIFT_YAW_LIMIT = 45.0;
const float MOUSE_VIEW_SHIFT_PITCH_LIMIT = 30.0;

const bool DISPLAY_HEAD_MOUSE = true;

// customized canvas that simply forwards requests/events to the singleton application

@@ -137,8 +130,6 @@ Application::Application(int& argc, char** argv) :
_viewFrustumOffsetRoll(0.0),
_viewFrustumOffsetDistance(25.0),
_viewFrustumOffsetUp(0.0),
_mouseViewShiftYaw(0.0f),
_mouseViewShiftPitch(0.0f),
_audioScope(256, 200, true),
_myAvatar(true),
_mouseX(0),
@@ -307,18 +298,23 @@ void Application::paintGL() {

} else if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
_myCamera.setTargetPosition(_myAvatar.getSpringyHeadPosition());
_myCamera.setTargetRotation(_myAvatar.getBodyYaw() - 180.0f, 0.0f, 0.0f);
_myCamera.setTargetRotation(_myAvatar.getBodyYaw() - 180.0f,
0.0f,
0.0f);

} else {
if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON) {
_myCamera.setTargetPosition(_myAvatar.getSpringyHeadPosition());
_myCamera.setTargetRotation(_myAvatar.getAbsoluteHeadYaw() - _mouseViewShiftYaw,
_myAvatar.getRenderPitch() + _mouseViewShiftPitch, 0.0f);
_myCamera.setTargetRotation(_myAvatar.getAbsoluteHeadYaw(),
-_myAvatar.getAbsoluteHeadPitch(),
0.0f);

} else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
_myCamera.setTargetPosition(_myAvatar.getHeadPosition());
_myCamera.setTargetRotation(_myAvatar.getBodyYaw() - _mouseViewShiftYaw, _mouseViewShiftPitch, 0.0f);
}
_myCamera.setTargetRotation(_myAvatar.getBodyYaw(),
-_myAvatar.getAbsoluteHeadPitch(),
0.0f);
}
}

// important...

@@ -538,14 +534,14 @@ void Application::keyPressEvent(QKeyEvent* event) {
shiftPaintingColor();
break;

case Qt::Key_Minus:
sendVoxelServerEraseAll();
break;

case Qt::Key_Percent:
sendVoxelServerAddScene();
break;

case Qt::Key_Semicolon:
_audio.startEchoTest();
break;

case Qt::Key_L:
_displayLevels = !_displayLevels;
break;
@@ -782,11 +778,20 @@ void Application::idle() {
if (diffclock(&_lastTimeIdle, &check) > IDLE_SIMULATE_MSECS) {

float deltaTime = 1.f/_fps;

// update behaviors for avatar hand movement: handControl takes mouse values as input,
// and gives back 3D values modulated for smooth transitioning between interaction modes.
_handControl.update(_mouseX, _mouseY);
_myAvatar.setHandMovementValues(_handControl.getValues());

// Use Transmitter Hand to move hand if connected, else use mouse
if (_myAvatar.isTransmitterV2Connected()) {
const float HAND_FORCE_SCALING = 0.05f;
const float* handAcceleration = _myAvatar.getTransmitterHandLastAcceleration();
_myAvatar.setHandMovementValues(glm::vec3(-handAcceleration[0] * HAND_FORCE_SCALING,
handAcceleration[1] * HAND_FORCE_SCALING,
handAcceleration[2] * HAND_FORCE_SCALING));
} else {
// update behaviors for avatar hand movement: handControl takes mouse values as input,
// and gives back 3D values modulated for smooth transitioning between interaction modes.
_handControl.update(_mouseX, _mouseY);
_myAvatar.setHandMovementValues(_handControl.getValues());
}

// tell my avatar if the mouse is being pressed...
_myAvatar.setMousePressed(_mousePressed);

@@ -856,10 +861,15 @@ void Application::idle() {
// walking triggers the handControl to stop
if (_myAvatar.getMode() == AVATAR_MODE_WALKING) {
_handControl.stop();
_mouseViewShiftYaw *= 0.9;
_mouseViewShiftPitch *= 0.9;
}

// Update from Mouse
QPoint mouse = QCursor::pos();
_myAvatar.updateFromMouse(_glWidget->mapFromGlobal(mouse).x(),
_glWidget->mapFromGlobal(mouse).y(),
_glWidget->width(),
_glWidget->height());

// Read serial port interface devices
if (_serialPort.active) {
_serialPort.readData();
@@ -1125,6 +1135,9 @@ void Application::initMenu() {
_renderAtmosphereOn->setShortcut(Qt::SHIFT | Qt::Key_A);
(_renderAvatarsOn = renderMenu->addAction("Avatars"))->setCheckable(true);
_renderAvatarsOn->setChecked(true);
(_renderFrameTimerOn = renderMenu->addAction("Show Timer"))->setCheckable(true);
_renderFrameTimerOn->setChecked(false);

renderMenu->addAction("First Person", this, SLOT(setRenderFirstPerson(bool)), Qt::Key_P)->setCheckable(true);
(_oculusOn = renderMenu->addAction("Oculus", this, SLOT(setOculus(bool)), Qt::Key_O))->setCheckable(true);

@@ -1223,8 +1236,8 @@ void Application::init() {

_handControl.setScreenDimensions(_glWidget->width(), _glWidget->height());

_headMouseX = _glWidget->width()/2;
_headMouseY = _glWidget->height()/2;
_headMouseX = _mouseX = _glWidget->width() / 2;
_headMouseY = _mouseY = _glWidget->height() / 2;

_stars.readInput(STAR_FILE, STAR_CACHE_FILE, 0);

@@ -1234,7 +1247,9 @@ void Application::init() {
a.distance = 1.5f;
a.tightness = 8.0f;
_myCamera.setMode(CAMERA_MODE_THIRD_PERSON, a);
_myAvatar.setDisplayingHead(true);
_myAvatar.setDisplayingHead(true);

QCursor::setPos(_headMouseX, _headMouseY);

OculusManager::connect();
@@ -1268,6 +1283,7 @@ void Application::updateAvatar(float deltaTime) {
// Update head and body pitch and yaw based on measured gyro rates
if (_gyroLook->isChecked()) {
// Render Yaw
/* NOTE: PER - Leave here until I get back and can modify to couple gyros to head pitch, yaw
float renderYawSpring = fabs(_headMouseX - _glWidget->width() / 2.f) / (_glWidget->width() / 2.f);
const float RENDER_YAW_MULTIPLY = 4.f;
_myAvatar.setRenderYaw((1.f - renderYawSpring * deltaTime) * _myAvatar.getRenderYaw() +

@@ -1277,34 +1293,7 @@ void Application::updateAvatar(float deltaTime) {
const float RENDER_PITCH_MULTIPLY = 4.f;
_myAvatar.setRenderPitch((1.f - renderPitchSpring * deltaTime) * _myAvatar.getRenderPitch() +
renderPitchSpring * deltaTime * -_myAvatar.getHeadPitch() * RENDER_PITCH_MULTIPLY);
}

if (USING_MOUSE_VIEW_SHIFT)
{
//make it so that when your mouse hits the edge of the screen, the camera shifts
float rightBoundary = (float)_glWidget->width() - MOUSE_VIEW_SHIFT_YAW_MARGIN;
float bottomBoundary = (float)_glWidget->height() - MOUSE_VIEW_SHIFT_PITCH_MARGIN;

if (_mouseX > rightBoundary) {
float f = (_mouseX - rightBoundary) / ( (float)_glWidget->width() - rightBoundary);
_mouseViewShiftYaw += MOUSE_VIEW_SHIFT_RATE * f * deltaTime;
if (_mouseViewShiftYaw > MOUSE_VIEW_SHIFT_YAW_LIMIT) { _mouseViewShiftYaw = MOUSE_VIEW_SHIFT_YAW_LIMIT; }
} else if (_mouseX < MOUSE_VIEW_SHIFT_YAW_MARGIN) {
float f = 1.0 - (_mouseX / MOUSE_VIEW_SHIFT_YAW_MARGIN);
_mouseViewShiftYaw -= MOUSE_VIEW_SHIFT_RATE * f * deltaTime;
if (_mouseViewShiftYaw < -MOUSE_VIEW_SHIFT_YAW_LIMIT) { _mouseViewShiftYaw = -MOUSE_VIEW_SHIFT_YAW_LIMIT; }
}
if (_mouseY < MOUSE_VIEW_SHIFT_PITCH_MARGIN) {
float f = 1.0 - (_mouseY / MOUSE_VIEW_SHIFT_PITCH_MARGIN);
_mouseViewShiftPitch += MOUSE_VIEW_SHIFT_RATE * f * deltaTime;
if (_mouseViewShiftPitch > MOUSE_VIEW_SHIFT_PITCH_LIMIT ) { _mouseViewShiftPitch = MOUSE_VIEW_SHIFT_PITCH_LIMIT; }
}
else if (_mouseY > bottomBoundary) {
float f = (_mouseY - bottomBoundary) / ((float)_glWidget->height() - bottomBoundary);
_mouseViewShiftPitch -= MOUSE_VIEW_SHIFT_RATE * f * deltaTime;
if (_mouseViewShiftPitch < -MOUSE_VIEW_SHIFT_PITCH_LIMIT) { _mouseViewShiftPitch = -MOUSE_VIEW_SHIFT_PITCH_LIMIT; }
}
*/
}

if (OculusManager::isConnected()) {
@@ -1673,6 +1662,7 @@ void Application::displayOverlay() {
#ifndef _WIN32
_audio.render(_glWidget->width(), _glWidget->height());
_audioScope.render(20, _glWidget->height() - 200);
//_audio.renderEchoCompare(); // PER: Will turn back on to further test echo
#endif

//noiseTest(_glWidget->width(), _glWidget->height());

@@ -1695,6 +1685,10 @@ void Application::displayOverlay() {
// Show detected levels from the serial I/O ADC channel sensors
if (_displayLevels) _serialPort.renderLevels(_glWidget->width(), _glWidget->height());

// Show hand transmitter data if detected
if (_myAvatar.isTransmitterV2Connected()) {
_myAvatar.transmitterV2RenderLevels(_glWidget->width(), _glWidget->height());
}
// Display stats and log text onscreen
glLineWidth(1.0f);
glPointSize(1.0f);

@@ -1706,6 +1700,17 @@ void Application::displayOverlay() {
if (_chatEntryOn) {
_chatEntry.render(_glWidget->width(), _glWidget->height());
}

// Show on-screen msec timer
if (_renderFrameTimerOn->isChecked()) {
char frameTimer[10];
double mSecsNow = floor(usecTimestampNow() / 1000.0 + 0.5);
mSecsNow = mSecsNow - floor(mSecsNow / 1000.0) * 1000.0;
sprintf(frameTimer, "%3.0f\n", mSecsNow);
drawtext(_glWidget->width() - 100, _glWidget->height() - 20, 0.30, 0, 1.0, 0, frameTimer, 0, 0, 0);
drawtext(_glWidget->width() - 102, _glWidget->height() - 22, 0.30, 0, 1.0, 0, frameTimer, 1, 1, 1);
}

// Stats at upper right of screen about who domain server is telling us about
glPointSize(1.0f);
@@ -1967,12 +1972,13 @@ void Application::deleteVoxelUnderCursor() {

void Application::resetSensors() {
_myAvatar.setPosition(START_LOCATION);
_headMouseX = _glWidget->width() / 2;
_headMouseY = _glWidget->height() / 2;
_headMouseX = _mouseX = _glWidget->width() / 2;
_headMouseY = _mouseY = _glWidget->height() / 2;

if (_serialPort.active) {
_serialPort.resetAverages();
}
}
QCursor::setPos(_headMouseX, _headMouseY);
_myAvatar.reset();
}

@@ -2031,17 +2037,13 @@ void* Application::networkReceive(void* args) {

switch (app->_incomingPacket[0]) {
case PACKET_HEADER_TRANSMITTER_DATA_V1:
// Process UDP packets that are sent to the client from local sensor devices
// V1 = android app, or the Google Glass
app->_myAvatar.processTransmitterData(app->_incomingPacket, bytesReceived);
break;
case PACKET_HEADER_TRANSMITTER_DATA_V2:
float rotationRates[3];
float accelerations[3];
// V2 = IOS transmitter app
app->_myAvatar.processTransmitterDataV2(app->_incomingPacket, bytesReceived);

memcpy(rotationRates, app->_incomingPacket + 2, sizeof(rotationRates));
memcpy(accelerations, app->_incomingPacket + 3 + sizeof(rotationRates), sizeof(accelerations));

printf("The rotation: %f, %f, %f\n", rotationRates[0], rotationRates[1], rotationRates[2]);
break;
case PACKET_HEADER_MIXED_AUDIO:
app->_audio.addReceivedAudioToBuffer(app->_incomingPacket, bytesReceived);
@@ -138,7 +138,8 @@ private:
QAction* _renderAtmosphereOn; // Whether to display the atmosphere
QAction* _renderAvatarsOn; // Whether to render avatars
QAction* _oculusOn; // Whether to configure the display for the Oculus Rift
QAction* _renderStatsOn; // Whether to show onscreen text overlay with stats
QAction* _renderStatsOn; // Whether to show onscreen text overlay with stats
QAction* _renderFrameTimerOn; // Whether to show onscreen text overlay with stats
QAction* _logOn; // Whether to show on-screen log
QActionGroup* _voxelModeActions; // The group of voxel edit mode actions
QAction* _addVoxelMode; // Whether add voxel mode is enabled

@@ -182,9 +183,6 @@ private:
float _viewFrustumOffsetDistance;
float _viewFrustumOffsetUp;

float _mouseViewShiftYaw;
float _mouseViewShiftPitch;

Oscilloscope _audioScope;

Avatar _myAvatar; // The rendered avatar of oneself
@@ -87,11 +87,26 @@ int audioCallback (const void* inputBuffer,
Application* interface = (Application*) QCoreApplication::instance();
Avatar* interfaceAvatar = interface->getAvatar();

int16_t *inputLeft = ((int16_t **) inputBuffer)[0];

int16_t* inputLeft = ((int16_t**) inputBuffer)[0];
int16_t* outputLeft = ((int16_t**) outputBuffer)[0];
int16_t* outputRight = ((int16_t**) outputBuffer)[1];

// Add Procedural effects to input samples
parentAudio->addProceduralSounds(inputLeft, BUFFER_LENGTH_SAMPLES);

// add output (@speakers) data to the scope
parentAudio->_scope->addSamples(1, outputLeft, PACKET_LENGTH_SAMPLES_PER_CHANNEL);
parentAudio->_scope->addSamples(2, outputRight, PACKET_LENGTH_SAMPLES_PER_CHANNEL);

// if needed, add input/output data to echo analysis buffers
if (parentAudio->_isGatheringEchoFrames) {
memcpy(parentAudio->_echoInputSamples, inputLeft,
PACKET_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
memcpy(parentAudio->_echoOutputSamples, outputLeft,
PACKET_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
parentAudio->addedPingFrame();
}

if (inputLeft != NULL) {

// Measure the loudness of the signal from the microphone and store in audio object

@@ -103,7 +118,7 @@ int audioCallback (const void* inputBuffer,
loudness /= BUFFER_LENGTH_SAMPLES;
parentAudio->_lastInputLoudness = loudness;

// add data to the scope
// add input (@microphone) data to the scope
parentAudio->_scope->addSamples(0, inputLeft, BUFFER_LENGTH_SAMPLES);

Agent* audioMixer = agentList->soloAgentOfType(AGENT_TYPE_AUDIO_MIXER);

@@ -151,9 +166,6 @@ int audioCallback (const void* inputBuffer,

}

int16_t* outputLeft = ((int16_t**) outputBuffer)[0];
int16_t* outputRight = ((int16_t**) outputBuffer)[1];

memset(outputLeft, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);
memset(outputRight, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);

@@ -184,7 +196,6 @@ int audioCallback (const void* inputBuffer,
}
// play whatever we have in the audio buffer

// if we haven't fired off the flange effect, check if we should
// TODO: lastMeasuredHeadYaw is now relative to body - check if this still works.
@@ -249,11 +260,6 @@ int audioCallback (const void* inputBuffer,
outputLeft[s] = leftSample;
outputRight[s] = rightSample;
}

// add data to the scope
parentAudio->_scope->addSamples(1, outputLeft, PACKET_LENGTH_SAMPLES_PER_CHANNEL);
parentAudio->_scope->addSamples(2, outputRight, PACKET_LENGTH_SAMPLES_PER_CHANNEL);

ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES);

if (ringBuffer->getNextOutput() == ringBuffer->getBuffer() + RING_BUFFER_SAMPLES) {

@@ -261,11 +267,19 @@ int audioCallback (const void* inputBuffer,
}
}
}

if (parentAudio->_isSendingEchoPing) {
const float PING_PITCH = 4.f;
const float PING_VOLUME = 32000.f;
for (int s = 0; s < PACKET_LENGTH_SAMPLES_PER_CHANNEL; s++) {
outputLeft[s] = outputRight[s] = (int16_t)(sinf((float) s / PING_PITCH) * PING_VOLUME);
}
parentAudio->_isGatheringEchoFrames = true;
}
gettimeofday(&parentAudio->_lastCallbackTime, NULL);
return paContinue;
}

void outputPortAudioError(PaError error) {
if (error != paNoError) {
printLog("-- portaudio termination error --\n");

@@ -286,8 +300,12 @@ Audio::Audio(Oscilloscope* scope) :
_lastAcceleration(0),
_totalPacketsReceived(0),
_firstPlaybackTime(),
_packetsReceivedThisPlayback(0)
{
_packetsReceivedThisPlayback(0),
_shouldStartEcho(false),
_isSendingEchoPing(false),
_echoPingFrameCount(0),
_isGatheringEchoFrames(false)
{
outputPortAudioError(Pa_Initialize());
outputPortAudioError(Pa_OpenDefaultStream(&_stream,
2,

@@ -300,7 +318,12 @@ Audio::Audio(Oscilloscope* scope) :

// start the stream now that sources are good to go
outputPortAudioError(Pa_StartStream(_stream));

_echoInputSamples = new int16_t[BUFFER_LENGTH_BYTES];
_echoOutputSamples = new int16_t[BUFFER_LENGTH_BYTES];
memset(_echoInputSamples, 0, BUFFER_LENGTH_SAMPLES * sizeof(int));
memset(_echoOutputSamples, 0, BUFFER_LENGTH_SAMPLES * sizeof(int));

gettimeofday(&_lastReceiveTime, NULL);
}
@@ -311,6 +334,28 @@ Audio::~Audio() {
}
}

void Audio::renderEchoCompare() {
const int XPOS = 0;
const int YPOS = 500;
const int YSCALE = 500;
const int XSCALE = 2;
glPointSize(1.0);
glLineWidth(1.0);
glDisable(GL_LINE_SMOOTH);
glColor3f(1,1,1);
glBegin(GL_LINE_STRIP);
for (int i = 0; i < BUFFER_LENGTH_SAMPLES; i++) {
glVertex2f(XPOS + i * XSCALE, YPOS + _echoInputSamples[i]/YSCALE);
}
glEnd();
glColor3f(0,1,1);
glBegin(GL_LINE_STRIP);
for (int i = 0; i < BUFFER_LENGTH_SAMPLES; i++) {
glVertex2f(XPOS + i * XSCALE, YPOS + _echoOutputSamples[i]/YSCALE);
}
glEnd();
}

// Take a pointer to the acquired microphone input samples and add procedural sounds
void Audio::addProceduralSounds(int16_t* inputBuffer, int numSamples) {
const float MAX_AUDIBLE_VELOCITY = 6.0;

@@ -324,11 +369,61 @@ void Audio::addProceduralSounds(int16_t* inputBuffer, int numSamples) {
// Add a noise-modulated sinewave with volume that tapers off with speed increasing
if ((speed > MIN_AUDIBLE_VELOCITY) && (speed < MAX_AUDIBLE_VELOCITY)) {
for (int i = 0; i < numSamples; i++) {
inputBuffer[i] += (int16_t)((cosf((float) i / SOUND_PITCH * speed) * randFloat()) * volume * speed);
inputBuffer[i] += (int16_t)((sinf((float) i / SOUND_PITCH * speed) * randFloat()) * volume * speed);
}
}
}

void Audio::startEchoTest() {
_shouldStartEcho = true;
_echoPingFrameCount = 0;
_isSendingEchoPing = true;
_isGatheringEchoFrames = false;
}

void Audio::addedPingFrame() {
const int ECHO_PING_FRAMES = 1;
_echoPingFrameCount++;
if (_echoPingFrameCount == ECHO_PING_FRAMES) {
_isGatheringEchoFrames = false;
_isSendingEchoPing = false;
//startEchoTest();
}
}
void Audio::analyzeEcho(int16_t* inputBuffer, int16_t* outputBuffer, int numSamples) {
// Compare output and input streams, looking for evidence of correlation needing echo cancellation
//
// OFFSET_RANGE tells us how many samples to vary the analysis window when looking for correlation,
// and should be equal to the largest physical distance between speaker and microphone, where
// OFFSET_RANGE = 1 / (speedOfSound (meters / sec) / SamplingRate (samples / sec)) * distance
//
const int OFFSET_RANGE = 10;
const int SIGNAL_FLOOR = 1000;
float correlation[2 * OFFSET_RANGE + 1];
int numChecked = 0;
bool foundSignal = false;

memset(correlation, 0, sizeof(float) * (2 * OFFSET_RANGE + 1));

for (int offset = -OFFSET_RANGE; offset <= OFFSET_RANGE; offset++) {
for (int i = 0; i < numSamples; i++) {
if ((i + offset >= 0) && (i + offset < numSamples)) {
correlation[offset + OFFSET_RANGE] +=
(float) abs(inputBuffer[i] - outputBuffer[i + offset]);
numChecked++;
foundSignal |= (inputBuffer[i] > SIGNAL_FLOOR);
}
}
correlation[offset + OFFSET_RANGE] /= numChecked;
numChecked = 0;
if (foundSignal) {
printLog("%4.2f, ", correlation[offset + OFFSET_RANGE]);
}
}
if (foundSignal) printLog("\n");
}

void Audio::addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes) {
const int NUM_INITIAL_PACKETS_DISCARD = 3;
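As a worked example of the OFFSET_RANGE formula quoted in analyzeEcho() above (the physical distance and sample rate below are illustrative assumptions, not values taken from this commit): sound travels roughly 343 m/s, so at a 22050 Hz sample rate a 0.15 m speaker-to-microphone gap corresponds to about 0.15 * 22050 / 343 ≈ 10 samples of delay, the same order as the constant used.

    // Illustrative only; every constant here is an assumption, not from this commit.
    const float SPEED_OF_SOUND_MPS = 343.0f;      // meters per second
    const float SAMPLE_RATE_HZ = 22050.0f;        // samples per second (assumed)
    const float SPEAKER_TO_MIC_METERS = 0.15f;    // physical separation (assumed)

    // OFFSET_RANGE = distance / (speedOfSound / samplingRate)
    int offsetRange = (int)(SPEAKER_TO_MIC_METERS * SAMPLE_RATE_HZ / SPEED_OF_SOUND_MPS + 0.5f); // ~10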
@@ -345,7 +440,6 @@ void Audio::addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes) {

if (::stdev.getSamples() > 500) {
_measuredJitter = ::stdev.getStDev();
//printLog("Avg: %4.2f, Stdev: %4.2f\n", stdev.getAverage(), sharedAudioData->measuredJitter);
::stdev.reset();
}
@@ -32,8 +32,15 @@ public:
void setLastVelocity(glm::vec3 lastVelocity) { _lastVelocity = lastVelocity; };

void addProceduralSounds(int16_t* inputBuffer, int numSamples);
void analyzeEcho(int16_t* inputBuffer, int16_t* outputBuffer, int numSamples);

void addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes);

void startEchoTest();
void addedPingFrame();
void renderEchoCompare();

private:
PaStream* _stream;
AudioRingBuffer _ringBuffer;

@@ -50,6 +57,12 @@ private:
int _totalPacketsReceived;
timeval _firstPlaybackTime;
int _packetsReceivedThisPlayback;
bool _shouldStartEcho;
bool _isSendingEchoPing;
int _echoPingFrameCount;
int16_t* _echoInputSamples;
int16_t* _echoOutputSamples;
bool _isGatheringEchoFrames;

// give access to AudioData class from audioCallback
friend int audioCallback (const void*, void*, unsigned long, const PaStreamCallbackTimeInfo*, PaStreamCallbackFlags, void*);
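Read together with the Audio.cpp changes above, the new echo-test members appear to interact roughly as sketched below; this is an inference from the diff, not documentation shipped with the commit.

    // Hypothetical driver code, tracing the flags added in this commit:
    // audio.startEchoTest();            // _isSendingEchoPing = true, _echoPingFrameCount = 0
    // ...next audioCallback()...        // writes the sine ping, sets _isGatheringEchoFrames
    // ...following audioCallback()...   // copies mic/speaker frames, calls addedPingFrame()
    // addedPingFrame()                  // after ECHO_PING_FRAMES frames, clears both flags
    // audio.analyzeEcho(in, out, n);    // compares the captured buffers offset by offset
    // audio.renderEchoCompare();        // draws both captured buffers for visual inspection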
@@ -10,6 +10,7 @@
#include <vector>
#include <lodepng.h>
#include <SharedUtil.h>
#include "world.h"
#include "Avatar.h"
#include "Head.h"
#include "Log.h"

@@ -17,6 +18,7 @@
#include <AgentList.h>
#include <AgentTypes.h>
#include <PacketHeaders.h>
#include <OculusManager.h>

using namespace std;

@@ -28,8 +30,6 @@ const float THRUST_MAG = 1200.0;
const float YAW_MAG = 500.0;
const float BODY_SPIN_FRICTION = 5.0;
const float BODY_UPRIGHT_FORCE = 10.0;
const float BODY_PITCH_WHILE_WALKING = 40.0;
const float BODY_ROLL_WHILE_TURNING = 0.1;
const float VELOCITY_DECAY = 5.0;
const float MY_HAND_HOLDING_PULL = 0.2;
const float YOUR_HAND_HOLDING_PULL = 1.0;

@@ -49,7 +49,6 @@ const float HEAD_MAX_YAW = 85;
const float HEAD_MIN_YAW = -85;
const float AVATAR_BRAKING_RANGE = 1.6f;
const float AVATAR_BRAKING_STRENGTH = 30.0f;
//const float MAX_JOINT_TOUCH_DOT = 0.995f;
const float JOINT_TOUCH_RANGE = 0.0005f;

float skinColor [] = {1.0, 0.84, 0.66};

@@ -82,6 +81,7 @@ Avatar::Avatar(bool isMine) {
_transmitterPackets = 0;
_transmitterIsFirstData = true;
_transmitterInitialReading = glm::vec3(0.f, 0.f, 0.f);
_isTransmitterV2Connected = false;
_speed = 0.0;
_pelvisStandingHeight = 0.0f;
_displayingHead = true;
@@ -96,12 +96,12 @@ Avatar::Avatar(bool isMine) {
_head.initialize();

_movedHandOffset = glm::vec3(0.0f, 0.0f, 0.0f);
_renderYaw = 0.0;
_renderPitch = 0.0;
_sphere = NULL;
_handHoldingPosition = glm::vec3(0.0f, 0.0f, 0.0f);
_distanceToNearestAvatar = std::numeric_limits<float>::max();
_gravity = glm::vec3(0.0f, -1.0f, 0.0f); // default
_gravity = glm::vec3(0.0f, -1.0f, 0.0f);
_cumulativeMouseYaw = 0.f;
_isMouseTurningRight = false;

initializeSkeleton();

@@ -126,7 +126,6 @@ Avatar::Avatar(const Avatar &otherAvatar) {
_mode = otherAvatar._mode;
_isMine = otherAvatar._isMine;
_renderYaw = otherAvatar._renderYaw;
_renderPitch = otherAvatar._renderPitch;
_maxArmLength = otherAvatar._maxArmLength;
_transmitterTimer = otherAvatar._transmitterTimer;
_transmitterIsFirstData = otherAvatar._transmitterIsFirstData;

@@ -134,6 +133,7 @@ Avatar::Avatar(const Avatar &otherAvatar) {
_transmitterHz = otherAvatar._transmitterHz;
_transmitterInitialReading = otherAvatar._transmitterInitialReading;
_transmitterPackets = otherAvatar._transmitterPackets;
_isTransmitterV2Connected = otherAvatar._isTransmitterV2Connected;
_TEST_bigSphereRadius = otherAvatar._TEST_bigSphereRadius;
_TEST_bigSpherePosition = otherAvatar._TEST_bigSpherePosition;
_movedHandOffset = otherAvatar._movedHandOffset;

@@ -186,15 +186,6 @@ Avatar::Avatar(const Avatar &otherAvatar) {

initializeSkeleton();

/*
if (iris_texture.size() == 0) {
switchToResourcesParentIfRequired();
unsigned error = lodepng::decode(iris_texture, iris_texture_width, iris_texture_height, iris_texture_file);
if (error != 0) {
printLog("error %u: %s\n", error, lodepng_error_text(error));
}
}
*/
}

Avatar::~Avatar() {

@@ -212,7 +203,6 @@ void Avatar::reset() {
_head.leanForward = _head.leanSideways = 0;
}

// Update avatar head rotation with sensor data
void Avatar::updateHeadFromGyros(float deltaTime, SerialInterface* serialInterface, glm::vec3* gravity) {
float measuredPitchRate = 0.0f;

@@ -224,8 +214,6 @@ void Avatar::updateHeadFromGyros(float deltaTime, SerialInterface* serialInterfa
measuredRollRate = serialInterface->getLastRollRate();

// Update avatar head position based on measured gyro rates
const float MAX_PITCH = 45;
const float MIN_PITCH = -45;
const float MAX_YAW = 85;
const float MIN_YAW = -85;
const float MAX_ROLL = 50;

@@ -235,7 +223,6 @@ void Avatar::updateHeadFromGyros(float deltaTime, SerialInterface* serialInterfa
addHeadYaw(measuredYawRate * deltaTime);
addHeadRoll(measuredRollRate * deltaTime);

setHeadPitch(glm::clamp(getHeadPitch(), MIN_PITCH, MAX_PITCH));
setHeadYaw(glm::clamp(getHeadYaw(), MIN_YAW, MAX_YAW));
setHeadRoll(glm::clamp(getHeadRoll(), MIN_ROLL, MAX_ROLL));
@@ -291,9 +278,53 @@ bool Avatar::getIsNearInteractingOther() {
return _avatarTouch.getAbleToReachOtherAvatar();
}

void Avatar::updateFromMouse(int mouseX, int mouseY, int screenWidth, int screenHeight) {
// Update yaw based on mouse behavior
const float MOUSE_MOVE_RADIUS = 0.25f;
const float MOUSE_ROTATE_SPEED = 5.0f;
const float MOUSE_PITCH_SPEED = 3.0f;
const float MAX_YAW_TO_ADD = 180.f;
const int TITLE_BAR_HEIGHT = 46;
float mouseLocationX = (float)mouseX / (float)screenWidth - 0.5f;
float mouseLocationY = (float)mouseY / (float)screenHeight - 0.5f;

if ((mouseX > 1) && (mouseX < screenWidth) && (mouseY > TITLE_BAR_HEIGHT) && (mouseY < screenHeight)) {
//
// Mouse must be inside screen (not at edge) and not on title bar for movement to happen
//
if (fabs(mouseLocationX) > MOUSE_MOVE_RADIUS) {
// Add Yaw
float mouseYawAdd = (fabs(mouseLocationX) - MOUSE_MOVE_RADIUS) / (0.5f - MOUSE_MOVE_RADIUS) * MOUSE_ROTATE_SPEED;
bool rightTurning = (mouseLocationX > 0.f);
if (_isMouseTurningRight == rightTurning) {
_cumulativeMouseYaw += mouseYawAdd;
} else {
_cumulativeMouseYaw = 0;
_isMouseTurningRight = rightTurning;
}
if (_cumulativeMouseYaw < MAX_YAW_TO_ADD) {
setBodyYaw(getBodyYaw() - (rightTurning ? mouseYawAdd : -mouseYawAdd));
}
} else {
_cumulativeMouseYaw = 0;
}
if (fabs(mouseLocationY) > MOUSE_MOVE_RADIUS) {
float mousePitchAdd = (fabs(mouseLocationY) - MOUSE_MOVE_RADIUS) / (0.5f - MOUSE_MOVE_RADIUS) * MOUSE_PITCH_SPEED;
bool downPitching = (mouseLocationY > 0.f);
setHeadPitch(getHeadPitch() + (downPitching ? mousePitchAdd : -mousePitchAdd));
}

}

return;
}

void Avatar::simulate(float deltaTime) {

//figure out if the mouse cursor is over any body spheres...
// copy velocity so we can use it later for acceleration
glm::vec3 oldVelocity = getVelocity();

// figure out if the mouse cursor is over any body spheres...
checkForMouseRayTouching();

// update balls
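To make the dead-zone math in updateFromMouse() above concrete (the cursor position below is a made-up example): with MOUSE_MOVE_RADIUS = 0.25 and MOUSE_ROTATE_SPEED = 5, a cursor at 90% of the screen width maps to mouseLocationX = 0.4, which is outside the central dead zone, so that update adds (0.4 - 0.25) / (0.5 - 0.25) * 5 = 3 degrees of body yaw; a cursor anywhere in the middle half of the screen adds nothing.

    // Worked example (made-up cursor position), following the code above:
    float mouseLocationX = 0.9f - 0.5f;                            // cursor at 90% of screen width -> 0.4
    float mouseYawAdd = (0.4f - 0.25f) / (0.5f - 0.25f) * 5.0f;    // = 3.0 degrees this update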
@@ -315,7 +346,7 @@ void Avatar::simulate(float deltaTime) {
_avatarTouch.simulate(deltaTime);

// apply gravity and collision with the ground/floor
if (USING_AVATAR_GRAVITY) {
if (_isMine && USING_AVATAR_GRAVITY) {
if (_position.y > _pelvisStandingHeight + 0.01f) {
_velocity += _gravity * (GRAVITY_SCALE * deltaTime);
} else if (_position.y < _pelvisStandingHeight) {

@@ -364,10 +395,12 @@ void Avatar::simulate(float deltaTime) {
// add thrust to velocity
_velocity += _thrust * deltaTime;

// calculate speed
// calculate speed
_speed = glm::length(_velocity);

//pitch and roll the body as a function of forward speed and turning delta
const float BODY_PITCH_WHILE_WALKING = 20.0;
const float BODY_ROLL_WHILE_TURNING = 0.2;
float forwardComponentOfVelocity = glm::dot(_orientation.getFront(), _velocity);
_bodyPitch += BODY_PITCH_WHILE_WALKING * deltaTime * forwardComponentOfVelocity;
_bodyRoll += BODY_ROLL_WHILE_TURNING * deltaTime * _speed * _bodyYawDelta;

@@ -403,10 +436,15 @@ void Avatar::simulate(float deltaTime) {
}
}

// Compute instantaneous acceleration
float acceleration = glm::distance(getVelocity(), oldVelocity) / deltaTime;
const float ACCELERATION_PITCH_DECAY = 0.4f;

// Decay HeadPitch as a function of acceleration, so that you look straight ahead when
// you start moving, but don't do this with an HMD like the Oculus.
if (!OculusManager::isConnected()) {
setHeadPitch(getHeadPitch() * (1.f - acceleration * ACCELERATION_PITCH_DECAY * deltaTime));
}
// Get head position data from network for other people
if (!_isMine) {
_head.leanSideways = getHeadLeanSideways();
@@ -595,110 +633,6 @@ void Avatar::updateHandMovementAndTouching(float deltaTime) {

void Avatar::updateHead(float deltaTime) {

/*
// Decay head back to center if turned on
if (_isMine && _returnHeadToCenter) {
// Decay back toward center
_headPitch *= (1.0f - DECAY * _head.returnSpringScale * 2 * deltaTime);
_headYaw *= (1.0f - DECAY * _head.returnSpringScale * 2 * deltaTime);
_headRoll *= (1.0f - DECAY * _head.returnSpringScale * 2 * deltaTime);
}

// For invensense gyro, decay only slightly when roughly centered
if (_isMine) {
const float RETURN_RANGE = 15.0;
const float RETURN_STRENGTH = 2.0;
if (fabs(_headPitch) < RETURN_RANGE) { _headPitch *= (1.0f - RETURN_STRENGTH * deltaTime); }
if (fabs(_headYaw) < RETURN_RANGE) { _headYaw *= (1.0f - RETURN_STRENGTH * deltaTime); }
if (fabs(_headRoll) < RETURN_RANGE) { _headRoll *= (1.0f - RETURN_STRENGTH * deltaTime); }
}

if (_head.noise) {
// Move toward new target
_headPitch += (_head.pitchTarget - _headPitch) * 10 * deltaTime; // (1.f - DECAY*deltaTime)*Pitch + ;
_headYaw += (_head.yawTarget - _headYaw ) * 10 * deltaTime; // (1.f - DECAY*deltaTime);
_headRoll *= 1.f - (DECAY * deltaTime);
}

_head.leanForward *= (1.f - DECAY * 30 * deltaTime);
_head.leanSideways *= (1.f - DECAY * 30 * deltaTime);

// Update where the avatar's eyes are
//
// First, decide if we are making eye contact or not
if (randFloat() < 0.005) {
_head.eyeContact = !_head.eyeContact;
_head.eyeContact = 1;
if (!_head.eyeContact) {
// If we just stopped making eye contact,move the eyes markedly away
_head.eyeballPitch[0] = _head.eyeballPitch[1] = _head.eyeballPitch[0] + 5.0 + (randFloat() - 0.5) * 10;
_head.eyeballYaw [0] = _head.eyeballYaw [1] = _head.eyeballYaw [0] + 5.0 + (randFloat() - 0.5) * 5;
} else {
// If now making eye contact, turn head to look right at viewer
SetNewHeadTarget(0,0);
}
}

const float DEGREES_BETWEEN_VIEWER_EYES = 3;
const float DEGREES_TO_VIEWER_MOUTH = 7;

if (_head.eyeContact) {
// Should we pick a new eye contact target?
if (randFloat() < 0.01) {
// Choose where to look next
if (randFloat() < 0.1) {
_head.eyeContactTarget = MOUTH;
} else {
if (randFloat() < 0.5) _head.eyeContactTarget = LEFT_EYE; else _head.eyeContactTarget = RIGHT_EYE;
}
}
// Set eyeball pitch and yaw to make contact
float eye_target_yaw_adjust = 0;
float eye_target_pitch_adjust = 0;
if (_head.eyeContactTarget == LEFT_EYE) eye_target_yaw_adjust = DEGREES_BETWEEN_VIEWER_EYES;
if (_head.eyeContactTarget == RIGHT_EYE) eye_target_yaw_adjust = -DEGREES_BETWEEN_VIEWER_EYES;
if (_head.eyeContactTarget == MOUTH) eye_target_pitch_adjust = DEGREES_TO_VIEWER_MOUTH;

_head.eyeballPitch[0] = _head.eyeballPitch[1] = -_headPitch + eye_target_pitch_adjust;
_head.eyeballYaw[0] = _head.eyeballYaw[1] = -_headYaw + eye_target_yaw_adjust;
}

if (_head.noise)
{
_headPitch += (randFloat() - 0.5) * 0.2 * _head.noiseEnvelope;
_headYaw += (randFloat() - 0.5) * 0.3 *_head.noiseEnvelope;
//PupilSize += (randFloat() - 0.5) * 0.001*NoiseEnvelope;

if (randFloat() < 0.005) _head.mouthWidth = MouthWidthChoices[rand()%3];

if (!_head.eyeContact) {
if (randFloat() < 0.01) _head.eyeballPitch[0] = _head.eyeballPitch[1] = (randFloat() - 0.5) * 20;
if (randFloat() < 0.01) _head.eyeballYaw[0] = _head.eyeballYaw[1] = (randFloat()- 0.5) * 10;
}

if ((randFloat() < 0.005) && (fabs(_head.pitchTarget - _headPitch) < 1.0) && (fabs(_head.yawTarget - _headYaw) < 1.0)) {
SetNewHeadTarget((randFloat()-0.5) * 20.0, (randFloat()-0.5) * 45.0);
}

if (0) {

// Pick new target
_head.pitchTarget = (randFloat() - 0.5) * 45;
_head.yawTarget = (randFloat() - 0.5) * 22;
}
if (randFloat() < 0.01)
{
_head.eyebrowPitch[0] = _head.eyebrowPitch[1] = BrowPitchAngle[rand()%3];
_head.eyebrowRoll [0] = _head.eyebrowRoll[1] = BrowRollAngle[rand()%5];
_head.eyebrowRoll [1] *=-1;
}
}

// Update audio trailing average for rendering facial animations
const float AUDIO_AVERAGING_SECS = 0.05;
_head.averageLoudness = (1.f - deltaTime / AUDIO_AVERAGING_SECS) * _head.averageLoudness +
(deltaTime / AUDIO_AVERAGING_SECS) * _audioLoudness;
*/
}
@@ -843,7 +777,7 @@ void Avatar::setDisplayingHead(bool displayingHead) {
}

static TextRenderer* textRenderer() {
static TextRenderer* renderer = new TextRenderer(SANS_FONT_FAMILY, 24);
static TextRenderer* renderer = new TextRenderer(SANS_FONT_FAMILY, 24, -1, false, TextRenderer::SHADOW_EFFECT);
return renderer;
}

@@ -923,6 +857,7 @@ void Avatar::render(bool lookingInMirror, glm::vec3 cameraPosition) {
glScalef(chatMessageScale, chatMessageScale, 1.0f);

glDisable(GL_LIGHTING);
glDepthMask(false);
if (_keyState == NO_KEY_DOWN) {
textRenderer()->draw(-width/2, 0, _chatMessage.c_str());

@@ -938,6 +873,7 @@ void Avatar::render(bool lookingInMirror, glm::vec3 cameraPosition) {
textRenderer()->draw(width/2 - lastWidth, 0, _chatMessage.c_str() + lastIndex);
}
glEnable(GL_LIGHTING);
glDepthMask(true);

glPopMatrix();
}
@@ -1410,6 +1346,84 @@ void Avatar::processTransmitterData(unsigned char* packetData, int numBytes) {

}
}
//
// Process UDP data from version 2 Transmitter acting as Hand
//
void Avatar::processTransmitterDataV2(unsigned char* packetData, int numBytes) {
if (numBytes == 3 + sizeof(_transmitterHandLastRotationRates) +
sizeof(_transmitterHandLastAcceleration)) {
memcpy(_transmitterHandLastRotationRates, packetData + 2,
sizeof(_transmitterHandLastRotationRates));
memcpy(_transmitterHandLastAcceleration, packetData + 3 +
sizeof(_transmitterHandLastRotationRates),
sizeof(_transmitterHandLastAcceleration));
// Convert from transmitter units to internal units
for (int i = 0; i < 3; i++) {
_transmitterHandLastRotationRates[i] *= 180.f / PI;
_transmitterHandLastAcceleration[i] *= GRAVITY_EARTH;
}
if (!_isTransmitterV2Connected) {
printf("Transmitter V2 Connected.\n");
_isTransmitterV2Connected = true;
}
} else {
printf("Transmitter V2 packet read error.\n");
}
}

void Avatar::transmitterV2RenderLevels(int width, int height) {

char val[50];
const int LEVEL_CORNER_X = 10;
const int LEVEL_CORNER_Y = 400;

// Draw the numeric degree/sec values from the gyros
sprintf(val, "Yaw %4.1f", _transmitterHandLastRotationRates[1]);
drawtext(LEVEL_CORNER_X, LEVEL_CORNER_Y, 0.10, 0, 1.0, 1, val, 0, 1, 0);
sprintf(val, "Pitch %4.1f", _transmitterHandLastRotationRates[0]);
drawtext(LEVEL_CORNER_X, LEVEL_CORNER_Y + 15, 0.10, 0, 1.0, 1, val, 0, 1, 0);
sprintf(val, "Roll %4.1f", _transmitterHandLastRotationRates[2]);
drawtext(LEVEL_CORNER_X, LEVEL_CORNER_Y + 30, 0.10, 0, 1.0, 1, val, 0, 1, 0);
sprintf(val, "X %4.3f", _transmitterHandLastAcceleration[0]);
drawtext(LEVEL_CORNER_X, LEVEL_CORNER_Y + 45, 0.10, 0, 1.0, 1, val, 0, 1, 0);
sprintf(val, "Y %4.3f", _transmitterHandLastAcceleration[1]);
drawtext(LEVEL_CORNER_X, LEVEL_CORNER_Y + 60, 0.10, 0, 1.0, 1, val, 0, 1, 0);
sprintf(val, "Z %4.3f", _transmitterHandLastAcceleration[2]);
drawtext(LEVEL_CORNER_X, LEVEL_CORNER_Y + 75, 0.10, 0, 1.0, 1, val, 0, 1, 0);

// Draw the levels as horizontal lines
const int LEVEL_CENTER = 150;
const float ACCEL_VIEW_SCALING = 50.f;
glLineWidth(2.0);
glColor4f(1, 1, 1, 1);
glBegin(GL_LINES);
// Gyro rates
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER, LEVEL_CORNER_Y - 3);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER + _transmitterHandLastRotationRates[1], LEVEL_CORNER_Y - 3);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER, LEVEL_CORNER_Y + 12);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER + _transmitterHandLastRotationRates[0], LEVEL_CORNER_Y + 12);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER, LEVEL_CORNER_Y + 27);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER + _transmitterHandLastRotationRates[2], LEVEL_CORNER_Y + 27);
// Acceleration
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER, LEVEL_CORNER_Y + 42);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER + (int)(_transmitterHandLastAcceleration[0] * ACCEL_VIEW_SCALING),
LEVEL_CORNER_Y + 42);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER, LEVEL_CORNER_Y + 57);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER + (int)(_transmitterHandLastAcceleration[1] * ACCEL_VIEW_SCALING),
LEVEL_CORNER_Y + 57);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER, LEVEL_CORNER_Y + 72);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER + (int)(_transmitterHandLastAcceleration[2] * ACCEL_VIEW_SCALING),
LEVEL_CORNER_Y + 72);

glEnd();
// Draw green vertical centerline
glColor4f(0, 1, 0, 0.5);
glBegin(GL_LINES);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER, LEVEL_CORNER_Y - 6);
glVertex2f(LEVEL_CORNER_X + LEVEL_CENTER, LEVEL_CORNER_Y + 30);
glEnd();
}

void Avatar::setHeadFromGyros(glm::vec3* eulerAngles, glm::vec3* angularVelocity, float deltaTime, float smoothingTime) {
//
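For reference, the size check and memcpy offsets in processTransmitterDataV2() above imply the packet layout sketched below; this is inferred from the diff rather than from any protocol documentation.

    // Inferred V2 transmitter packet layout (numBytes == 3 + 12 + 12 == 27):
    // bytes 0..1    not read here (byte 0 is the packet header checked by the caller's switch)
    // bytes 2..13   float rotationRates[3]
    // byte  14      not read by this function
    // bytes 15..26  float accelerations[3]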
@@ -82,11 +82,8 @@ public:

void reset();
void updateHeadFromGyros(float frametime, SerialInterface * serialInterface, glm::vec3 * gravity);
void updateFromMouse(int mouseX, int mouseY, int screenWidth, int screenHeight);
void setNoise (float mag) {_head.noise = mag;}
void setRenderYaw(float y) {_renderYaw = y;}
void setRenderPitch(float p) {_renderPitch = p;}
float getRenderYaw() {return _renderYaw;}
float getRenderPitch() {return _renderPitch;}
float getLastMeasuredHeadYaw() const {return _head.yawRate;}
float getBodyYaw() {return _bodyYaw;};
void addBodyYaw(float y) {_bodyYaw += y;};

@@ -130,6 +127,12 @@ public:

// Related to getting transmitter UDP data used to animate the avatar hand
void processTransmitterData(unsigned char * packetData, int numBytes);
void processTransmitterDataV2(unsigned char * packetData, int numBytes);
const bool isTransmitterV2Connected() const { return _isTransmitterV2Connected; };
const float* getTransmitterHandLastAcceleration() const { return _transmitterHandLastAcceleration; };
const float* getTransmitterHandLastRotationRates() const { return _transmitterHandLastRotationRates; };
void transmitterV2RenderLevels(int width, int height);

float getTransmitterHz() { return _transmitterHz; };

void writeAvatarDataToFile();

@@ -184,6 +187,9 @@ private:
float _transmitterHz;
int _transmitterPackets;
glm::vec3 _transmitterInitialReading;
float _transmitterHandLastRotationRates[3];
float _transmitterHandLastAcceleration[3];
bool _isTransmitterV2Connected;
float _pelvisStandingHeight;
float _height;
Balls* _balls;

@@ -194,6 +200,8 @@ private:
glm::vec3 _mouseRayOrigin;
glm::vec3 _mouseRayDirection;
glm::vec3 _cameraPosition;
float _cumulativeMouseYaw;
bool _isMouseTurningRight;

//AvatarJointID _jointTouched;
@@ -221,9 +221,6 @@ void Head::simulate(float deltaTime, bool isMine) {
(deltaTime / AUDIO_AVERAGING_SECS) * audioLoudness;
}

void Head::render(bool lookingInMirror, float bodyYaw) {

int side = 0;

@@ -292,14 +289,19 @@ void Head::render(bool lookingInMirror, float bodyYaw) {
glPopMatrix();

// Mouth
const float MIN_LOUDNESS_SCALE_WIDTH = 0.7f;
const float WIDTH_SENSITIVITY = 60.f;
const float HEIGHT_SENSITIVITY = 30.f;
const float MIN_LOUDNESS_SCALE_HEIGHT = 1.0f;
glPushMatrix();
glTranslatef(0,-0.35,0.75);
glColor3f(0,0,0);
glRotatef(mouthPitch, 1, 0, 0);
glRotatef(mouthYaw, 0, 0, 1);
if (averageLoudness > 1.f) {
glScalef(mouthWidth * (.7f + sqrt(averageLoudness) /60.f),
mouthHeight * (1.f + sqrt(averageLoudness) /30.f), 1);

if ((averageLoudness > 1.f) && (averageLoudness < 10000.f)) {
glScalef(mouthWidth * (MIN_LOUDNESS_SCALE_WIDTH + sqrt(averageLoudness) / WIDTH_SENSITIVITY),
mouthHeight * (MIN_LOUDNESS_SCALE_HEIGHT + sqrt(averageLoudness) / HEIGHT_SENSITIVITY), 1);
} else {
glScalef(mouthWidth, mouthHeight, 1);
}
@@ -198,7 +198,7 @@ void Log::setCharacterSize(unsigned width, unsigned height) {
}

static TextRenderer* textRenderer() {
static TextRenderer* renderer = new TextRenderer(FONT_FAMILY);
static TextRenderer* renderer = new TextRenderer(FONT_FAMILY, -1, -1, false, TextRenderer::SHADOW_EFFECT);
return renderer;
}
|
@ -20,15 +20,15 @@ using namespace std;
|
|||
namespace { // everything in here only exists while compiling this .cpp file
|
||||
|
||||
// one sample buffer per channel
|
||||
unsigned const N_SAMPLES_ALLOC = Oscilloscope::MAX_SAMPLES * Oscilloscope::MAX_CHANNELS;
|
||||
unsigned const MAX_SAMPLES = Oscilloscope::MAX_SAMPLES_PER_CHANNEL * Oscilloscope::MAX_CHANNELS;
|
||||
|
||||
// adding an x-coordinate yields twice the amount of vertices
|
||||
unsigned const MAX_COORDS = Oscilloscope::MAX_SAMPLES * 2;
|
||||
unsigned const MAX_COORDS_PER_CHANNEL = Oscilloscope::MAX_SAMPLES_PER_CHANNEL * 2;
|
||||
// allocated once for each channel
|
||||
unsigned const N_COORDS_ALLOC = MAX_COORDS * Oscilloscope::MAX_CHANNELS;
|
||||
unsigned const MAX_COORDS = MAX_COORDS_PER_CHANNEL * Oscilloscope::MAX_CHANNELS;
|
||||
|
||||
// total amount of memory to allocate (in 16-bit integers)`
|
||||
unsigned const N_ALLOC_TOTAL = N_SAMPLES_ALLOC + N_COORDS_ALLOC;
|
||||
// total amount of memory to allocate (in 16-bit integers)
|
||||
unsigned const N_INT16_TO_ALLOC = MAX_SAMPLES + MAX_COORDS;
|
||||
}
|
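For reference, a quick size check of the renamed constants above (using the values declared in Oscilloscope.h in this commit): MAX_SAMPLES = 4096 samples/channel × 3 channels = 12288 shorts; MAX_COORDS = 2 × 12288 = 24576 shorts (an x and a y per sample); N_INT16_TO_ALLOC = 12288 + 24576 = 36864 shorts, i.e. 73728 bytes (~72 KiB) for the single combined allocation made in the constructor below.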
||||
|
||||
|
||||
|
@ -40,13 +40,13 @@ Oscilloscope::Oscilloscope(int w, int h, bool isEnabled) :
|
|||
|
||||
// allocate enough space for the sample data and to turn it into
|
||||
// vertices and since they're all 'short', do so in one shot
|
||||
_arrSamples = new short[N_ALLOC_TOTAL];
|
||||
memset(_arrSamples, 0, N_ALLOC_TOTAL * sizeof(short));
|
||||
_arrVertices = _arrSamples + N_SAMPLES_ALLOC;
|
||||
_arrSamples = new short[N_INT16_TO_ALLOC];
|
||||
memset(_arrSamples, 0, N_INT16_TO_ALLOC * sizeof(short));
|
||||
_arrVertices = _arrSamples + MAX_SAMPLES;
|
||||
|
||||
// initialize write positions to start of each channel's region
|
||||
for (unsigned ch = 0; ch < MAX_CHANNELS; ++ch) {
|
||||
_arrWritePos[ch] = MAX_SAMPLES * ch;
|
||||
_arrWritePos[ch] = MAX_SAMPLES_PER_CHANNEL * ch;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -62,8 +62,8 @@ void Oscilloscope::addSamples(unsigned ch, short const* data, unsigned n) {
|
|||
}
|
||||
|
||||
// determine start/end offset of this channel's region
|
||||
unsigned baseOffs = MAX_SAMPLES * ch;
|
||||
unsigned endOffs = baseOffs + MAX_SAMPLES;
|
||||
unsigned baseOffs = MAX_SAMPLES_PER_CHANNEL * ch;
|
||||
unsigned endOffs = baseOffs + MAX_SAMPLES_PER_CHANNEL;
|
||||
|
||||
// fetch write position for this channel
|
||||
unsigned writePos = _arrWritePos[ch];
|
||||
|
@ -98,16 +98,16 @@ void Oscilloscope::render(int x, int y) {
|
|||
int lowpass = -int(std::numeric_limits<short>::min()) * _valLowpass;
|
||||
unsigned downsample = _valDownsample;
|
||||
// keep half of the buffer for writing and ensure an even vertex count
|
||||
unsigned usedWidth = min(_valWidth, MAX_SAMPLES / (downsample * 2)) & ~1u;
|
||||
unsigned usedWidth = min(_valWidth, MAX_SAMPLES_PER_CHANNEL / (downsample * 2)) & ~1u;
|
||||
unsigned usedSamples = usedWidth * downsample;
|
||||
|
||||
// expand samples to vertex data
|
||||
for (unsigned ch = 0; ch < MAX_CHANNELS; ++ch) {
|
||||
// for each channel: determine memory regions
|
||||
short const* basePtr = _arrSamples + MAX_SAMPLES * ch;
|
||||
short const* endPtr = basePtr + MAX_SAMPLES;
|
||||
short const* basePtr = _arrSamples + MAX_SAMPLES_PER_CHANNEL * ch;
|
||||
short const* endPtr = basePtr + MAX_SAMPLES_PER_CHANNEL;
|
||||
short const* inPtr = _arrSamples + _arrWritePos[ch];
|
||||
short* outPtr = _arrVertices + MAX_COORDS * ch;
|
||||
short* outPtr = _arrVertices + MAX_COORDS_PER_CHANNEL * ch;
|
||||
int sample = 0, x = usedWidth;
|
||||
for (int i = int(usedSamples); --i >= 0 ;) {
|
||||
if (inPtr == basePtr) {
|
||||
|
@ -125,24 +125,25 @@ void Oscilloscope::render(int x, int y) {
|
|||
}
|
||||
|
||||
// set up rendering state (vertex data lives at _arrVertices)
|
||||
glLineWidth(1.0);
|
||||
glDisable(GL_LINE_SMOOTH);
|
||||
glPushMatrix();
|
||||
glLineWidth(2.0);
|
||||
glTranslatef((float)x + 0.0f, (float)y + _valHeight / 2.0f, 0.0f);
|
||||
glScaled(1.0f, _valHeight / 32767.0f, 1.0f);
|
||||
glVertexPointer(2, GL_SHORT, 0, _arrVertices);
|
||||
glEnableClientState(GL_VERTEX_ARRAY);
|
||||
|
||||
// batch channel 0
|
||||
// render channel 0
|
||||
glColor3f(1.0f, 1.0f, 1.0f);
|
||||
glDrawArrays(GL_LINES, MAX_SAMPLES * 0, usedWidth);
|
||||
glDrawArrays(GL_LINES, MAX_SAMPLES_PER_CHANNEL * 0, usedWidth);
|
||||
|
||||
// batch channel 1
|
||||
// render channel 1
|
||||
glColor3f(0.0f, 1.0f ,1.0f);
|
||||
glDrawArrays(GL_LINES, MAX_SAMPLES * 1, usedWidth);
|
||||
glDrawArrays(GL_LINES, MAX_SAMPLES_PER_CHANNEL * 1, usedWidth);
|
||||
|
||||
// batch channel 2
|
||||
glColor3f(1.0f, 1.0f ,0.0f);
|
||||
glDrawArrays(GL_LINES, MAX_SAMPLES * 2, usedWidth);
|
||||
// render channel 2
|
||||
glColor3f(0.0f, 1.0f ,1.0f);
|
||||
glDrawArrays(GL_LINES, MAX_SAMPLES_PER_CHANNEL * 2, usedWidth);
|
||||
|
||||
// reset rendering state
|
||||
glDisableClientState(GL_VERTEX_ARRAY);
|
||||
|
|
|
@ -13,19 +13,19 @@
|
|||
|
||||
class Oscilloscope {
|
||||
public:
|
||||
static unsigned const MAX_CHANNELS = 3;
|
||||
static unsigned const MAX_SAMPLES = 4096; // per channel
|
||||
|
||||
Oscilloscope(int width, int height, bool isEnabled);
|
||||
~Oscilloscope();
|
||||
|
||||
volatile bool enabled;
|
||||
volatile bool inputPaused;
|
||||
|
||||
void addSamples(unsigned ch, short const* data, unsigned n);
|
||||
|
||||
void render(int x, int y);
|
||||
|
||||
static unsigned const MAX_CHANNELS = 3;
|
||||
static unsigned const MAX_SAMPLES_PER_CHANNEL = 4096;
|
||||
|
||||
volatile bool enabled;
|
||||
volatile bool inputPaused;
|
||||
|
||||
void setLowpass(float w) { assert(w > 0.0f && w <= 1.0f); _valLowpass = w; }
|
||||
void setDownsampling(unsigned f) { assert(f > 0); _valDownsample = f; }
|
||||
|
||||
|
@ -34,8 +34,7 @@ private:
|
|||
Oscilloscope(Oscilloscope const&); // = delete;
|
||||
Oscilloscope& operator=(Oscilloscope const&); // = delete;
|
||||
|
||||
// implementation
|
||||
inline short* bufferBase(int i, int channel);
|
||||
// state variables
|
||||
|
||||
unsigned _valWidth;
|
||||
unsigned _valHeight;
|
||||
|
|
|
@ -187,7 +187,7 @@ void SerialInterface::readData() {
|
|||
convertHexToInt(sensorBuffer + 10, accelYRate);
|
||||
convertHexToInt(sensorBuffer + 14, accelXRate);
|
||||
|
||||
const float LSB_TO_METERS_PER_SECOND2 = 1.f / 16384.f * 9.80665f;
|
||||
const float LSB_TO_METERS_PER_SECOND2 = 1.f / 16384.f * GRAVITY_EARTH;
|
||||
// From MPU-9150 register map, with setting on
|
||||
// highest resolution = +/- 2G
|
||||
|
||||
|
|
|
@ -181,7 +181,7 @@ double diffclock(timeval *clock1,timeval *clock2)
|
|||
|
||||
static TextRenderer* textRenderer(int mono) {
|
||||
static TextRenderer* monoRenderer = new TextRenderer(MONO_FONT_FAMILY);
|
||||
static TextRenderer* proportionalRenderer = new TextRenderer(SANS_FONT_FAMILY);
|
||||
static TextRenderer* proportionalRenderer = new TextRenderer(SANS_FONT_FAMILY, -1, -1, false, TextRenderer::SHADOW_EFFECT);
|
||||
return mono ? monoRenderer : proportionalRenderer;
|
||||
}
|
||||
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <cmath>
|
||||
#include <iostream> // to load voxels from file
|
||||
#include <fstream> // to load voxels from file
|
||||
#include <glm/gtc/random.hpp>
|
||||
#include <SharedUtil.h>
|
||||
#include <PacketHeaders.h>
|
||||
#include <PerfStat.h>
|
||||
|
@ -20,6 +21,8 @@
|
|||
#include <pthread.h>
|
||||
#include "Log.h"
|
||||
#include "VoxelConstants.h"
|
||||
#include "InterfaceConfig.h"
|
||||
#include "renderer/ProgramObject.h"
|
||||
|
||||
#include "VoxelSystem.h"
|
||||
|
||||
|
@ -497,6 +500,37 @@ void VoxelSystem::init() {
|
|||
// delete the indices and normals arrays that are no longer needed
|
||||
delete[] indicesArray;
|
||||
delete[] normalsArray;
|
||||
|
||||
// create our simple fragment shader
|
||||
switchToResourcesParentIfRequired();
|
||||
_perlinModulateProgram = new ProgramObject();
|
||||
_perlinModulateProgram->addShaderFromSourceFile(QGLShader::Vertex, "resources/shaders/perlin_modulate.vert");
|
||||
_perlinModulateProgram->addShaderFromSourceFile(QGLShader::Fragment, "resources/shaders/perlin_modulate.frag");
|
||||
_perlinModulateProgram->link();
|
||||
|
||||
_perlinModulateProgram->setUniformValue("permutationNormalTexture", 0);
|
||||
|
||||
// create the permutation/normal texture
|
||||
glGenTextures(1, &_permutationNormalTextureID);
|
||||
glBindTexture(GL_TEXTURE_2D, _permutationNormalTextureID);
|
||||
|
||||
// the first line consists of random permutation offsets
|
||||
unsigned char data[256 * 2 * 3];
|
||||
for (int i = 0; i < 256 * 3; i++) {
|
||||
data[i] = rand() % 256;
|
||||
}
|
||||
// the next, random unit normals
|
||||
for (int i = 256 * 3; i < 256 * 3 * 2; i += 3) {
|
||||
glm::vec3 randvec = glm::sphericalRand(1.0f);
|
||||
data[i] = ((randvec.x + 1.0f) / 2.0f) * 255.0f;
|
||||
data[i + 1] = ((randvec.y + 1.0f) / 2.0f) * 255.0f;
|
||||
data[i + 2] = ((randvec.z + 1.0f) / 2.0f) * 255.0f;
|
||||
}
|
||||
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 256, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
|
||||
|
||||
glBindTexture(GL_TEXTURE_2D, 0);
|
||||
}
|
||||
|
||||
void VoxelSystem::updateFullVBOs() {
|
||||
|
@ -606,11 +640,17 @@ void VoxelSystem::render() {
|
|||
glBindBuffer(GL_ARRAY_BUFFER, _vboColorsID);
|
||||
glColorPointer(3, GL_UNSIGNED_BYTE, 0, 0);
|
||||
|
||||
_perlinModulateProgram->bind();
|
||||
glBindTexture(GL_TEXTURE_2D, _permutationNormalTextureID);
|
||||
|
||||
// draw the number of voxels we have
|
||||
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _vboIndicesID);
|
||||
glScalef(TREE_SCALE, TREE_SCALE, TREE_SCALE);
|
||||
glDrawElements(GL_TRIANGLES, 36 * _voxelsInReadArrays, GL_UNSIGNED_INT, 0);
|
||||
|
||||
_perlinModulateProgram->release();
|
||||
glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
// deactivate vertex and color arrays after drawing
|
||||
glDisableClientState(GL_VERTEX_ARRAY);
|
||||
glDisableClientState(GL_NORMAL_ARRAY);
|
||||
|
|
|
@ -21,6 +21,8 @@
|
|||
#include "Util.h"
|
||||
#include "world.h"
|
||||
|
||||
class ProgramObject;
|
||||
|
||||
const int NUM_CHILDREN = 8;
|
||||
|
||||
class VoxelSystem : public AgentData {
|
||||
|
@ -133,6 +135,9 @@ private:
|
|||
pthread_mutex_t _bufferWriteLock;
|
||||
pthread_mutex_t _treeLock;
|
||||
|
||||
ProgramObject* _perlinModulateProgram;
|
||||
GLuint _permutationNormalTextureID;
|
||||
|
||||
ViewFrustum* _viewFrustum;
|
||||
ViewFrustum _lastKnowViewFrustum;
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
|
||||
using namespace std;
|
||||
|
||||
const int MAX_CONTENT_LENGTH = 140;
|
||||
const int MAX_CONTENT_LENGTH = 80;
|
||||
|
||||
ChatEntry::ChatEntry() : _cursorPos(0) {
|
||||
}
|
||||
|
@ -65,7 +65,7 @@ bool ChatEntry::keyPressEvent(QKeyEvent* event) {
|
|||
event->ignore();
|
||||
return true;
|
||||
}
|
||||
if (_contents.size() != MAX_CONTENT_LENGTH) {
|
||||
if (_contents.size() < MAX_CONTENT_LENGTH) {
|
||||
_contents.insert(_cursorPos, 1, text.at(0).toAscii());
|
||||
_cursorPos++;
|
||||
}
|
||||
|
@ -74,7 +74,19 @@ bool ChatEntry::keyPressEvent(QKeyEvent* event) {
|
|||
}
|
||||
|
||||
void ChatEntry::render(int screenWidth, int screenHeight) {
|
||||
drawtext(20, screenHeight - 150, 0.10, 0, 1.0, 0, _contents.c_str(), 1, 1, 1);
|
||||
// draw a gray background so that we can actually see what we're typing
|
||||
int bottom = screenHeight - 150, top = screenHeight - 165;
|
||||
int left = 20, right = left + 600;
|
||||
|
||||
glColor3f(0.2f, 0.2f, 0.2f);
|
||||
glBegin(GL_QUADS);
|
||||
glVertex2f(left - 5, bottom + 7);
|
||||
glVertex2f(right + 5, bottom + 7);
|
||||
glVertex2f(right + 5, top - 3);
|
||||
glVertex2f(left - 5, top - 3);
|
||||
glEnd();
|
||||
|
||||
drawtext(left, bottom, 0.10, 0, 1.0, 0, _contents.c_str(), 1, 1, 1);
|
||||
|
||||
float width = 0;
|
||||
for (string::iterator it = _contents.begin(), end = it + _cursorPos; it != end; it++) {
|
||||
|
@ -82,7 +94,7 @@ void ChatEntry::render(int screenWidth, int screenHeight) {
|
|||
}
|
||||
glDisable(GL_LINE_SMOOTH);
|
||||
glBegin(GL_LINE_STRIP);
|
||||
glVertex2f(20 + width, screenHeight - 165);
|
||||
glVertex2f(20 + width, screenHeight - 150);
|
||||
glVertex2f(left + width, top + 2);
|
||||
glVertex2f(left + width, bottom + 2);
|
||||
glEnd();
|
||||
}
|
||||
|
|
|
@ -19,9 +19,10 @@ Glyph::Glyph(int textureID, const QPoint& location, const QRect& bounds, int wid
|
|||
_textureID(textureID), _location(location), _bounds(bounds), _width(width) {
|
||||
}
|
||||
|
||||
TextRenderer::TextRenderer(const char* family, int pointSize, int weight, bool italic)
|
||||
: _font(family, pointSize, weight, italic),
|
||||
_metrics(_font), _x(IMAGE_SIZE), _y(IMAGE_SIZE), _rowHeight(0) {
|
||||
TextRenderer::TextRenderer(const char* family, int pointSize, int weight,
|
||||
bool italic, EffectType effectType, int effectThickness)
|
||||
: _font(family, pointSize, weight, italic), _metrics(_font), _effectType(effectType),
|
||||
_effectThickness(effectThickness), _x(IMAGE_SIZE), _y(IMAGE_SIZE), _rowHeight(0) {
|
||||
_font.setKerning(false);
|
||||
}
|
||||
|
||||
|
@ -97,6 +98,14 @@ const Glyph& TextRenderer::getGlyph(char c) {
|
|||
glyph = Glyph(0, QPoint(), QRect(), _metrics.width(ch));
|
||||
return glyph;
|
||||
}
|
||||
// grow the bounds to account for effect, if any
|
||||
if (_effectType == SHADOW_EFFECT) {
|
||||
bounds.adjust(-_effectThickness, 0, 0, _effectThickness);
|
||||
|
||||
} else if (_effectType == OUTLINE_EFFECT) {
|
||||
bounds.adjust(-_effectThickness, -_effectThickness, _effectThickness, _effectThickness);
|
||||
}
|
||||
|
||||
// grow the bounds to account for antialiasing
|
||||
bounds.adjust(-1, -1, 1, 1);
|
||||
|
||||
|
@ -128,6 +137,23 @@ const Glyph& TextRenderer::getGlyph(char c) {
|
|||
image.fill(0);
|
||||
QPainter painter(&image);
|
||||
painter.setFont(_font);
|
||||
if (_effectType == SHADOW_EFFECT) {
|
||||
for (int i = 0; i < _effectThickness; i++) {
|
||||
painter.drawText(-bounds.x() - i, -bounds.y() + i, ch);
|
||||
}
|
||||
} else if (_effectType == OUTLINE_EFFECT) {
|
||||
QPainterPath path;
|
||||
QFont font = _font;
|
||||
font.setStyleStrategy(QFont::ForceOutline);
|
||||
path.addText(-bounds.x() - 0.5, -bounds.y() + 0.5, font, ch);
|
||||
QPen pen;
|
||||
pen.setWidth(_effectThickness);
|
||||
pen.setJoinStyle(Qt::RoundJoin);
|
||||
pen.setCapStyle(Qt::RoundCap);
|
||||
painter.setPen(pen);
|
||||
painter.setRenderHint(QPainter::Antialiasing);
|
||||
painter.drawPath(path);
|
||||
}
|
||||
painter.setPen(QColor(255, 255, 255));
|
||||
painter.drawText(-bounds.x(), -bounds.y(), ch);
|
||||
}
|
||||
|
|
|
@ -23,7 +23,10 @@ class Glyph;
|
|||
class TextRenderer {
|
||||
public:
|
||||
|
||||
TextRenderer(const char* family, int pointSize = -1, int weight = -1, bool italic = false);
|
||||
enum EffectType { NO_EFFECT, SHADOW_EFFECT, OUTLINE_EFFECT };
|
||||
|
||||
TextRenderer(const char* family, int pointSize = -1, int weight = -1, bool italic = false,
|
||||
EffectType effect = NO_EFFECT, int effectThickness = 2);
|
||||
~TextRenderer();
|
||||
|
||||
const QFontMetrics& metrics() const { return _metrics; }
|
||||
|
@ -42,6 +45,12 @@ private:
|
|||
|
||||
// the font metrics
|
||||
QFontMetrics _metrics;
|
||||
|
||||
// the type of effect to apply
|
||||
EffectType _effectType;
|
||||
|
||||
// the thickness of the effect
|
||||
int _effectThickness;
|
||||
|
||||
// maps characters to cached glyph info
|
||||
QHash<char, Glyph> _glyphs;
|
||||
|
|
|
@ -15,5 +15,6 @@
|
|||
const float WORLD_SIZE = 10.0;
|
||||
#define PI 3.14159265
|
||||
#define PIf 3.14159265f
|
||||
#define GRAVITY_EARTH 9.80665f;
|
||||
|
||||
#endif
|
||||
|
|
22
libraries/audio/CMakeLists.txt
Normal file
|
@ -0,0 +1,22 @@
|
|||
cmake_minimum_required(VERSION 2.8)
|
||||
|
||||
set(ROOT_DIR ../..)
|
||||
set(MACRO_DIR ${ROOT_DIR}/cmake/macros)
|
||||
|
||||
# setup for find modules
|
||||
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../../cmake/modules/")
|
||||
|
||||
set(TARGET_NAME audio)
|
||||
|
||||
include(${MACRO_DIR}/SetupHifiLibrary.cmake)
|
||||
setup_hifi_library(${TARGET_NAME})
|
||||
|
||||
include(${MACRO_DIR}/IncludeGLM.cmake)
|
||||
include_glm(${TARGET_NAME} ${ROOT_DIR})
|
||||
|
||||
include(${MACRO_DIR}/LinkHifiLibrary.cmake)
|
||||
link_hifi_library(shared ${TARGET_NAME} ${ROOT_DIR})
|
||||
|
||||
# link the threads library
|
||||
find_package(Threads REQUIRED)
|
||||
target_link_libraries(${TARGET_NAME} ${CMAKE_THREAD_LIBS_INIT})
|
64
libraries/audio/src/AudioInjectionManager.cpp
Normal file
|
@ -0,0 +1,64 @@
|
|||
//
|
||||
// AudioInjectionManager.cpp
|
||||
// hifi
|
||||
//
|
||||
// Created by Stephen Birarda on 5/16/13.
|
||||
// Copyright (c) 2012 High Fidelity, Inc. All rights reserved.
|
||||
//
|
||||
|
||||
#include <sys/time.h>
|
||||
|
||||
#include "SharedUtil.h"
|
||||
#include "PacketHeaders.h"
|
||||
|
||||
#include "AudioInjectionManager.h"
|
||||
|
||||
UDPSocket* AudioInjectionManager::_injectorSocket = NULL;
|
||||
sockaddr AudioInjectionManager::_destinationSocket;
|
||||
AudioInjector* AudioInjectionManager::_injectors[50] = {};
|
||||
|
||||
AudioInjector* AudioInjectionManager::injectorWithSamplesFromFile(const char* filename) {
|
||||
for (int i = 0; i < MAX_CONCURRENT_INJECTORS; i++) {
|
||||
if (!_injectors[i]) {
|
||||
_injectors[i] = new AudioInjector(filename);
|
||||
return _injectors[i];
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
AudioInjector* AudioInjectionManager::injectorWithCapacity(int capacity) {
|
||||
for (int i = 0; i < MAX_CONCURRENT_INJECTORS; i++) {
|
||||
if (!_injectors[i]) {
|
||||
_injectors[i] = new AudioInjector(capacity);
|
||||
return _injectors[i];
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void* AudioInjectionManager::injectAudioViaThread(void* args) {
|
||||
AudioInjector* injector = (AudioInjector*) args;
|
||||
|
||||
injector->injectAudio(_injectorSocket, &_destinationSocket);
|
||||
|
||||
// if this is an injector inside the injection manager's array, we're responsible for deletion
|
||||
for (int i = 0; i < MAX_CONCURRENT_INJECTORS; i++) {
|
||||
if (_injectors[i] == injector) {
|
||||
// pointer matched - delete this injector
|
||||
delete injector;
|
||||
|
||||
// set the pointer to NULL so we can reuse this spot
|
||||
_injectors[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
pthread_exit(0);
|
||||
}
|
||||
|
||||
void AudioInjectionManager::threadInjector(AudioInjector* injector) {
|
||||
pthread_t audioInjectThread;
|
||||
pthread_create(&audioInjectThread, NULL, injectAudioViaThread, (void*) injector);
|
||||
}
|
36
libraries/audio/src/AudioInjectionManager.h
Normal file
|
@ -0,0 +1,36 @@
|
|||
//
|
||||
// AudioInjectionManager.h
|
||||
// hifi
|
||||
//
|
||||
// Created by Stephen Birarda on 5/16/13.
|
||||
// Copyright (c) 2012 High Fidelity, Inc. All rights reserved.
|
||||
//
|
||||
|
||||
#ifndef __hifi__AudioInjectionManager__
|
||||
#define __hifi__AudioInjectionManager__
|
||||
|
||||
#include <iostream>
|
||||
|
||||
#include "UDPSocket.h"
|
||||
#include "AudioInjector.h"
|
||||
|
||||
const int MAX_CONCURRENT_INJECTORS = 50;
|
||||
|
||||
class AudioInjectionManager {
|
||||
public:
|
||||
static AudioInjector* injectorWithCapacity(int capacity);
|
||||
static AudioInjector* injectorWithSamplesFromFile(const char* filename);
|
||||
|
||||
static void threadInjector(AudioInjector* injector);
|
||||
|
||||
static void setInjectorSocket(UDPSocket* injectorSocket) { _injectorSocket = injectorSocket;}
|
||||
static void setDestinationSocket(sockaddr& destinationSocket) { _destinationSocket = destinationSocket; }
|
||||
private:
|
||||
static void* injectAudioViaThread(void* args);
|
||||
|
||||
static UDPSocket* _injectorSocket;
|
||||
static sockaddr _destinationSocket;
|
||||
static AudioInjector* _injectors[MAX_CONCURRENT_INJECTORS];
|
||||
};
|
||||
|
||||
#endif /* defined(__hifi__AudioInjectionManager__) */
|
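To make the intent of the new AudioInjectionManager concrete, here is a minimal caller-side sketch based only on the API declared above. It is illustrative, not part of the commit; the function name, socket variables, sample-file path, and volume value are placeholders.

#include <glm/glm.hpp>
#include "UDPSocket.h"
#include "AudioInjectionManager.h"

// hypothetical helper: sendingSocket and mixerAddress are assumed to be set up elsewhere
void injectSampleSound(UDPSocket* sendingSocket, sockaddr& mixerAddress) {
    // point the manager at the socket it should send from and the mixer it should send to
    AudioInjectionManager::setInjectorSocket(sendingSocket);
    AudioInjectionManager::setDestinationSocket(mixerAddress);

    // grab a free injector slot backed by a raw sample file (placeholder path)
    AudioInjector* injector = AudioInjectionManager::injectorWithSamplesFromFile("sounds/example.raw");

    if (injector) {
        injector->setPosition(glm::vec3(0.0f, 1.0f, 0.0f));
        injector->setBearing(0.0f);
        injector->setVolume(128); // on the 0-255 scale used by the injector

        // send on a background pthread; manager-owned injectors are deleted when injection finishes
        AudioInjectionManager::threadInjector(injector);
    }
}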
|
@ -3,27 +3,24 @@
|
|||
// hifi
|
||||
//
|
||||
// Created by Stephen Birarda on 4/23/13.
|
||||
//
|
||||
// Copyright (c) 2012 High Fidelity, Inc. All rights reserved.
|
||||
//
|
||||
|
||||
#include <sys/time.h>
|
||||
#include <fstream>
|
||||
#include <cstring>
|
||||
|
||||
#include "SharedUtil.h"
|
||||
#include "PacketHeaders.h"
|
||||
#include <SharedUtil.h>
|
||||
#include <PacketHeaders.h>
|
||||
#include <UDPSocket.h>
|
||||
|
||||
#include "AudioInjector.h"
|
||||
|
||||
const int BUFFER_LENGTH_BYTES = 512;
|
||||
const int BUFFER_LENGTH_SAMPLES = BUFFER_LENGTH_BYTES / sizeof(int16_t);
|
||||
const float SAMPLE_RATE = 22050.0f;
|
||||
const float BUFFER_SEND_INTERVAL_USECS = (BUFFER_LENGTH_SAMPLES / SAMPLE_RATE) * 1000000;
|
||||
const int MAX_INJECTOR_VOLUME = 0xFF;
|
||||
|
||||
AudioInjector::AudioInjector(const char* filename) :
|
||||
_position(),
|
||||
_bearing(0),
|
||||
_attenuationModifier(255),
|
||||
_volume(MAX_INJECTOR_VOLUME),
|
||||
_indexOfNextSlot(0),
|
||||
_isInjectingAudio(false)
|
||||
{
|
||||
|
@ -50,7 +47,7 @@ AudioInjector::AudioInjector(int maxNumSamples) :
|
|||
_numTotalSamples(maxNumSamples),
|
||||
_position(),
|
||||
_bearing(0),
|
||||
_attenuationModifier(255),
|
||||
_volume(MAX_INJECTOR_VOLUME),
|
||||
_indexOfNextSlot(0),
|
||||
_isInjectingAudio(false)
|
||||
{
|
||||
|
@ -62,10 +59,50 @@ AudioInjector::~AudioInjector() {
|
|||
delete[] _audioSampleArray;
|
||||
}
|
||||
|
||||
void AudioInjector::setPosition(float* position) {
|
||||
_position[0] = position[0];
|
||||
_position[1] = position[1];
|
||||
_position[2] = position[2];
|
||||
void AudioInjector::injectAudio(UDPSocket* injectorSocket, sockaddr* destinationSocket) {
|
||||
if (_audioSampleArray) {
|
||||
_isInjectingAudio = true;
|
||||
|
||||
timeval startTime;
|
||||
|
||||
// one byte for the packet header, 3 positional floats, 1 volume byte, 1 bearing float
|
||||
int leadingBytes = 1 + (sizeof(float) * 4) + 1;
|
||||
unsigned char dataPacket[BUFFER_LENGTH_BYTES + leadingBytes];
|
||||
|
||||
dataPacket[0] = PACKET_HEADER_INJECT_AUDIO;
|
||||
unsigned char *currentPacketPtr = dataPacket + 1;
|
||||
|
||||
memcpy(currentPacketPtr, &_position, sizeof(_position));
|
||||
currentPacketPtr += sizeof(_position);
|
||||
|
||||
*currentPacketPtr = _volume;
|
||||
currentPacketPtr++;
|
||||
|
||||
memcpy(currentPacketPtr, &_bearing, sizeof(_bearing));
|
||||
currentPacketPtr += sizeof(_bearing);
|
||||
|
||||
for (int i = 0; i < _numTotalSamples; i += BUFFER_LENGTH_SAMPLES) {
|
||||
gettimeofday(&startTime, NULL);
|
||||
|
||||
int numSamplesToCopy = BUFFER_LENGTH_SAMPLES;
|
||||
|
||||
if (_numTotalSamples - i < BUFFER_LENGTH_SAMPLES) {
|
||||
numSamplesToCopy = _numTotalSamples - i;
|
||||
memset(currentPacketPtr + numSamplesToCopy, 0, BUFFER_LENGTH_BYTES - (numSamplesToCopy * sizeof(int16_t)));
|
||||
}
|
||||
|
||||
memcpy(currentPacketPtr, _audioSampleArray + i, numSamplesToCopy * sizeof(int16_t));
|
||||
|
||||
injectorSocket->send(destinationSocket, dataPacket, sizeof(dataPacket));
|
||||
|
||||
double usecToSleep = BUFFER_SEND_INTERVAL_USECS - (usecTimestampNow() - usecTimestamp(&startTime));
|
||||
if (usecToSleep > 0) {
|
||||
usleep(usecToSleep);
|
||||
}
|
||||
}
|
||||
|
||||
_isInjectingAudio = false;
|
||||
}
|
||||
}
|
||||
|
||||
void AudioInjector::addSample(const int16_t sample) {
|
||||
|
@ -82,63 +119,3 @@ void AudioInjector::addSamples(int16_t* sampleBuffer, int numSamples) {
|
|||
_indexOfNextSlot += numSamples;
|
||||
}
|
||||
}
|
||||
|
||||
void AudioInjector::injectAudio() {
|
||||
if (_audioSampleArray) {
|
||||
_isInjectingAudio = true;
|
||||
|
||||
timeval startTime;
|
||||
|
||||
// one byte for header, 3 positional floats, 1 bearing float, 1 attenuation modifier byte
|
||||
int leadingBytes = 1 + (sizeof(float) * 4) + 1;
|
||||
unsigned char dataPacket[BUFFER_LENGTH_BYTES + leadingBytes];
|
||||
|
||||
dataPacket[0] = PACKET_HEADER_INJECT_AUDIO;
|
||||
unsigned char *currentPacketPtr = dataPacket + 1;
|
||||
|
||||
for (int i = 0; i < 3; i++) {
|
||||
memcpy(currentPacketPtr, &_position[i], sizeof(float));
|
||||
currentPacketPtr += sizeof(float);
|
||||
}
|
||||
|
||||
*currentPacketPtr = _attenuationModifier;
|
||||
currentPacketPtr++;
|
||||
|
||||
memcpy(currentPacketPtr, &_bearing, sizeof(float));
|
||||
currentPacketPtr += sizeof(float);
|
||||
|
||||
for (int i = 0; i < _numTotalSamples; i += BUFFER_LENGTH_SAMPLES) {
|
||||
gettimeofday(&startTime, NULL);
|
||||
|
||||
int numSamplesToCopy = BUFFER_LENGTH_SAMPLES;
|
||||
|
||||
if (_numTotalSamples - i < BUFFER_LENGTH_SAMPLES) {
|
||||
numSamplesToCopy = _numTotalSamples - i;
|
||||
memset(currentPacketPtr + numSamplesToCopy, 0, BUFFER_LENGTH_BYTES - (numSamplesToCopy * sizeof(int16_t)));
|
||||
}
|
||||
|
||||
memcpy(currentPacketPtr, _audioSampleArray + i, numSamplesToCopy * sizeof(int16_t));
|
||||
|
||||
_injectorSocket->send(&_destinationSocket, dataPacket, sizeof(dataPacket));
|
||||
|
||||
double usecToSleep = BUFFER_SEND_INTERVAL_USECS - (usecTimestampNow() - usecTimestamp(&startTime));
|
||||
if (usecToSleep > 0) {
|
||||
usleep(usecToSleep);
|
||||
}
|
||||
}
|
||||
|
||||
_isInjectingAudio = false;
|
||||
}
|
||||
}
|
||||
|
||||
void* injectAudioViaThread(void* args) {
|
||||
AudioInjector* parentInjector = (AudioInjector*) args;
|
||||
parentInjector->injectAudio();
|
||||
|
||||
pthread_exit(0);
|
||||
}
|
||||
|
||||
void AudioInjector::threadInjectionOfAudio() {
|
||||
pthread_t audioInjectThread;
|
||||
pthread_create(&audioInjectThread, NULL, injectAudioViaThread, (void*) this);
|
||||
}
|
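For clarity, the packet assembled by the new injectAudio(UDPSocket*, sockaddr*) added earlier in this file's diff works out to 530 bytes per send, assuming glm::vec3 is three tightly packed floats: 1 header byte (PACKET_HEADER_INJECT_AUDIO) + 12 bytes of position + 1 volume byte + 4 bytes of bearing = 18 leading bytes, matching leadingBytes = 1 + (sizeof(float) * 4) + 1, followed by BUFFER_LENGTH_BYTES = 512 bytes carrying 256 16-bit samples. At the 22050 Hz sample rate that is one packet roughly every 11.6 ms, which is the sleep interval the send loop aims for.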
55
libraries/audio/src/AudioInjector.h
Normal file
|
@ -0,0 +1,55 @@
|
|||
//
|
||||
// AudioInjector.h
|
||||
// hifi
|
||||
//
|
||||
// Created by Stephen Birarda on 4/23/13.
|
||||
// Copyright (c) 2012 High Fidelity, Inc. All rights reserved.
|
||||
//
|
||||
|
||||
#ifndef __hifi__AudioInjector__
|
||||
#define __hifi__AudioInjector__
|
||||
|
||||
#include <iostream>
|
||||
|
||||
#include <glm/glm.hpp>
|
||||
|
||||
const int BUFFER_LENGTH_BYTES = 512;
|
||||
const int BUFFER_LENGTH_SAMPLES = BUFFER_LENGTH_BYTES / sizeof(int16_t);
|
||||
const float SAMPLE_RATE = 22050.0f;
|
||||
const float BUFFER_SEND_INTERVAL_USECS = (BUFFER_LENGTH_SAMPLES / SAMPLE_RATE) * 1000000;
|
||||
|
||||
class AudioInjector {
|
||||
friend class AudioInjectionManager;
|
||||
|
||||
public:
|
||||
AudioInjector(const char* filename);
|
||||
AudioInjector(int maxNumSamples);
|
||||
~AudioInjector();
|
||||
|
||||
void injectAudio(UDPSocket* injectorSocket, sockaddr* destinationSocket);
|
||||
|
||||
bool isInjectingAudio() const { return _isInjectingAudio; }
|
||||
void setIsInjectingAudio(bool isInjectingAudio) { _isInjectingAudio = isInjectingAudio; }
|
||||
|
||||
unsigned char getVolume() const { return _volume; }
|
||||
void setVolume(unsigned char volume) { _volume = volume; }
|
||||
|
||||
const glm::vec3& getPosition() const { return _position; }
|
||||
void setPosition(const glm::vec3& position) { _position = position; }
|
||||
|
||||
float getBearing() const { return _bearing; }
|
||||
void setBearing(float bearing) { _bearing = bearing; }
|
||||
|
||||
void addSample(const int16_t sample);
|
||||
void addSamples(int16_t* sampleBuffer, int numSamples);
|
||||
private:
|
||||
int16_t* _audioSampleArray;
|
||||
int _numTotalSamples;
|
||||
glm::vec3 _position;
|
||||
float _bearing;
|
||||
unsigned char _volume;
|
||||
int _indexOfNextSlot;
|
||||
bool _isInjectingAudio;
|
||||
};
|
||||
|
||||
#endif /* defined(__hifi__AudioInjector__) */
|
|
@ -236,4 +236,13 @@ void AvatarData::setBodyRoll(float bodyRoll) {
|
|||
_bodyRoll = bodyRoll;
|
||||
}
|
||||
|
||||
void AvatarData::setHeadPitch(float p) {
|
||||
// Set head pitch and apply limits
|
||||
const float MAX_PITCH = 60;
|
||||
const float MIN_PITCH = -60;
|
||||
_headPitch = glm::clamp(p, MIN_PITCH, MAX_PITCH);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -73,13 +73,13 @@ public:
|
|||
void setBodyRoll(float bodyRoll);
|
||||
|
||||
// Head Rotation
|
||||
void setHeadPitch(float p) {_headPitch = p; }
|
||||
void setHeadPitch(float p);
|
||||
void setHeadYaw(float y) {_headYaw = y; }
|
||||
void setHeadRoll(float r) {_headRoll = r; };
|
||||
float getHeadPitch() const { return _headPitch; };
|
||||
float getHeadYaw() const { return _headYaw; };
|
||||
float getHeadRoll() const { return _headRoll; };
|
||||
void addHeadPitch(float p) {_headPitch -= p; }
|
||||
void addHeadPitch(float p) { setHeadPitch(_headPitch - p); }
|
||||
void addHeadYaw(float y){_headYaw -= y; }
|
||||
void addHeadRoll(float r){_headRoll += r; }
|
||||
|
||||
|
|
|
@ -1,48 +0,0 @@
|
|||
//
|
||||
// AudioInjector.h
|
||||
// hifi
|
||||
//
|
||||
// Created by Stephen Birarda on 4/23/13.
|
||||
//
|
||||
//
|
||||
|
||||
#ifndef __hifi__AudioInjector__
|
||||
#define __hifi__AudioInjector__
|
||||
|
||||
#include <iostream>
|
||||
#include <netinet/in.h>
|
||||
|
||||
#include "UDPSocket.h"
|
||||
|
||||
class AudioInjector {
|
||||
public:
|
||||
AudioInjector(const char* filename);
|
||||
AudioInjector(int maxNumSamples);
|
||||
~AudioInjector();
|
||||
|
||||
bool isInjectingAudio() const { return _isInjectingAudio; }
|
||||
|
||||
void setPosition(float* position);
|
||||
void setBearing(float bearing) { _bearing = bearing; }
|
||||
void setAttenuationModifier(unsigned char attenuationModifier) { _attenuationModifier = attenuationModifier; }
|
||||
void setInjectorSocket(UDPSocket* injectorSocket) { _injectorSocket = injectorSocket; }
|
||||
void setDestinationSocket(sockaddr* destinationSocket) { _destinationSocket = *destinationSocket; }
|
||||
|
||||
void addSample(const int16_t sample);
|
||||
void addSamples(int16_t* sampleBuffer, int numSamples);
|
||||
|
||||
void injectAudio();
|
||||
void threadInjectionOfAudio();
|
||||
private:
|
||||
int16_t* _audioSampleArray;
|
||||
int _numTotalSamples;
|
||||
float _position[3];
|
||||
float _bearing;
|
||||
unsigned char _attenuationModifier;
|
||||
int _indexOfNextSlot;
|
||||
UDPSocket* _injectorSocket;
|
||||
sockaddr _destinationSocket;
|
||||
bool _isInjectingAudio;
|
||||
};
|
||||
|
||||
#endif /* defined(__hifi__AudioInjector__) */
|