mirror of
https://github.com/JulianGro/overte.git
synced 2025-04-25 22:15:15 +02:00
3537 lines
142 KiB
C++
3537 lines
142 KiB
C++
//
|
|
// Application.cpp
|
|
// interface
|
|
//
|
|
// Created by Andrzej Kapolka on 5/10/13.
|
|
// Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
|
|
|
|
#include <sstream>
|
|
|
|
#include <stdlib.h>
|
|
#include <cmath>
|
|
|
|
#ifdef _WIN32
|
|
#include "Syssocket.h"
|
|
#include "Systime.h"
|
|
#else
|
|
#include <sys/time.h>
|
|
#include <arpa/inet.h>
|
|
#include <ifaddrs.h>
|
|
#endif
|
|
|
|
#include <glm/gtx/quaternion.hpp>
|
|
#include <glm/gtx/vector_angle.hpp>
|
|
|
|
// include this before QGLWidget, which includes an earlier version of OpenGL
|
|
#include "InterfaceConfig.h"
|
|
|
|
#include <QActionGroup>
|
|
#include <QColorDialog>
|
|
#include <QDesktopWidget>
|
|
#include <QCheckBox>
|
|
#include <QImage>
|
|
#include <QKeyEvent>
|
|
#include <QMainWindow>
|
|
#include <QMenuBar>
|
|
#include <QMouseEvent>
|
|
#include <QNetworkAccessManager>
|
|
#include <QOpenGLFramebufferObject>
|
|
#include <QWheelEvent>
|
|
#include <QSettings>
|
|
#include <QShortcut>
|
|
#include <QTimer>
|
|
#include <QUrl>
|
|
#include <QtDebug>
|
|
#include <QFileDialog>
|
|
#include <QDesktopServices>
|
|
|
|
#include <NodeTypes.h>
|
|
#include <AudioInjectionManager.h>
|
|
#include <AudioInjector.h>
|
|
#include <Logging.h>
|
|
#include <OctalCode.h>
|
|
#include <PacketHeaders.h>
|
|
#include <PairingHandler.h>
|
|
#include <PerfStat.h>
|
|
|
|
#include <VoxelSceneStats.h>
|
|
|
|
#include "Application.h"
|
|
#include "LogDisplay.h"
|
|
#include "Menu.h"
|
|
#include "Swatch.h"
|
|
#include "Util.h"
|
|
#include "devices/LeapManager.h"
|
|
#include "devices/OculusManager.h"
|
|
#include "renderer/ProgramObject.h"
|
|
#include "ui/TextRenderer.h"
|
|
#include "InfoView.h"
|
|
|
|
using namespace std;

// Starfield information
// NOTE(review): STAR_FILE/STAR_CACHE_FILE are mutable char arrays rather than
// const — presumably some consumer requires char*; confirm before const-ifying.
static char STAR_FILE[] = "http://s3-us-west-1.amazonaws.com/highfidelity/stars.txt";
static char STAR_CACHE_FILE[] = "cachedStars.txt";

static const int BANDWIDTH_METER_CLICK_MAX_DRAG_LENGTH = 6; // farther dragged clicks are ignored

const int IDLE_SIMULATE_MSECS = 16; // How often should call simulate and other stuff
// in the idle loop? (60 FPS is default)
// Timer driving the idle loop; created in initializeGL, hence file-scope.
static QTimer* idleTimer = NULL;

const int STARTUP_JITTER_SAMPLES = PACKET_LENGTH_SAMPLES_PER_CHANNEL / 2;
// Startup optimistically with small jitter buffer that
// will start playback on the second received audio packet.
|
|
|
|
// Qt message handler installed by the Application constructor: echoes every
// log message to stdout and mirrors it into the in-application LogDisplay
// overlay. The message type and source context are intentionally unused.
void messageHandler(QtMsgType type, const QMessageLogContext& context, const QString &message) {
    const QByteArray localMessage = message.toLocal8Bit();
    fprintf(stdout, "%s", localMessage.constData());
    LogDisplay::instance.addMessage(localMessage.constData());
}
|
|
|
|
// Construct the Interface application: builds the main window and GL canvas,
// parses the relevant command line options, creates and configures the global
// NodeList singleton, loads settings, and wires up the menu bar. GL-dependent
// initialization is deferred to initializeGL().
// NOTE(review): member-initializer order below should match declaration order
// in Application.h — cannot be confirmed from this file alone.
Application::Application(int& argc, char** argv, timeval &startup_time) :
        QApplication(argc, argv),
        _window(new QMainWindow(desktop())),
        _glWidget(new GLCanvas()),
        _displayLevels(false),
        _frameCount(0),
        _fps(120.0f),
        _justStarted(true),
        _voxelImporter(_window),
        _wantToKillLocalVoxels(false),
        _audioScope(256, 200, true),
        _mouseX(0),
        _mouseY(0),
        _touchAvgX(0.0f),
        _touchAvgY(0.0f),
        _isTouchPressed(false),
        _yawFromTouch(0.0f),
        _pitchFromTouch(0.0f),
        _mousePressed(false),
        _isHoverVoxel(false),
        _isHoverVoxelSounding(false),
        _mouseVoxelScale(1.0f / 1024.0f),
        _justEditedVoxel(false),
        _nudgeStarted(false),
        _lookingAlongX(false),
        _lookingAwayFromOrigin(true),
        _isLookingAtOtherAvatar(false),
        _lookatIndicatorScale(1.0f),
        _perfStatsOn(false),
        _chatEntryOn(false),
        _oculusProgram(0),
        _oculusDistortionScale(1.25),
#ifndef _WIN32
        _audio(&_audioScope, STARTUP_JITTER_SAMPLES),
#endif
        _stopNetworkReceiveThread(false),
        _voxelProcessor(),
        _voxelEditSender(this),
        _packetCount(0),
        _packetsPerSecond(0),
        _bytesPerSecond(0),
        _bytesCount(0),
        _swatch(NULL),
        _pasteMode(false)
{
    _applicationStartupTime = startup_time;
    _window->setWindowTitle("Interface");

    // route all Qt log output through our handler (stdout + LogDisplay)
    qInstallMessageHandler(messageHandler);

    unsigned int listenPort = 0; // bind to an ephemeral port by default
    const char** constArgv = const_cast<const char**>(argv);
    const char* portStr = getCmdOption(argc, constArgv, "--listenPort");
    if (portStr) {
        listenPort = atoi(portStr);
    }

    NodeList::createInstance(NODE_TYPE_AGENT, listenPort);

    // register the voxel system and the application itself for node add/kill callbacks
    NodeList::getInstance()->addHook(&_voxels);
    NodeList::getInstance()->addHook(this);

    // network receive thread and voxel parsing thread are both controlled by the --nonblocking command line
    _enableProcessVoxelsThread = _enableNetworkThread = !cmdOptionExists(argc, constArgv, "--nonblocking");
    if (!_enableNetworkThread) {
        // without a dedicated receive thread the socket must not block the main loop
        NodeList::getInstance()->getNodeSocket()->setBlocking(false);
    }

    // setup QSettings
#ifdef Q_OS_MAC
    QString resourcesPath = QCoreApplication::applicationDirPath() + "/../Resources";
#else
    QString resourcesPath = QCoreApplication::applicationDirPath() + "/resources";
#endif

    // read the ApplicationInfo.ini file for Name/Version/Domain information
    QSettings applicationInfo(resourcesPath + "/info/ApplicationInfo.ini", QSettings::IniFormat);

    // set the associated application properties
    applicationInfo.beginGroup("INFO");

    setApplicationName(applicationInfo.value("name").toString());
    setApplicationVersion(applicationInfo.value("version").toString());
    setOrganizationName(applicationInfo.value("organizationName").toString());
    setOrganizationDomain(applicationInfo.value("organizationDomain").toString());

    // default QSettings store, keyed off the application/organization set above
    _settings = new QSettings(this);

    // check if there is a saved domain server hostname
    // this must be done now instead of with the other setting checks to allow manual override with
    // --domain or --local options
    NodeList::getInstance()->loadData(_settings);

    // Check to see if the user passed in a command line option for loading a local
    // Voxel File.
    _voxelsFilename = getCmdOption(argc, constArgv, "-i");

    // the callback for our instance of NodeList is attachNewHeadToNode
    NodeList::getInstance()->linkedDataCreateCallback = &attachNewHeadToNode;

#ifdef _WIN32
    // Winsock must be initialized before any socket use on Windows
    WSADATA WsaData;
    int wsaresult = WSAStartup(MAKEWORD(2,2), &WsaData);
#endif

    // tell the NodeList instance who to tell the domain server we care about
    const char nodeTypesOfInterest[] = {NODE_TYPE_AUDIO_MIXER, NODE_TYPE_AVATAR_MIXER, NODE_TYPE_VOXEL_SERVER};
    NodeList::getInstance()->setNodeTypesOfInterest(nodeTypesOfInterest, sizeof(nodeTypesOfInterest));

    // start the nodeList threads
    NodeList::getInstance()->startSilentNodeRemovalThread();

    _window->setCentralWidget(_glWidget);

    // call Menu getInstance static method to set up the menu
    _window->setMenuBar(Menu::getInstance());

    _networkAccessManager = new QNetworkAccessManager(this);

    // size the window to the full available desktop area and show it
    QRect available = desktop()->availableGeometry();
    _window->resize(available.size());
    _window->setVisible(true);
    _glWidget->setFocusPolicy(Qt::StrongFocus);
    _glWidget->setFocus();

    // enable mouse tracking; otherwise, we only get drag events
    _glWidget->setMouseTracking(true);

    // initialization continues in initializeGL when OpenGL context is ready

    // Tell our voxel edit sender about our known jurisdictions
    _voxelEditSender.setVoxelServerJurisdictions(&_voxelServerJurisdictions);
}
|
|
|
|
// Tear down the application: detach node-list hooks, swap the shared voxel
// tree for an empty one, shut down audio, then release owned heap objects.
Application::~Application() {
    // unhook before destruction so node add/kill callbacks stop arriving
    NodeList::getInstance()->removeHook(&_voxels);
    NodeList::getInstance()->removeHook(this);

    // replace the shared tree so its previous contents are released
    _sharedVoxelSystem.changeTree(new VoxelTree);

    _audio.shutdown();

    delete Menu::getInstance();

    delete _oculusProgram;
    delete _settings;
    delete _networkAccessManager;
    delete _followMode;
    // NOTE(review): _glWidget was set as _window's central widget in the
    // constructor, which normally gives Qt parent ownership — confirm this
    // explicit delete cannot double-free when _window is destroyed.
    delete _glWidget;
}
|
|
|
|
// Called once the OpenGL context exists: finishes initialization that needs
// GL (display setup, glut), loads any command-line voxel file, spins up the
// network-receive and voxel-processing threads, and starts the per-second
// stats timer and the idle timer that drives simulation.
void Application::initializeGL() {
    qDebug( "Created Display Window.\n" );

    // initialize glut for shape drawing; Qt apparently initializes it on OS X
#ifndef __APPLE__
    int argc = 0;
    glutInit(&argc, 0);
#endif

    // Before we render anything, let's set up our viewFrustumOffsetCamera with a sufficiently large
    // field of view and near and far clip to make it interesting.
    //viewFrustumOffsetCamera.setFieldOfView(90.0);
    _viewFrustumOffsetCamera.setNearClip(0.1);
    _viewFrustumOffsetCamera.setFarClip(500.0 * TREE_SCALE);

    initDisplay();
    qDebug( "Initialized Display.\n" );

    init();
    qDebug( "Init() complete.\n" );

    // Check to see if the user passed in a command line option for randomizing colors
    bool wantColorRandomizer = !arguments().contains("--NoColorRandomizer");

    // Check to see if the user passed in a command line option for loading a local
    // Voxel File. If so, load it now.
    if (!_voxelsFilename.isEmpty()) {
        _voxels.loadVoxelsFile(_voxelsFilename.constData(), wantColorRandomizer);
        qDebug("Local Voxel File loaded.\n");
    }

    // create thread for receipt of data via UDP
    if (_enableNetworkThread) {
        pthread_create(&_networkReceiveThread, NULL, networkReceive, NULL);
        qDebug("Network receive thread created.\n");
    }

    // create thread for parsing of voxel data independent of the main network and rendering threads
    _voxelProcessor.initialize(_enableProcessVoxelsThread);
    _voxelEditSender.initialize(_enableProcessVoxelsThread);
    if (_enableProcessVoxelsThread) {
        qDebug("Voxel parsing thread created.\n");
    }

    // call terminate before exiting
    connect(this, SIGNAL(aboutToQuit()), SLOT(terminate()));

    // call our timer function every second
    QTimer* timer = new QTimer(this);
    connect(timer, SIGNAL(timeout()), SLOT(timer()));
    timer->start(1000);

    // call our idle function whenever we can (0 ms interval = every event-loop pass)
    idleTimer = new QTimer(this);
    connect(idleTimer, SIGNAL(timeout()), SLOT(idle()));
    idleTimer->start(0);
    _idleLoopStdev.reset();

    // on first entry, report how long startup took and stash it for analytics
    if (_justStarted) {
        float startupTime = (usecTimestampNow() - usecTimestamp(&_applicationStartupTime)) / 1000000.0;
        _justStarted = false;
        char title[50];
        sprintf(title, "Interface: %4.2f seconds\n", startupTime);
        qDebug("%s", title);
        _window->setWindowTitle(title);

        const char LOGSTASH_INTERFACE_START_TIME_KEY[] = "interface-start-time";

        // ask the Logstash class to record the startup time
        Logging::stashValue(STAT_TYPE_TIMER, LOGSTASH_INTERFACE_START_TIME_KEY, startupTime);
    }

    // update before the first render
    update(0.0f);

    // now that things are drawn - if this is an OS X release build we can check for an update
#if defined(Q_OS_MAC) && defined(QT_NO_DEBUG)
    Menu::getInstance()->checkForUpdates();
#endif

    InfoView::showFirstTime(Menu::getInstance());
}
|
|
|
|
// Render one frame: position _myCamera according to the current camera mode,
// optionally substitute the view-frustum-offset debug camera, then draw the
// scene (via Oculus path or standard path with glow effect and overlay).
void Application::paintGL() {
    PerfStat("display");

    glEnable(GL_LINE_SMOOTH);

    // Choose camera target/behavior per mode. Oculus takes priority over
    // first/third person; mirror mode looks back at the avatar's head.
    if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
        _myCamera.setTightness (100.0f);
        _myCamera.setTargetPosition(_myAvatar.getUprightHeadPosition());
        // rotate 180 degrees around Y so the camera faces the avatar
        _myCamera.setTargetRotation(_myAvatar.getWorldAlignedOrientation() * glm::quat(glm::vec3(0.0f, PIf, 0.0f)));

    } else if (OculusManager::isConnected()) {
        _myCamera.setUpShift (0.0f);
        _myCamera.setDistance (0.0f);
        _myCamera.setTightness (0.0f); // Camera is directly connected to head without smoothing
        _myCamera.setTargetPosition(_myAvatar.getHeadJointPosition());
        _myCamera.setTargetRotation(_myAvatar.getHead().getOrientation());

    } else if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON) {
        _myCamera.setTightness(0.0f); // In first person, camera follows head exactly without delay
        _myCamera.setTargetPosition(_myAvatar.getUprightEyeLevelPosition());
        _myCamera.setTargetRotation(_myAvatar.getHead().getCameraOrientation());

    } else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
        _myCamera.setTargetPosition(_myAvatar.getUprightHeadPosition());
        _myCamera.setTargetRotation(_myAvatar.getHead().getCameraOrientation());
    }

    // Update camera position
    _myCamera.update( 1.f/_fps );

    // Note: whichCamera is used to pick between the normal camera myCamera for our
    // main camera, vs, an alternate camera. The alternate camera we support right now
    // is the viewFrustumOffsetCamera. But theoretically, we could use this same mechanism
    // to add other cameras.
    //
    // Why have two cameras? Well, one reason is that because in the case of the renderViewFrustum()
    // code, we want to keep the state of "myCamera" intact, so we can render what the view frustum of
    // myCamera is. But we also want to do meaningful camera transforms on OpenGL for the offset camera
    Camera whichCamera = _myCamera;

    if (Menu::getInstance()->isOptionChecked(MenuOption::DisplayFrustum)) {

        ViewFrustumOffset viewFrustumOffset = Menu::getInstance()->getViewFrustumOffset();

        // set the camera to third-person view but offset so we can see the frustum
        _viewFrustumOffsetCamera.setTargetPosition(_myCamera.getTargetPosition());
        _viewFrustumOffsetCamera.setTargetRotation(_myCamera.getTargetRotation() * glm::quat(glm::radians(glm::vec3(
            viewFrustumOffset.pitch, viewFrustumOffset.yaw, viewFrustumOffset.roll))));
        _viewFrustumOffsetCamera.setUpShift(viewFrustumOffset.up);
        _viewFrustumOffsetCamera.setDistance(viewFrustumOffset.distance);
        _viewFrustumOffsetCamera.initialize(); // force immediate snap to ideal position and orientation
        _viewFrustumOffsetCamera.update(1.f/_fps);
        whichCamera = _viewFrustumOffsetCamera;
    }

    if (OculusManager::isConnected()) {
        // Oculus path handles its own stereo rendering and distortion
        displayOculus(whichCamera);

    } else {
        _glowEffect.prepare();

        glMatrixMode(GL_MODELVIEW);
        glPushMatrix();
        glLoadIdentity();
        displaySide(whichCamera);
        glPopMatrix();

        _glowEffect.render();

        // 2D HUD drawn on top of the 3D scene
        displayOverlay();
    }

    _frameCount++;
}
|
|
|
|
// Recompute a camera's aspect ratio and field of view after a window resize.
// When an Oculus is connected, each eye gets half the width and the FOV is
// derived from the headset's screen geometry and distortion scale.
void Application::resetCamerasOnResizeGL(Camera& camera, int width, int height) {
    float aspectRatio = ((float)width/(float)height); // based on screen resize

    if (OculusManager::isConnected()) {
        // more magic numbers; see Oculus SDK docs, p. 32
        // half the aspect ratio: each eye renders to half the screen width
        camera.setAspectRatio(aspectRatio *= 0.5);
        camera.setFieldOfView(2 * atan((0.0468 * _oculusDistortionScale) / 0.041) * (180 / PIf));
    } else {
        camera.setAspectRatio(aspectRatio);
        camera.setFieldOfView(Menu::getInstance()->getFieldOfView());
    }
}
|
|
|
|
// GL resize callback: update both cameras for the new dimensions, reset the
// viewport, and rebuild the projection matrix.
void Application::resizeGL(int width, int height) {
    resetCamerasOnResizeGL(_viewFrustumOffsetCamera, width, height);
    resetCamerasOnResizeGL(_myCamera, width, height);

    glViewport(0, 0, width, height); // shouldn't this account for the menu???

    updateProjectionMatrix();
    // updateProjectionMatrix leaves us in GL_MODELVIEW; reset it for drawing
    glLoadIdentity();
}
|
|
|
|
// Rebuild the GL projection matrix from the application camera's off-axis
// frustum, then restore GL_MODELVIEW as the active matrix mode.
void Application::updateProjectionMatrix() {
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();

    // Tell our viewFrustum about this change, using the application camera
    loadViewFrustum(_myCamera, _viewFrustum);

    float left, right, bottom, top, nearVal, farVal;
    glm::vec4 nearClipPlane, farClipPlane;
    computeOffAxisFrustum(left, right, bottom, top, nearVal, farVal, nearClipPlane, farClipPlane);

    // If we're in Display Frustum mode, then we want to use the slightly adjust near/far clip values of the
    // _viewFrustumOffsetCamera, so that we can see more of the application content in the application's frustum
    if (Menu::getInstance()->isOptionChecked(MenuOption::DisplayFrustum)) {
        nearVal = _viewFrustumOffsetCamera.getNearClip();
        farVal = _viewFrustumOffsetCamera.getFarClip();
    }
    glFrustum(left, right, bottom, top, nearVal, farVal);

    glMatrixMode(GL_MODELVIEW);
}
|
|
|
|
// Broadcast a packet to nodes of the given types, one type at a time, while
// honoring the Voxels menu toggle (voxel-server traffic is suppressed when
// voxels are disabled) and feeding per-channel bandwidth statistics.
void Application::controlledBroadcastToNodes(unsigned char* broadcastData, size_t dataBytes,
                                             const char* nodeTypes, int numNodeTypes) {
    Application* self = getInstance();
    for (int i = 0; i < numNodeTypes; ++i) {

        // Intercept data to voxel server when voxels are disabled
        if (nodeTypes[i] == NODE_TYPE_VOXEL_SERVER && !Menu::getInstance()->isOptionChecked(MenuOption::Voxels)) {
            continue;
        }

        // Perform the broadcast for one type
        int nReceivingNodes = NodeList::getInstance()->broadcastToNodes(broadcastData, dataBytes, & nodeTypes[i], 1);

        // Feed number of bytes to corresponding channel of the bandwidth meter, if any (done otherwise)
        // Unmapped node types skip the meter entirely via the default: continue.
        BandwidthMeter::ChannelIndex channel;
        switch (nodeTypes[i]) {
            case NODE_TYPE_AGENT:
            case NODE_TYPE_AVATAR_MIXER:
                channel = BandwidthMeter::AVATARS;
                break;
            case NODE_TYPE_VOXEL_SERVER:
                channel = BandwidthMeter::VOXELS;
                break;
            default:
                continue;
        }
        // total bytes sent = packet size times how many nodes received it
        self->_bandwidthMeter.outputStream(channel).updateValue(nReceivingNodes * dataBytes);
    }
}
|
|
|
|
// Keyboard handler for the main window. When chat entry is active all keys go
// to the chat line. Otherwise keys drive: avatar movement (WASD/arrows/E/C),
// voxel nudging (same keys while a nudge is in progress, direction resolved
// against the current look axis), camera eye-offset tweaks (I/K/J/M/U/Y),
// menu-option toggles, and swatch color selection (1-8).
void Application::keyPressEvent(QKeyEvent* event) {
    if (activeWindow() == _window) {
        // --- chat entry mode: keystrokes edit the chat message, Enter sends ---
        if (_chatEntryOn) {
            if (_chatEntry.keyPressEvent(event)) {
                _myAvatar.setKeyState(event->key() == Qt::Key_Backspace || event->key() == Qt::Key_Delete ?
                    DELETE_KEY_DOWN : INSERT_KEY_DOWN);
                // while typing, other users see a string of solid blocks of the same length
                _myAvatar.setChatMessage(string(_chatEntry.getContents().size(), SOLID_BLOCK_CHAR));

            } else {
                // chat entry reported completion: publish the message and exit chat mode
                _myAvatar.setChatMessage(_chatEntry.getContents());
                _chatEntry.clear();
                _chatEntryOn = false;
                setMenuShortcutsEnabled(true);
            }
            return;
        }

        //this is for switching between modes for the leap rave glove test
        if (Menu::getInstance()->isOptionChecked(MenuOption::SimulateLeapHand)
            || Menu::getInstance()->isOptionChecked(MenuOption::TestRaveGlove)) {
            _myAvatar.getHand().setRaveGloveEffectsMode((QKeyEvent*)event);
        }

        bool isShifted = event->modifiers().testFlag(Qt::ShiftModifier);
        switch (event->key()) {
            case Qt::Key_Shift:
                // holding shift in select mode arms paste-on-click
                if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelSelectMode)) {
                    _pasteMode = true;
                }
                break;
            // these punctuation keys adjust the view frustum offset (debug feature)
            case Qt::Key_BracketLeft:
            case Qt::Key_BracketRight:
            case Qt::Key_BraceLeft:
            case Qt::Key_BraceRight:
            case Qt::Key_ParenLeft:
            case Qt::Key_ParenRight:
            case Qt::Key_Less:
            case Qt::Key_Greater:
            case Qt::Key_Comma:
            case Qt::Key_Period:
                Menu::getInstance()->handleViewFrustumOffsetKeyModifier(event->key());
                break;
            case Qt::Key_Semicolon:
                _audio.ping();
                break;
            case Qt::Key_Apostrophe:
                _audioScope.inputPaused = !_audioScope.inputPaused;
                break;
            case Qt::Key_L:
                _displayLevels = !_displayLevels;
                break;

            case Qt::Key_E:
                // nudge up, or jump/fly up
                if (_nudgeStarted) {
                    _nudgeGuidePosition.y += _mouseVoxel.s;
                } else {
                    if (!_myAvatar.getDriveKeys(UP)) {
                        _myAvatar.jump();
                    }
                    _myAvatar.setDriveKeys(UP, 1);
                }
                break;

            case Qt::Key_C:
                // shift-C: occlusion culling toggle; otherwise nudge down or move down
                if (isShifted) {
                    Menu::getInstance()->triggerOption(MenuOption::OcclusionCulling);
                } else if (_nudgeStarted) {
                    _nudgeGuidePosition.y -= _mouseVoxel.s;
                } else {
                    _myAvatar.setDriveKeys(DOWN, 1);
                }
                break;

            case Qt::Key_W:
                // nudge forward along the look axis, or drive forward
                if (_nudgeStarted) {
                    if (_lookingAlongX) {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.x += _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.x -= _mouseVoxel.s;
                        }
                    } else {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.z += _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.z -= _mouseVoxel.s;
                        }
                    }
                } else {
                    _myAvatar.setDriveKeys(FWD, 1);
                }
                break;

            case Qt::Key_S:
                // shift-S: dump voxel stats; else nudge backward or drive backward
                if (isShifted) {
                    _voxels.collectStatsForTreesAndVBOs();
                } else if (_nudgeStarted) {
                    if (_lookingAlongX) {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.x -= _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.x += _mouseVoxel.s;
                        }
                    } else {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.z -= _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.z += _mouseVoxel.s;
                        }
                    }
                } else {
                    _myAvatar.setDriveKeys(BACK, 1);
                }
                break;

            case Qt::Key_Space:
                // reset all sensors and the audio pipeline
                resetSensors();
                _audio.reset();
                break;

            case Qt::Key_G:
                if (isShifted) {
                    Menu::getInstance()->triggerOption(MenuOption::Gravity);
                } else {
                    Menu::getInstance()->triggerOption(MenuOption::VoxelGetColorMode);
                }
                break;

            case Qt::Key_A:
                // shift-A: atmosphere toggle; else nudge left or rotate left
                if (isShifted) {
                    Menu::getInstance()->triggerOption(MenuOption::Atmosphere);
                } else if (_nudgeStarted) {
                    if (_lookingAlongX) {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.z -= _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.z += _mouseVoxel.s;
                        }
                    } else {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.x += _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.x -= _mouseVoxel.s;
                        }
                    }
                } else {
                    _myAvatar.setDriveKeys(ROT_LEFT, 1);
                }
                break;

            case Qt::Key_D:
                // nudge right or rotate right
                if (_nudgeStarted) {
                    if (_lookingAlongX) {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.z += _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.z -= _mouseVoxel.s;
                        }
                    } else {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.x -= _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.x += _mouseVoxel.s;
                        }
                    }
                } else {
                    _myAvatar.setDriveKeys(ROT_RIGHT, 1);
                }
                break;

            case Qt::Key_Return:
            case Qt::Key_Enter:
                // Enter commits a pending nudge, or opens chat entry
                if (_nudgeStarted) {
                    nudgeVoxels();
                } else {
                    _chatEntryOn = true;
                    _myAvatar.setKeyState(NO_KEY_DOWN);
                    _myAvatar.setChatMessage(string());
                    setMenuShortcutsEnabled(false);
                }
                break;

            case Qt::Key_Up:
                // nudge forward (shift: up), or drive forward (shift: up)
                if (_nudgeStarted && !isShifted) {
                    if (_lookingAlongX) {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.x += _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.x -= _mouseVoxel.s;
                        }
                    } else {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.z += _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.z -= _mouseVoxel.s;
                        }
                    }
                } else if (_nudgeStarted && isShifted) {
                    _nudgeGuidePosition.y += _mouseVoxel.s;
                } else {
                    _myAvatar.setDriveKeys(isShifted ? UP : FWD, 1);
                }
                break;

            case Qt::Key_Down:
                // nudge backward (shift: down), or drive backward (shift: down)
                if (_nudgeStarted && !isShifted) {
                    if (_lookingAlongX) {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.x -= _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.x += _mouseVoxel.s;
                        }
                    } else {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.z -= _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.z += _mouseVoxel.s;
                        }
                    }
                } else if (_nudgeStarted && isShifted) {
                    _nudgeGuidePosition.y -= _mouseVoxel.s;
                } else {
                    _myAvatar.setDriveKeys(isShifted ? DOWN : BACK, 1);
                }
                break;

            case Qt::Key_Left:
                // nudge left, or strafe left (shift) / rotate left
                if (_nudgeStarted) {
                    if (_lookingAlongX) {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.z -= _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.z += _mouseVoxel.s;
                        }
                    } else {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.x += _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.x -= _mouseVoxel.s;
                        }
                    }
                } else {
                    _myAvatar.setDriveKeys(isShifted ? LEFT : ROT_LEFT, 1);
                }
                break;

            case Qt::Key_Right:
                // nudge right, or strafe right (shift) / rotate right
                if (_nudgeStarted) {
                    if (_lookingAlongX) {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.z += _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.z -= _mouseVoxel.s;
                        }
                    } else {
                        if (_lookingAwayFromOrigin) {
                            _nudgeGuidePosition.x -= _mouseVoxel.s;
                        } else {
                            _nudgeGuidePosition.x += _mouseVoxel.s;
                        }
                    }
                } else {
                    _myAvatar.setDriveKeys(isShifted ? RIGHT : ROT_RIGHT, 1);
                }
                break;

            // I/K/J/M/U/Y tweak the camera eye offset (shift: rotate, else translate)
            case Qt::Key_I:
                if (isShifted) {
                    _myCamera.setEyeOffsetOrientation(glm::normalize(
                        glm::quat(glm::vec3(0.002f, 0, 0)) * _myCamera.getEyeOffsetOrientation()));
                } else {
                    _myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(0, 0.001, 0));
                }
                updateProjectionMatrix();
                break;

            case Qt::Key_K:
                if (isShifted) {
                    _myCamera.setEyeOffsetOrientation(glm::normalize(
                        glm::quat(glm::vec3(-0.002f, 0, 0)) * _myCamera.getEyeOffsetOrientation()));
                } else {
                    _myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(0, -0.001, 0));
                }
                updateProjectionMatrix();
                break;

            case Qt::Key_J:
                if (isShifted) {
                    _viewFrustum.setFocalLength(_viewFrustum.getFocalLength() - 0.1f);

                } else {
                    _myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(-0.001, 0, 0));
                }
                updateProjectionMatrix();
                break;

            case Qt::Key_M:
                if (isShifted) {
                    _viewFrustum.setFocalLength(_viewFrustum.getFocalLength() + 0.1f);

                } else {
                    _myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(0.001, 0, 0));
                }
                updateProjectionMatrix();
                break;

            case Qt::Key_U:
                if (isShifted) {
                    _myCamera.setEyeOffsetOrientation(glm::normalize(
                        glm::quat(glm::vec3(0, 0, -0.002f)) * _myCamera.getEyeOffsetOrientation()));
                } else {
                    _myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(0, 0, -0.001));
                }
                updateProjectionMatrix();
                break;

            case Qt::Key_Y:
                if (isShifted) {
                    _myCamera.setEyeOffsetOrientation(glm::normalize(
                        glm::quat(glm::vec3(0, 0, 0.002f)) * _myCamera.getEyeOffsetOrientation()));
                } else {
                    _myCamera.setEyeOffsetPosition(_myCamera.getEyeOffsetPosition() + glm::vec3(0, 0, 0.001));
                }
                updateProjectionMatrix();
                break;
            case Qt::Key_H:
                Menu::getInstance()->triggerOption(MenuOption::Mirror);
                break;
            case Qt::Key_F:
                if (isShifted) {
                    Menu::getInstance()->triggerOption(MenuOption::DisplayFrustum);
                } else {
                    Menu::getInstance()->triggerOption(MenuOption::Fullscreen);
                }
                break;
            case Qt::Key_V:
                if (isShifted) {
                    Menu::getInstance()->triggerOption(MenuOption::Voxels);
                } else {
                    Menu::getInstance()->triggerOption(MenuOption::VoxelAddMode);
                    _nudgeStarted = false;
                }
                break;
            case Qt::Key_P:
                Menu::getInstance()->triggerOption(MenuOption::FirstPerson);
                break;
            case Qt::Key_R:
                if (isShifted) {
                    Menu::getInstance()->triggerOption(MenuOption::FrustumRenderMode);
                } else {
                    Menu::getInstance()->triggerOption(MenuOption::VoxelDeleteMode);
                    _nudgeStarted = false;
                }
                break;
            case Qt::Key_B:
                Menu::getInstance()->triggerOption(MenuOption::VoxelColorMode);
                _nudgeStarted = false;
                break;
            case Qt::Key_O:
                Menu::getInstance()->triggerOption(MenuOption::VoxelSelectMode);
                _nudgeStarted = false;
                break;
            case Qt::Key_Slash:
                Menu::getInstance()->triggerOption(MenuOption::Stats);
                break;
            case Qt::Key_Backspace:
            case Qt::Key_Delete:
                if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelDeleteMode) ||
                    Menu::getInstance()->isOptionChecked(MenuOption::VoxelSelectMode)) {
                    deleteVoxelUnderCursor();
                }
                break;
            case Qt::Key_Plus:
                _myAvatar.increaseSize();
                break;
            case Qt::Key_Minus:
                _myAvatar.decreaseSize();
                break;

            // number keys select swatch colors (or pick up color in get-color mode)
            case Qt::Key_1:
            case Qt::Key_2:
            case Qt::Key_3:
            case Qt::Key_4:
            case Qt::Key_5:
            case Qt::Key_6:
            case Qt::Key_7:
            case Qt::Key_8:
                _swatch.handleEvent(event->key(), Menu::getInstance()->isOptionChecked(MenuOption::VoxelGetColorMode));
                break;
            default:
                event->ignore();
                break;
        }
    }
}
|
|
|
|
// Key release handler: clears the avatar drive keys set in keyPressEvent.
// Arrow keys clear both of their possible bindings (shifted and unshifted)
// since the modifier state may have changed between press and release.
void Application::keyReleaseEvent(QKeyEvent* event) {
    if (activeWindow() == _window) {
        if (_chatEntryOn) {
            // chat entry owns the keyboard; just stop the typing animation
            _myAvatar.setKeyState(NO_KEY_DOWN);
            return;
        }

        switch (event->key()) {
            case Qt::Key_Shift:
                _pasteMode = false;
                break;
            case Qt::Key_E:
                _myAvatar.setDriveKeys(UP, 0);
                break;

            case Qt::Key_C:
                _myAvatar.setDriveKeys(DOWN, 0);
                break;

            case Qt::Key_W:
                _myAvatar.setDriveKeys(FWD, 0);
                break;

            case Qt::Key_S:
                _myAvatar.setDriveKeys(BACK, 0);
                break;

            case Qt::Key_A:
                _myAvatar.setDriveKeys(ROT_LEFT, 0);
                break;

            case Qt::Key_D:
                _myAvatar.setDriveKeys(ROT_RIGHT, 0);
                break;

            case Qt::Key_Up:
                _myAvatar.setDriveKeys(FWD, 0);
                _myAvatar.setDriveKeys(UP, 0);
                break;

            case Qt::Key_Down:
                _myAvatar.setDriveKeys(BACK, 0);
                _myAvatar.setDriveKeys(DOWN, 0);
                break;

            case Qt::Key_Left:
                _myAvatar.setDriveKeys(LEFT, 0);
                _myAvatar.setDriveKeys(ROT_LEFT, 0);
                break;

            case Qt::Key_Right:
                _myAvatar.setDriveKeys(RIGHT, 0);
                _myAvatar.setDriveKeys(ROT_RIGHT, 0);
                break;

            default:
                event->ignore();
                break;
        }
    }
}
|
|
|
|
// Mouse move handler: tracks the cursor position, performs drag-editing of
// voxels when a button is held and the hovered voxel has changed, and feeds
// the position to the pie menu.
void Application::mouseMoveEvent(QMouseEvent* event) {
    if (activeWindow() == _window) {
        _mouseX = event->x();
        _mouseY = event->y();

        // detect drag
        // only edit again once the cursor has moved onto a different voxel
        glm::vec3 mouseVoxelPos(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z);
        if (!_justEditedVoxel && mouseVoxelPos != _lastMouseVoxelPos) {
            if (event->buttons().testFlag(Qt::LeftButton)) {
                maybeEditVoxelUnderCursor();

            } else if (event->buttons().testFlag(Qt::RightButton) && Menu::getInstance()->isVoxelModeActionChecked()) {
                deleteVoxelUnderCursor();
            }
        }

        _pieMenu.mouseMoveEvent(_mouseX, _mouseY);
    }
}
|
|
|
|
// Voxel hover/click audio feedback tuning constants.
const bool MAKE_SOUND_ON_VOXEL_HOVER = false;
const bool MAKE_SOUND_ON_VOXEL_CLICK = true;
const float HOVER_VOXEL_FREQUENCY = 7040.f;
const float HOVER_VOXEL_DECAY = 0.999f;

// Mouse press handler. Left button: record the drag start, attempt a voxel
// edit, possibly open the pie menu or paste voxels, and play a click tone
// whose pitch is derived from the clicked voxel's color. Right button deletes
// the voxel under the cursor when a voxel edit mode is active.
void Application::mousePressEvent(QMouseEvent* event) {
    if (activeWindow() == _window) {
        if (event->button() == Qt::LeftButton) {
            _mouseX = event->x();
            _mouseY = event->y();
            _mouseDragStartedX = _mouseX;
            _mouseDragStartedY = _mouseY;
            _mouseVoxelDragging = _mouseVoxel;
            _mousePressed = true;

            maybeEditVoxelUnderCursor();

            // open the pie menu only when the click isn't on the palette or a hovered voxel
            if (!_palette.isActive() && (!_isHoverVoxel || _isLookingAtOtherAvatar)) {
                _pieMenu.mousePressEvent(_mouseX, _mouseY);
            }

            // shift-click in select mode pastes the copied voxels (see keyPress/Release for _pasteMode)
            if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelSelectMode) && _pasteMode) {
                pasteVoxels();
            }

            if (MAKE_SOUND_ON_VOXEL_CLICK && _isHoverVoxel && !_isHoverVoxelSounding) {
                // remember the voxel's color so it can be restored after the effect
                _hoverVoxelOriginalColor[0] = _hoverVoxel.red;
                _hoverVoxelOriginalColor[1] = _hoverVoxel.green;
                _hoverVoxelOriginalColor[2] = _hoverVoxel.blue;
                _hoverVoxelOriginalColor[3] = 1;
                // pitch is middle A plus a color-weighted average of these per-channel frequencies
                const float RED_CLICK_FREQUENCY = 1000.f;
                const float GREEN_CLICK_FREQUENCY = 1250.f;
                const float BLUE_CLICK_FREQUENCY = 1330.f;
                const float MIDDLE_A_FREQUENCY = 440.f;
                float frequency = MIDDLE_A_FREQUENCY +
                    (_hoverVoxel.red / 255.f * RED_CLICK_FREQUENCY +
                    _hoverVoxel.green / 255.f * GREEN_CLICK_FREQUENCY +
                    _hoverVoxel.blue / 255.f * BLUE_CLICK_FREQUENCY) / 3.f;

                _audio.startCollisionSound(1.0, frequency, 0.0, HOVER_VOXEL_DECAY);
                _isHoverVoxelSounding = true;
            }

        } else if (event->button() == Qt::RightButton && Menu::getInstance()->isVoxelModeActionChecked()) {
            deleteVoxelUnderCursor();
        }
    }
}
|
|
|
|
// Handle a mouse button release. Only left-button releases inside the active
// application window matter: they end the press state, may register a click on
// the bandwidth meter, and are forwarded to the pie menu.
void Application::mouseReleaseEvent(QMouseEvent* event) {
    if (activeWindow() != _window) {
        return;
    }
    if (event->button() != Qt::LeftButton) {
        return;
    }
    _mouseX = event->x();
    _mouseY = event->y();
    _mousePressed = false;
    checkBandwidthMeterClick();

    _pieMenu.mouseReleaseEvent(_mouseX, _mouseY);
}
|
|
|
|
// Touch update handler: averages the positions of all touch points into
// (_touchAvgX, _touchAvgY). A "valid" touch requires more than one point and
// the active window; when the touch first becomes pressed the drag origin is
// latched so touch-look deltas can be measured against it.
void Application::touchUpdateEvent(QTouchEvent* event) {
    bool validTouch = false;
    if (activeWindow() == _window) {
        const QList<QTouchEvent::TouchPoint>& tPoints = event->touchPoints();
        _touchAvgX = 0.0f;
        _touchAvgY = 0.0f;
        int numTouches = tPoints.count();
        // single-finger touches are ignored (they also arrive as mouse events)
        if (numTouches > 1) {
            for (int i = 0; i < numTouches; ++i) {
                _touchAvgX += tPoints[i].pos().x();
                _touchAvgY += tPoints[i].pos().y();
            }
            _touchAvgX /= (float)(numTouches);
            _touchAvgY /= (float)(numTouches);
            validTouch = true;
        }
    }
    // transition from not-pressed: record where this drag started
    if (!_isTouchPressed) {
        _touchDragStartedAvgX = _touchAvgX;
        _touchDragStartedAvgY = _touchAvgY;
    }
    _isTouchPressed = validTouch;
}
|
|
|
|
void Application::touchBeginEvent(QTouchEvent* event) {
    // Route through the shared update path to compute the touch averages,
    // then seed the "last" averages so the first touch delta is zero.
    touchUpdateEvent(event);
    _lastTouchAvgX = _touchAvgX;
    _lastTouchAvgY = _touchAvgY;
}
|
|
|
|
void Application::touchEndEvent(QTouchEvent* event) {
    // Reset the drag origin to the last averaged position and mark released.
    _touchDragStartedAvgX = _touchAvgX;
    _touchDragStartedAvgY = _touchAvgY;
    _isTouchPressed = false;
}
|
|
|
|
const bool USE_MOUSEWHEEL = false;
|
|
void Application::wheelEvent(QWheelEvent* event) {
|
|
// Wheel Events disabled for now because they are also activated by touch look pitch up/down.
|
|
if (USE_MOUSEWHEEL && (activeWindow() == _window)) {
|
|
if (!Menu::getInstance()->isVoxelModeActionChecked()) {
|
|
event->ignore();
|
|
return;
|
|
}
|
|
if (event->delta() > 0) {
|
|
increaseVoxelSize();
|
|
} else {
|
|
decreaseVoxelSize();
|
|
}
|
|
}
|
|
}
|
|
|
|
void Application::sendPingPackets() {
|
|
|
|
const char nodesToPing[] = {NODE_TYPE_VOXEL_SERVER, NODE_TYPE_AUDIO_MIXER, NODE_TYPE_AVATAR_MIXER};
|
|
|
|
uint64_t currentTime = usecTimestampNow();
|
|
unsigned char pingPacket[numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_PING) + sizeof(currentTime)];
|
|
int numHeaderBytes = populateTypeAndVersion(pingPacket, PACKET_TYPE_PING);
|
|
|
|
memcpy(pingPacket + numHeaderBytes, ¤tTime, sizeof(currentTime));
|
|
getInstance()->controlledBroadcastToNodes(pingPacket, sizeof(pingPacket),
|
|
nodesToPing, sizeof(nodesToPing));
|
|
}
|
|
|
|
void Application::sendAvatarFaceVideoMessage(int frameCount, const QByteArray& data) {
|
|
unsigned char packet[MAX_PACKET_SIZE];
|
|
unsigned char* packetPosition = packet;
|
|
|
|
packetPosition += populateTypeAndVersion(packetPosition, PACKET_TYPE_AVATAR_FACE_VIDEO);
|
|
|
|
*(uint16_t*)packetPosition = NodeList::getInstance()->getOwnerID();
|
|
packetPosition += sizeof(uint16_t);
|
|
|
|
*(uint32_t*)packetPosition = frameCount;
|
|
packetPosition += sizeof(uint32_t);
|
|
|
|
*(uint32_t*)packetPosition = data.size();
|
|
packetPosition += sizeof(uint32_t);
|
|
|
|
uint32_t* offsetPosition = (uint32_t*)packetPosition;
|
|
packetPosition += sizeof(uint32_t);
|
|
|
|
int headerSize = packetPosition - packet;
|
|
|
|
// break the data up into submessages of the maximum size (at least one, for zero-length packets)
|
|
*offsetPosition = 0;
|
|
do {
|
|
int payloadSize = min(data.size() - (int)*offsetPosition, MAX_PACKET_SIZE - headerSize);
|
|
memcpy(packetPosition, data.constData() + *offsetPosition, payloadSize);
|
|
getInstance()->controlledBroadcastToNodes(packet, headerSize + payloadSize, &NODE_TYPE_AVATAR_MIXER, 1);
|
|
*offsetPosition += payloadSize;
|
|
|
|
} while (*offsetPosition < data.size());
|
|
}
|
|
|
|
// Every second, check the frame rates and other stuff
|
|
void Application::timer() {
|
|
gettimeofday(&_timerEnd, NULL);
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::TestPing)) {
|
|
sendPingPackets();
|
|
}
|
|
|
|
_fps = (float)_frameCount / ((float)diffclock(&_timerStart, &_timerEnd) / 1000.f);
|
|
_packetsPerSecond = (float)_packetCount / ((float)diffclock(&_timerStart, &_timerEnd) / 1000.f);
|
|
_bytesPerSecond = (float)_bytesCount / ((float)diffclock(&_timerStart, &_timerEnd) / 1000.f);
|
|
_frameCount = 0;
|
|
_packetCount = 0;
|
|
_bytesCount = 0;
|
|
|
|
gettimeofday(&_timerStart, NULL);
|
|
|
|
// if we haven't detected gyros, check for them now
|
|
if (!_serialHeadSensor.isActive()) {
|
|
_serialHeadSensor.pair();
|
|
}
|
|
|
|
// ask the node list to check in with the domain server
|
|
NodeList::getInstance()->sendDomainServerCheckIn();
|
|
}
|
|
|
|
// Map a box face to its outward unit normal.
static glm::vec3 getFaceVector(BoxFace face) {
    switch (face) {
        case MIN_X_FACE:
            return glm::vec3(-1, 0, 0);

        case MAX_X_FACE:
            return glm::vec3(1, 0, 0);

        case MIN_Y_FACE:
            return glm::vec3(0, -1, 0);

        case MAX_Y_FACE:
            return glm::vec3(0, 1, 0);

        case MIN_Z_FACE:
            return glm::vec3(0, 0, -1);

        case MAX_Z_FACE:
            return glm::vec3(0, 0, 1);

        default:
            // Falling off the end of a value-returning function is undefined
            // behavior; return a zero vector for unexpected enum values.
            return glm::vec3(0, 0, 0);
    }
}
|
|
|
|
void Application::idle() {
|
|
|
|
timeval check;
|
|
gettimeofday(&check, NULL);
|
|
|
|
// Only run simulation code if more than IDLE_SIMULATE_MSECS have passed since last time we ran
|
|
|
|
double timeSinceLastUpdate = diffclock(&_lastTimeUpdated, &check);
|
|
if (timeSinceLastUpdate > IDLE_SIMULATE_MSECS) {
|
|
const float BIGGEST_DELTA_TIME_SECS = 0.25f;
|
|
update(glm::clamp((float)timeSinceLastUpdate / 1000.f, 0.f, BIGGEST_DELTA_TIME_SECS));
|
|
_glWidget->updateGL();
|
|
_lastTimeUpdated = check;
|
|
_idleLoopStdev.addValue(timeSinceLastUpdate);
|
|
|
|
// Record standard deviation and reset counter if needed
|
|
const int STDEV_SAMPLES = 500;
|
|
if (_idleLoopStdev.getSamples() > STDEV_SAMPLES) {
|
|
_idleLoopMeasuredJitter = _idleLoopStdev.getStDev();
|
|
_idleLoopStdev.reset();
|
|
}
|
|
|
|
// After finishing all of the above work, restart the idle timer, allowing 2ms to process events.
|
|
idleTimer->start(2);
|
|
}
|
|
}
|
|
// Shut down subsystems in dependency order before the application exits.
void Application::terminate() {
    // Close serial port
    // close(serial_fd);

    LeapManager::terminate();

    // Persist user preferences before anything else goes down.
    Menu::getInstance()->saveSettings();
    _settings->sync();

    // Signal the network receive thread to stop and wait for it to exit.
    if (_enableNetworkThread) {
        _stopNetworkReceiveThread = true;
        pthread_join(_networkReceiveThread, NULL);
    }

    _voxelProcessor.terminate();
    _voxelEditSender.terminate();
}
|
|
|
|
// Consume the packet header and node id from an avatar message, updating the
// caller's pointer/length in place. Returns the initialized Avatar the message
// refers to, or NULL when the node is unknown, has no data, or isn't ready.
static Avatar* processAvatarMessageHeader(unsigned char*& packetData, size_t& dataBytes) {
    // record the packet for stats-tracking
    Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AVATARS).updateValue(dataBytes);
    Node* avatarMixerNode = NodeList::getInstance()->soloNodeOfType(NODE_TYPE_AVATAR_MIXER);
    if (avatarMixerNode) {
        avatarMixerNode->recordBytesReceived(dataBytes);
    }

    // Advance past the packet header.
    int headerBytes = numBytesForPacketHeader(packetData);
    packetData += headerBytes;
    dataBytes -= headerBytes;

    // Consume the node id that follows the header.
    uint16_t nodeID = *(uint16_t*)packetData;
    packetData += sizeof(nodeID);
    dataBytes -= sizeof(nodeID);

    // Resolve the node; require attached avatar data.
    Node* node = NodeList::getInstance()->nodeWithID(nodeID);
    if (!node || !node->getLinkedData()) {
        return NULL;
    }
    Avatar* avatar = static_cast<Avatar*>(node->getLinkedData());
    return avatar->isInitialized() ? avatar : NULL;
}
|
|
|
|
// Handle a voxel-URL message: the payload after the header/node id is a
// percent-encoded URL pointing at the avatar's voxel model.
void Application::processAvatarVoxelURLMessage(unsigned char* packetData, size_t dataBytes) {
    Avatar* avatar = processAvatarMessageHeader(packetData, dataBytes);
    if (!avatar) {
        return;
    }
    QUrl url = QUrl::fromEncoded(QByteArray((char*)packetData, dataBytes));

    // invoke the set URL function on the simulate/render thread
    QMetaObject::invokeMethod(avatar->getVoxels(), "setVoxelURL", Q_ARG(QUrl, url));
}
|
|
|
|
// Handle a face-video message: strip the header/node id, then hand the
// remaining payload to the avatar's face for reassembly/decoding.
void Application::processAvatarFaceVideoMessage(unsigned char* packetData, size_t dataBytes) {
    Avatar* avatar = processAvatarMessageHeader(packetData, dataBytes);
    if (!avatar) {
        return;
    }
    avatar->getHead().getFace().processVideoMessage(packetData, dataBytes);
}
|
|
|
|
void Application::checkBandwidthMeterClick() {
|
|
// ... to be called upon button release
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::Bandwidth) &&
|
|
glm::compMax(glm::abs(glm::ivec2(_mouseX - _mouseDragStartedX, _mouseY - _mouseDragStartedY))) <= BANDWIDTH_METER_CLICK_MAX_DRAG_LENGTH &&
|
|
_bandwidthMeter.isWithinArea(_mouseX, _mouseY, _glWidget->width(), _glWidget->height())) {
|
|
|
|
// The bandwidth meter is visible, the click didn't get dragged too far and
|
|
// we actually hit the bandwidth meter
|
|
Menu::getInstance()->bandwidthDetails();
|
|
}
|
|
}
|
|
|
|
void Application::setFullscreen(bool fullscreen) {
|
|
_window->setWindowState(fullscreen ? (_window->windowState() | Qt::WindowFullScreen) :
|
|
(_window->windowState() & ~Qt::WindowFullScreen));
|
|
updateCursor();
|
|
}
|
|
|
|
// Toggle voxel rendering; edit packets are only sent while voxels render.
void Application::setRenderVoxels(bool voxelRender) {
    _voxelEditSender.setShouldSend(voxelRender);
    if (!voxelRender) {
        // Hiding voxels also flags the local tree for deletion.
        doKillLocalVoxels();
    }
}
|
|
|
|
// Request deletion of the local voxel tree; the flag is consumed by a later
// update/render pass rather than deleting immediately here.
void Application::doKillLocalVoxels() {
    _wantToKillLocalVoxels = true;
}
|
|
|
|
// Convert a voxel's tree-space position + scale into the world-space
// coordinates of its center.
// (Renamed the parameter: it previously shadowed the _mouseVoxel member,
// which made the code easy to misread.)
const glm::vec3 Application::getMouseVoxelWorldCoordinates(const VoxelDetail voxelDetail) {
    float halfScale = voxelDetail.s / 2.f;
    return glm::vec3((voxelDetail.x + halfScale) * TREE_SCALE,
                     (voxelDetail.y + halfScale) * TREE_SCALE,
                     (voxelDetail.z + halfScale) * TREE_SCALE);
}
|
|
|
|
const float NUDGE_PRECISION_MIN = 1 / pow(2.0, 12.0);
|
|
|
|
void Application::decreaseVoxelSize() {
|
|
if (_nudgeStarted) {
|
|
if (_mouseVoxelScale >= NUDGE_PRECISION_MIN) {
|
|
_mouseVoxelScale /= 2;
|
|
}
|
|
} else {
|
|
_mouseVoxelScale /= 2;
|
|
}
|
|
}
|
|
|
|
void Application::increaseVoxelSize() {
|
|
if (_nudgeStarted) {
|
|
if (_mouseVoxelScale < _nudgeVoxel.s) {
|
|
_mouseVoxelScale *= 2;
|
|
}
|
|
} else {
|
|
_mouseVoxelScale *= 2;
|
|
}
|
|
}
|
|
|
|
// Largest payload we will pack into a single voxel-edit message.
const int MAXIMUM_EDIT_VOXEL_MESSAGE_SIZE = 1500;
// State threaded through VoxelTree::recurseTreeWithOperation for
// Application::sendVoxelsOperation.
struct SendVoxelsOperationArgs {
    // Destination octal code the copied voxels are rebased onto;
    // NULL means send each node's code unmodified.
    unsigned char* newBaseOctCode;
};
|
|
|
|
// Tree-recursion callback: queue a destructive "set voxel" edit message for
// every colored node, optionally rebasing each node's octal code under
// args->newBaseOctCode (used when pasting clipboard voxels to a new spot).
// Always returns true so the recursion visits the whole tree.
bool Application::sendVoxelsOperation(VoxelNode* node, void* extraData) {
    SendVoxelsOperationArgs* args = (SendVoxelsOperationArgs*)extraData;
    if (node->isColored()) {
        unsigned char* nodeOctalCode = node->getOctalCode();

        unsigned char* codeColorBuffer = NULL;
        int codeLength = 0;
        int bytesInCode = 0;
        int codeAndColorLength;

        // If the newBase is NULL, then don't rebase
        if (args->newBaseOctCode) {
            // NOTE(review): assumes rebaseOctalCode() allocates room for the
            // trailing color bytes written below -- confirm in its source.
            codeColorBuffer = rebaseOctalCode(nodeOctalCode, args->newBaseOctCode, true);
            codeLength = numberOfThreeBitSectionsInCode(codeColorBuffer);
            bytesInCode = bytesRequiredForCodeLength(codeLength);
            codeAndColorLength = bytesInCode + SIZE_OF_COLOR_DATA;
        } else {
            codeLength = numberOfThreeBitSectionsInCode(nodeOctalCode);
            bytesInCode = bytesRequiredForCodeLength(codeLength);
            codeAndColorLength = bytesInCode + SIZE_OF_COLOR_DATA;
            codeColorBuffer = new unsigned char[codeAndColorLength];
            memcpy(codeColorBuffer, nodeOctalCode, bytesInCode);
        }

        // copy the colors over
        codeColorBuffer[bytesInCode + RED_INDEX  ] = node->getColor()[RED_INDEX  ];
        codeColorBuffer[bytesInCode + GREEN_INDEX] = node->getColor()[GREEN_INDEX];
        codeColorBuffer[bytesInCode + BLUE_INDEX ] = node->getColor()[BLUE_INDEX ];

        // Queue (not send immediately); the caller flushes the queue.
        getInstance()->_voxelEditSender.queueVoxelEditMessage(PACKET_TYPE_SET_VOXEL_DESTRUCTIVE,
                                                              codeColorBuffer, codeAndColorLength);

        delete[] codeColorBuffer;
    }
    return true; // keep going
}
|
|
|
|
void Application::exportVoxels() {
|
|
QString desktopLocation = QStandardPaths::writableLocation(QStandardPaths::DesktopLocation);
|
|
QString suggestedName = desktopLocation.append("/voxels.svo");
|
|
|
|
QString fileNameString = QFileDialog::getSaveFileName(_glWidget, tr("Export Voxels"), suggestedName,
|
|
tr("Sparse Voxel Octree Files (*.svo)"));
|
|
QByteArray fileNameAscii = fileNameString.toLocal8Bit();
|
|
const char* fileName = fileNameAscii.data();
|
|
VoxelNode* selectedNode = _voxels.getVoxelAt(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);
|
|
if (selectedNode) {
|
|
VoxelTree exportTree;
|
|
_voxels.copySubTreeIntoNewTree(selectedNode, &exportTree, true);
|
|
exportTree.writeToSVOFile(fileName);
|
|
}
|
|
|
|
// restore the main window's active state
|
|
_window->activateWindow();
|
|
}
|
|
|
|
void Application::importVoxels() {
|
|
if (_voxelImporter.exec()) {
|
|
qDebug("[DEBUG] Import succedded.\n");
|
|
} else {
|
|
qDebug("[DEBUG] Import failed.\n");
|
|
}
|
|
|
|
// restore the main window's active state
|
|
_window->activateWindow();
|
|
}
|
|
|
|
// Cut = copy the voxel under the cursor to the clipboard, then delete it.
void Application::cutVoxels() {
    copyVoxels();
    deleteVoxelUnderCursor();
}
|
|
|
|
// Copy the voxel subtree under the cursor into the clipboard tree.
void Application::copyVoxels() {
    // switch to and clear the clipboard first...
    _sharedVoxelSystem.killLocalVoxels();
    if (_sharedVoxelSystem.getTree() != &_clipboard) {
        // The shared system may be displaying an import-preview tree;
        // point it back at the clipboard before copying.
        _clipboard.eraseAllVoxels();
        _sharedVoxelSystem.changeTree(&_clipboard);
    }

    // then copy onto it if there is something to copy
    VoxelNode* selectedNode = _voxels.getVoxelAt(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);
    if (selectedNode) {
        _voxels.copySubTreeIntoNewTree(selectedNode, &_sharedVoxelSystem, true);
    }
}
|
|
|
|
void Application::pasteVoxels() {
|
|
unsigned char* calculatedOctCode = NULL;
|
|
VoxelNode* selectedNode = _voxels.getVoxelAt(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);
|
|
|
|
// Recurse the clipboard tree, where everything is root relative, and send all the colored voxels to
|
|
// the server as an set voxel message, this will also rebase the voxels to the new location
|
|
SendVoxelsOperationArgs args;
|
|
|
|
// we only need the selected voxel to get the newBaseOctCode, which we can actually calculate from the
|
|
// voxel size/position details. If we don't have an actual selectedNode then use the mouseVoxel to create a
|
|
// target octalCode for where the user is pointing.
|
|
if (selectedNode) {
|
|
args.newBaseOctCode = selectedNode->getOctalCode();
|
|
} else {
|
|
args.newBaseOctCode = calculatedOctCode = pointToVoxel(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);
|
|
}
|
|
|
|
_sharedVoxelSystem.getTree()->recurseTreeWithOperation(sendVoxelsOperation, &args);
|
|
|
|
if (_sharedVoxelSystem.getTree() != &_clipboard) {
|
|
_sharedVoxelSystem.killLocalVoxels();
|
|
_sharedVoxelSystem.changeTree(&_clipboard);
|
|
}
|
|
|
|
_voxelEditSender.flushQueue();
|
|
|
|
if (calculatedOctCode) {
|
|
delete[] calculatedOctCode;
|
|
}
|
|
}
|
|
|
|
void Application::findAxisAlignment() {
|
|
glm::vec3 direction = _myAvatar.getMouseRayDirection();
|
|
if (fabs(direction.z) > fabs(direction.x)) {
|
|
_lookingAlongX = false;
|
|
if (direction.z < 0) {
|
|
_lookingAwayFromOrigin = false;
|
|
} else {
|
|
_lookingAwayFromOrigin = true;
|
|
}
|
|
} else {
|
|
_lookingAlongX = true;
|
|
if (direction.x < 0) {
|
|
_lookingAwayFromOrigin = false;
|
|
} else {
|
|
_lookingAwayFromOrigin = true;
|
|
}
|
|
}
|
|
}
|
|
|
|
// Two-phase nudge: the first call (with a voxel targeted) arms the nudge and
// records the voxel; the second call applies the accumulated displacement.
void Application::nudgeVoxels() {
    VoxelNode* selectedNode = _voxels.getVoxelAt(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);
    // Nudging works through select mode; switch it on when a voxel is targeted.
    if (!Menu::getInstance()->isOptionChecked(MenuOption::VoxelSelectMode) && selectedNode) {
        Menu::getInstance()->triggerOption(MenuOption::VoxelSelectMode);
    }

    if (!_nudgeStarted && selectedNode) {
        // Arm: remember the voxel, seed the guide at its position, and work
        // out which axis the user is looking along.
        _nudgeVoxel = _mouseVoxel;
        _nudgeStarted = true;
        _nudgeGuidePosition = glm::vec3(_nudgeVoxel.x, _nudgeVoxel.y, _nudgeVoxel.z);
        findAxisAlignment();
    } else {
        // Apply: move the armed subtree by the guide's displacement.
        // NOTE(review): this branch also runs when no nudge was armed and no
        // voxel is selected, operating on whatever _nudgeVoxel last held --
        // confirm that is intended.
        // calculate nudgeVec
        glm::vec3 nudgeVec(_nudgeGuidePosition.x - _nudgeVoxel.x, _nudgeGuidePosition.y - _nudgeVoxel.y, _nudgeGuidePosition.z - _nudgeVoxel.z);

        VoxelNode* nodeToNudge = _voxels.getVoxelAt(_nudgeVoxel.x, _nudgeVoxel.y, _nudgeVoxel.z, _nudgeVoxel.s);

        if (nodeToNudge) {
            _voxels.getTree()->nudgeSubTree(nodeToNudge, nudgeVec, _voxelEditSender);
            _nudgeStarted = false;
        }
    }
}
|
|
|
|
// Menu entry point: delete whatever voxel the cursor is over.
void Application::deleteVoxels() {
    deleteVoxelUnderCursor();
}
|
|
|
|
// Default audio mixing mode.
void Application::setListenModeNormal() {
    _audio.setListenMode(AudioRingBuffer::NORMAL);
}
|
|
|
|
// Omni-directional point listening with a radius of 1.0.
void Application::setListenModePoint() {
    _audio.setListenMode(AudioRingBuffer::OMNI_DIRECTIONAL_POINT);
    _audio.setListenRadius(1.0);
}
|
|
|
|
void Application::setListenModeSingleSource() {
|
|
_audio.setListenMode(AudioRingBuffer::SELECTED_SOURCES);
|
|
_audio.clearListenSources();
|
|
|
|
glm::vec3 mouseRayOrigin = _myAvatar.getMouseRayOrigin();
|
|
glm::vec3 mouseRayDirection = _myAvatar.getMouseRayDirection();
|
|
glm::vec3 eyePositionIgnored;
|
|
uint16_t nodeID;
|
|
|
|
if (isLookingAtOtherAvatar(mouseRayOrigin, mouseRayDirection, eyePositionIgnored, nodeID)) {
|
|
_audio.addListenSource(nodeID);
|
|
}
|
|
}
|
|
|
|
// One-time fixed-function GL state: blending, smooth shading, a single
// light, and depth testing.
void Application::initDisplay() {
    glEnable(GL_BLEND);
    glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_CONSTANT_ALPHA, GL_ONE);
    glShadeModel(GL_SMOOTH);
    glEnable(GL_LIGHTING);
    glEnable(GL_LIGHT0);
    glEnable(GL_DEPTH_TEST);
}
|
|
|
|
// One-time application setup, run after the GL context exists. Order matters:
// settings load before the audio jitter override, and the shared voxel system
// must be initialized before its tree is swapped for the clipboard.
void Application::init() {
    _voxels.init();

    // Fixed frustum for the shared (clipboard / import preview) voxel system.
    _sharedVoxelSystemViewFrustum.setPosition(glm::vec3(TREE_SCALE / 2.0f,
                                                        TREE_SCALE / 2.0f,
                                                        3.0f * TREE_SCALE / 2.0f));
    _sharedVoxelSystemViewFrustum.setNearClip(TREE_SCALE / 2.0f);
    _sharedVoxelSystemViewFrustum.setFarClip(3.0f * TREE_SCALE / 2.0f);
    _sharedVoxelSystemViewFrustum.setFieldOfView(90);
    _sharedVoxelSystemViewFrustum.setOrientation(glm::quat());
    _sharedVoxelSystemViewFrustum.calculate();
    _sharedVoxelSystem.setViewFrustum(&_sharedVoxelSystemViewFrustum);
    _sharedVoxelSystem.init();
    // Point the shared system at the clipboard tree and free the tree it
    // allocated for itself during init().
    VoxelTree* tmpTree = _sharedVoxelSystem.getTree();
    _sharedVoxelSystem.changeTree(&_clipboard);
    delete tmpTree;

    _voxelImporter.init();

    _environment.init();

    _glowEffect.init();
    _ambientOcclusionEffect.init();

    _handControl.setScreenDimensions(_glWidget->width(), _glWidget->height());

    // Start the head-driven mouse cursor centered in the window.
    _headMouseX = _mouseX = _glWidget->width() / 2;
    _headMouseY = _mouseY = _glWidget->height() / 2;
    QCursor::setPos(_headMouseX, _headMouseY);

    _myAvatar.init();
    _myAvatar.setPosition(START_LOCATION);
    _myCamera.setMode(CAMERA_MODE_FIRST_PERSON);
    _myCamera.setModeShiftRate(1.0f);
    _myAvatar.setDisplayingLookatVectors(false);


    // With an Oculus attached, switch to fullscreen; queued so the trigger
    // runs after initialization completes.
    OculusManager::connect();
    if (OculusManager::isConnected()) {
        QMetaObject::invokeMethod(Menu::getInstance()->getActionForOption(MenuOption::Fullscreen),
                                  "trigger",
                                  Qt::QueuedConnection);
    }

    LeapManager::initialize();

    gettimeofday(&_timerStart, NULL);
    gettimeofday(&_lastTimeUpdated, NULL);

    // Load persisted settings, then apply the jitter-buffer override if set.
    Menu::getInstance()->loadSettings();
    if (Menu::getInstance()->getAudioJitterBufferSamples() != 0) {
        _audio.setJitterBufferSamples(Menu::getInstance()->getAudioJitterBufferSamples());
    }

    qDebug("Loaded settings.\n");

    // Announce our avatar's voxel URL to the avatar mixer.
    Avatar::sendAvatarVoxelURLMessage(_myAvatar.getVoxels()->getVoxelURL());

    // Build the on-screen voxel tool palette.
    _palette.init(_glWidget->width(), _glWidget->height());
    _palette.addAction(Menu::getInstance()->getActionForOption(MenuOption::VoxelAddMode), 0, 0);
    _palette.addAction(Menu::getInstance()->getActionForOption(MenuOption::VoxelDeleteMode), 0, 1);
    _palette.addTool(&_swatch);
    _palette.addAction(Menu::getInstance()->getActionForOption(MenuOption::VoxelColorMode), 0, 2);
    _palette.addAction(Menu::getInstance()->getActionForOption(MenuOption::VoxelGetColorMode), 0, 3);
    _palette.addAction(Menu::getInstance()->getActionForOption(MenuOption::VoxelSelectMode), 0, 4);

    _pieMenu.init("./resources/images/hifi-interface-tools-v2-pie.svg",
                  _glWidget->width(),
                  _glWidget->height());

    // The "follow" action lives in the pie menu.
    _followMode = new QAction(this);
    connect(_followMode, SIGNAL(triggered()), this, SLOT(toggleFollowMode()));
    _pieMenu.addAction(_followMode);
}
|
|
|
|
|
|
// Above this average speed, voxel editing under the cursor is disabled.
const float MAX_AVATAR_EDIT_VELOCITY = 1.0f;
// Farthest ray-hit distance at which the voxel edit cursor is active.
const float MAX_VOXEL_EDIT_DISTANCE = 20.0f;
// Radius of the sphere used to pick avatar heads with the mouse ray.
const float HEAD_SPHERE_RADIUS = 0.07;


// Default referent for isLookingAtOtherAvatar's nodeID out-parameter, so
// callers that don't care about the id can omit the argument.
static uint16_t DEFAULT_NODE_ID_REF = 1;
|
|
|
|
|
|
// Return the first other avatar whose head sphere the given ray intersects,
// or NULL. On a hit, eyePosition and nodeID are filled in and the lookat
// indicator state (_lookatIndicatorScale/_lookatOtherPosition) is cached.
// NOTE(review): the default argument on this out-of-class definition only
// applies to calls appearing after this point in the translation unit.
Avatar* Application::isLookingAtOtherAvatar(glm::vec3& mouseRayOrigin, glm::vec3& mouseRayDirection,
                                            glm::vec3& eyePosition, uint16_t& nodeID = DEFAULT_NODE_ID_REF) {

    NodeList* nodeList = NodeList::getInstance();
    for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
        if (node->getLinkedData() != NULL && node->getType() == NODE_TYPE_AGENT) {
            Avatar* avatar = (Avatar *) node->getLinkedData();
            glm::vec3 headPosition = avatar->getHead().getPosition();
            float distance;
            // Pick against a head-sized sphere scaled with the avatar.
            if (rayIntersectsSphere(mouseRayOrigin, mouseRayDirection, headPosition,
                                    HEAD_SPHERE_RADIUS * avatar->getScale(), distance)) {
                eyePosition = avatar->getHead().getEyePosition();
                _lookatIndicatorScale = avatar->getScale();
                _lookatOtherPosition = headPosition;
                nodeID = avatar->getOwningNode()->getNodeID();
                return avatar;
            }
        }
    }
    return NULL;
}
|
|
|
|
// True when the other avatar's gaze target falls inside a head-sized sphere
// around my avatar's head (radius scaled with my avatar's size).
bool Application::isLookingAtMyAvatar(Avatar* avatar) {
    return pointInSphere(avatar->getHead().getLookAtPosition(),
                         _myAvatar.getHead().getPosition(),
                         HEAD_SPHERE_RADIUS * _myAvatar.getScale());
}
|
|
|
|
// Draw a yellow halo floating just above the point of interest.
// (whichCamera is currently unused but kept for interface compatibility.)
void Application::renderLookatIndicator(glm::vec3 pointOfInterest, Camera& whichCamera) {

    const float DISTANCE_FROM_HEAD_SPHERE = 0.1f * _lookatIndicatorScale;
    const float INDICATOR_RADIUS = 0.1f * _lookatIndicatorScale;
    const int NUM_SEGMENTS = 30;
    glm::vec3 haloOrigin = pointOfInterest + glm::vec3(0.0f, DISTANCE_FROM_HEAD_SPHERE, 0.0f);
    glColor3f(1.0f, 1.0f, 0.0f); // yellow
    renderCircle(haloOrigin, INDICATOR_RADIUS, IDENTITY_UP, NUM_SEGMENTS);
}
|
|
|
|
void maybeBeginFollowIndicator(bool& began) {
|
|
if (!began) {
|
|
Application::getInstance()->getGlowEffect()->begin();
|
|
glLineWidth(5);
|
|
glBegin(GL_LINES);
|
|
began = true;
|
|
}
|
|
}
|
|
|
|
// Draw a glowing red-to-green line from every follower avatar to its leader
// (including my own avatar, if I am following someone).
void Application::renderFollowIndicator() {
    NodeList* nodeList = NodeList::getInstance();

    // initialize lazily so that we don't enable the glow effect unnecessarily
    bool began = false;

    for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); ++node) {
        if (node->getLinkedData() != NULL && node->getType() == NODE_TYPE_AGENT) {
            Avatar* avatar = (Avatar *) node->getLinkedData();
            Avatar* leader = NULL;

            if (avatar->getLeaderID() != UNKNOWN_NODE_ID) {
                if (avatar->getLeaderID() == NodeList::getInstance()->getOwnerID()) {
                    // This avatar is following me.
                    leader = &_myAvatar;
                } else {
                    // Search the node list for the leader's avatar.
                    for (NodeList::iterator it = nodeList->begin(); it != nodeList->end(); ++it) {
                        if(it->getNodeID() == avatar->getLeaderID()
                                && it->getType() == NODE_TYPE_AGENT) {
                            leader = (Avatar*) it->getLinkedData();
                        }
                    }
                }

                if (leader != NULL) {
                    maybeBeginFollowIndicator(began);
                    // Red endpoint midway between the follower's head and body...
                    glColor3f(1.f, 0.f, 0.f);
                    glVertex3f((avatar->getHead().getPosition().x + avatar->getPosition().x) / 2.f,
                               (avatar->getHead().getPosition().y + avatar->getPosition().y) / 2.f,
                               (avatar->getHead().getPosition().z + avatar->getPosition().z) / 2.f);
                    // ...green endpoint at the same spot on the leader.
                    glColor3f(0.f, 1.f, 0.f);
                    glVertex3f((leader->getHead().getPosition().x + leader->getPosition().x) / 2.f,
                               (leader->getHead().getPosition().y + leader->getPosition().y) / 2.f,
                               (leader->getHead().getPosition().z + leader->getPosition().z) / 2.f);
                }
            }
        }
    }

    // My own follow line, if I am following another avatar.
    if (_myAvatar.getLeadingAvatar() != NULL) {
        maybeBeginFollowIndicator(began);
        glColor3f(1.f, 0.f, 0.f);
        glVertex3f((_myAvatar.getHead().getPosition().x + _myAvatar.getPosition().x) / 2.f,
                   (_myAvatar.getHead().getPosition().y + _myAvatar.getPosition().y) / 2.f,
                   (_myAvatar.getHead().getPosition().z + _myAvatar.getPosition().z) / 2.f);
        glColor3f(0.f, 1.f, 0.f);
        glVertex3f((_myAvatar.getLeadingAvatar()->getHead().getPosition().x + _myAvatar.getLeadingAvatar()->getPosition().x) / 2.f,
                   (_myAvatar.getLeadingAvatar()->getHead().getPosition().y + _myAvatar.getLeadingAvatar()->getPosition().y) / 2.f,
                   (_myAvatar.getLeadingAvatar()->getHead().getPosition().z + _myAvatar.getLeadingAvatar()->getPosition().z) / 2.f);
    }

    // Close the GL_LINES batch and the glow pass only if anything was drawn.
    if (began) {
        glEnd();
        _glowEffect.end();
    }
}
|
|
|
|
void Application::update(float deltaTime) {
|
|
|
|
// Use Transmitter Hand to move hand if connected, else use mouse
|
|
if (_myTransmitter.isConnected()) {
|
|
const float HAND_FORCE_SCALING = 0.01f;
|
|
glm::vec3 estimatedRotation = _myTransmitter.getEstimatedRotation();
|
|
glm::vec3 handForce(-estimatedRotation.z, -estimatedRotation.x, estimatedRotation.y);
|
|
_myAvatar.setMovedHandOffset(handForce * HAND_FORCE_SCALING);
|
|
} else {
|
|
// update behaviors for avatar hand movement: handControl takes mouse values as input,
|
|
// and gives back 3D values modulated for smooth transitioning between interaction modes.
|
|
_handControl.update(_mouseX, _mouseY);
|
|
_myAvatar.setMovedHandOffset(_handControl.getValues());
|
|
}
|
|
|
|
// tell my avatar if the mouse is being pressed...
|
|
_myAvatar.setMousePressed(_mousePressed);
|
|
|
|
// check what's under the mouse and update the mouse voxel
|
|
glm::vec3 mouseRayOrigin, mouseRayDirection;
|
|
_viewFrustum.computePickRay(_mouseX / (float)_glWidget->width(),
|
|
_mouseY / (float)_glWidget->height(), mouseRayOrigin, mouseRayDirection);
|
|
|
|
// tell my avatar the posiion and direction of the ray projected ino the world based on the mouse position
|
|
_myAvatar.setMouseRay(mouseRayOrigin, mouseRayDirection);
|
|
|
|
// Set where I am looking based on my mouse ray (so that other people can see)
|
|
glm::vec3 lookAtSpot;
|
|
|
|
// Update faceshift
|
|
_faceshift.update();
|
|
|
|
// if we have faceshift, use that to compute the lookat direction
|
|
glm::vec3 lookAtRayOrigin = mouseRayOrigin, lookAtRayDirection = mouseRayDirection;
|
|
if (_faceshift.isActive()) {
|
|
lookAtRayOrigin = _myAvatar.getHead().calculateAverageEyePosition();
|
|
lookAtRayDirection = _myAvatar.getHead().getOrientation() * glm::quat(glm::radians(glm::vec3(
|
|
_faceshift.getEstimatedEyePitch(), _faceshift.getEstimatedEyeYaw(), 0.0f))) * glm::vec3(0.0f, 0.0f, -1.0f);
|
|
}
|
|
|
|
_isLookingAtOtherAvatar = isLookingAtOtherAvatar(lookAtRayOrigin, lookAtRayDirection, lookAtSpot);
|
|
if (_isLookingAtOtherAvatar) {
|
|
// If the mouse is over another avatar's head...
|
|
_myAvatar.getHead().setLookAtPosition(lookAtSpot);
|
|
} else if (_isHoverVoxel && !_faceshift.isActive()) {
|
|
// Look at the hovered voxel
|
|
lookAtSpot = getMouseVoxelWorldCoordinates(_hoverVoxel);
|
|
_myAvatar.getHead().setLookAtPosition(lookAtSpot);
|
|
} else {
|
|
// Just look in direction of the mouse ray
|
|
const float FAR_AWAY_STARE = TREE_SCALE;
|
|
lookAtSpot = lookAtRayOrigin + lookAtRayDirection * FAR_AWAY_STARE;
|
|
_myAvatar.getHead().setLookAtPosition(lookAtSpot);
|
|
}
|
|
|
|
// Find the voxel we are hovering over, and respond if clicked
|
|
float distance;
|
|
BoxFace face;
|
|
|
|
// If we have clicked on a voxel, update it's color
|
|
if (_isHoverVoxelSounding) {
|
|
VoxelNode* hoveredNode = _voxels.getVoxelAt(_hoverVoxel.x, _hoverVoxel.y, _hoverVoxel.z, _hoverVoxel.s);
|
|
if (hoveredNode) {
|
|
float bright = _audio.getCollisionSoundMagnitude();
|
|
nodeColor clickColor = { 255 * bright + _hoverVoxelOriginalColor[0] * (1.f - bright),
|
|
_hoverVoxelOriginalColor[1] * (1.f - bright),
|
|
_hoverVoxelOriginalColor[2] * (1.f - bright), 1 };
|
|
hoveredNode->setColor(clickColor);
|
|
if (bright < 0.01f) {
|
|
hoveredNode->setColor(_hoverVoxelOriginalColor);
|
|
_isHoverVoxelSounding = false;
|
|
}
|
|
} else {
|
|
// Voxel is not found, clear all
|
|
_isHoverVoxelSounding = false;
|
|
_isHoverVoxel = false;
|
|
}
|
|
} else {
|
|
// Check for a new hover voxel
|
|
glm::vec4 oldVoxel(_hoverVoxel.x, _hoverVoxel.y, _hoverVoxel.z, _hoverVoxel.s);
|
|
_isHoverVoxel = _voxels.findRayIntersection(mouseRayOrigin, mouseRayDirection, _hoverVoxel, distance, face);
|
|
if (MAKE_SOUND_ON_VOXEL_HOVER && _isHoverVoxel && glm::vec4(_hoverVoxel.x, _hoverVoxel.y, _hoverVoxel.z, _hoverVoxel.s) != oldVoxel) {
|
|
_hoverVoxelOriginalColor[0] = _hoverVoxel.red;
|
|
_hoverVoxelOriginalColor[1] = _hoverVoxel.green;
|
|
_hoverVoxelOriginalColor[2] = _hoverVoxel.blue;
|
|
_hoverVoxelOriginalColor[3] = 1;
|
|
_audio.startCollisionSound(1.0, HOVER_VOXEL_FREQUENCY * _hoverVoxel.s * TREE_SCALE, 0.0, HOVER_VOXEL_DECAY);
|
|
_isHoverVoxelSounding = true;
|
|
}
|
|
}
|
|
|
|
// If we are dragging on a voxel, add thrust according to the amount the mouse is dragging
|
|
const float VOXEL_GRAB_THRUST = 0.0f;
|
|
if (_mousePressed && (_mouseVoxel.s != 0)) {
|
|
glm::vec2 mouseDrag(_mouseX - _mouseDragStartedX, _mouseY - _mouseDragStartedY);
|
|
glm::quat orientation = _myAvatar.getOrientation();
|
|
glm::vec3 front = orientation * IDENTITY_FRONT;
|
|
glm::vec3 up = orientation * IDENTITY_UP;
|
|
glm::vec3 towardVoxel = getMouseVoxelWorldCoordinates(_mouseVoxelDragging)
|
|
- _myAvatar.getCameraPosition();
|
|
towardVoxel = front * glm::length(towardVoxel);
|
|
glm::vec3 lateralToVoxel = glm::cross(up, glm::normalize(towardVoxel)) * glm::length(towardVoxel);
|
|
_voxelThrust = glm::vec3(0, 0, 0);
|
|
_voxelThrust += towardVoxel * VOXEL_GRAB_THRUST * deltaTime * mouseDrag.y;
|
|
_voxelThrust += lateralToVoxel * VOXEL_GRAB_THRUST * deltaTime * mouseDrag.x;
|
|
|
|
// Add thrust from voxel grabbing to the avatar
|
|
_myAvatar.addThrust(_voxelThrust);
|
|
|
|
}
|
|
|
|
_mouseVoxel.s = 0.0f;
|
|
if (Menu::getInstance()->isVoxelModeActionChecked() &&
|
|
(fabs(_myAvatar.getVelocity().x) +
|
|
fabs(_myAvatar.getVelocity().y) +
|
|
fabs(_myAvatar.getVelocity().z)) / 3 < MAX_AVATAR_EDIT_VELOCITY) {
|
|
if (_voxels.findRayIntersection(mouseRayOrigin, mouseRayDirection, _mouseVoxel, distance, face)) {
|
|
if (distance < MAX_VOXEL_EDIT_DISTANCE) {
|
|
// find the nearest voxel with the desired scale
|
|
if (_mouseVoxelScale > _mouseVoxel.s) {
|
|
// choose the larger voxel that encompasses the one selected
|
|
_mouseVoxel.x = _mouseVoxelScale * floorf(_mouseVoxel.x / _mouseVoxelScale);
|
|
_mouseVoxel.y = _mouseVoxelScale * floorf(_mouseVoxel.y / _mouseVoxelScale);
|
|
_mouseVoxel.z = _mouseVoxelScale * floorf(_mouseVoxel.z / _mouseVoxelScale);
|
|
_mouseVoxel.s = _mouseVoxelScale;
|
|
|
|
} else {
|
|
glm::vec3 faceVector = getFaceVector(face);
|
|
if (_mouseVoxelScale < _mouseVoxel.s) {
|
|
// find the closest contained voxel
|
|
glm::vec3 pt = (mouseRayOrigin + mouseRayDirection * distance) / (float)TREE_SCALE -
|
|
faceVector * (_mouseVoxelScale * 0.5f);
|
|
_mouseVoxel.x = _mouseVoxelScale * floorf(pt.x / _mouseVoxelScale);
|
|
_mouseVoxel.y = _mouseVoxelScale * floorf(pt.y / _mouseVoxelScale);
|
|
_mouseVoxel.z = _mouseVoxelScale * floorf(pt.z / _mouseVoxelScale);
|
|
_mouseVoxel.s = _mouseVoxelScale;
|
|
}
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelAddMode)) {
|
|
// use the face to determine the side on which to create a neighbor
|
|
_mouseVoxel.x += faceVector.x * _mouseVoxel.s;
|
|
_mouseVoxel.y += faceVector.y * _mouseVoxel.s;
|
|
_mouseVoxel.z += faceVector.z * _mouseVoxel.s;
|
|
}
|
|
}
|
|
} else {
|
|
_mouseVoxel.s = 0.0f;
|
|
}
|
|
} else if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelAddMode)
|
|
|| Menu::getInstance()->isOptionChecked(MenuOption::VoxelSelectMode)) {
|
|
// place the voxel a fixed distance away
|
|
float worldMouseVoxelScale = _mouseVoxelScale * TREE_SCALE;
|
|
glm::vec3 pt = mouseRayOrigin + mouseRayDirection * (2.0f + worldMouseVoxelScale * 0.5f);
|
|
_mouseVoxel.x = _mouseVoxelScale * floorf(pt.x / worldMouseVoxelScale);
|
|
_mouseVoxel.y = _mouseVoxelScale * floorf(pt.y / worldMouseVoxelScale);
|
|
_mouseVoxel.z = _mouseVoxelScale * floorf(pt.z / worldMouseVoxelScale);
|
|
_mouseVoxel.s = _mouseVoxelScale;
|
|
}
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelDeleteMode)) {
|
|
// red indicates deletion
|
|
_mouseVoxel.red = 255;
|
|
_mouseVoxel.green = _mouseVoxel.blue = 0;
|
|
} else if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelSelectMode)) {
|
|
if (_nudgeStarted) {
|
|
_mouseVoxel.red = _mouseVoxel.green = _mouseVoxel.blue = 255;
|
|
} else {
|
|
// yellow indicates selection
|
|
_mouseVoxel.red = _mouseVoxel.green = 255;
|
|
_mouseVoxel.blue = 0;
|
|
}
|
|
} else { // _addVoxelMode->isChecked() || _colorVoxelMode->isChecked()
|
|
QColor paintColor = Menu::getInstance()->getActionForOption(MenuOption::VoxelPaintColor)->data().value<QColor>();
|
|
_mouseVoxel.red = paintColor.red();
|
|
_mouseVoxel.green = paintColor.green();
|
|
_mouseVoxel.blue = paintColor.blue();
|
|
}
|
|
|
|
// if we just edited, use the currently selected voxel as the "last" for drag detection
|
|
if (_justEditedVoxel) {
|
|
_lastMouseVoxelPos = glm::vec3(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z);
|
|
_justEditedVoxel = false;
|
|
}
|
|
}
|
|
|
|
// walking triggers the handControl to stop
|
|
if (_myAvatar.getMode() == AVATAR_MODE_WALKING) {
|
|
_handControl.stop();
|
|
}
|
|
|
|
// Update from Touch
|
|
if (_isTouchPressed) {
|
|
float TOUCH_YAW_SCALE = -50.0f;
|
|
float TOUCH_PITCH_SCALE = -50.0f;
|
|
_yawFromTouch += ((_touchAvgX - _lastTouchAvgX) * TOUCH_YAW_SCALE * deltaTime);
|
|
_pitchFromTouch += ((_touchAvgY - _lastTouchAvgY) * TOUCH_PITCH_SCALE * deltaTime);
|
|
|
|
_lastTouchAvgX = _touchAvgX;
|
|
_lastTouchAvgY = _touchAvgY;
|
|
}
|
|
|
|
// Leap finger-sensing device
|
|
LeapManager::enableFakeFingers(Menu::getInstance()->isOptionChecked(MenuOption::SimulateLeapHand));
|
|
_myAvatar.getHand().setRaveGloveActive(Menu::getInstance()->isOptionChecked(MenuOption::TestRaveGlove));
|
|
LeapManager::nextFrame(_myAvatar);
|
|
|
|
// Read serial port interface devices
|
|
if (_serialHeadSensor.isActive()) {
|
|
_serialHeadSensor.readData(deltaTime);
|
|
}
|
|
|
|
// Sample hardware, update view frustum if needed, and send avatar data to mixer/nodes
|
|
updateAvatar(deltaTime);
|
|
|
|
// read incoming packets from network
|
|
if (!_enableNetworkThread) {
|
|
networkReceive(0);
|
|
}
|
|
|
|
// parse voxel packets
|
|
if (!_enableProcessVoxelsThread) {
|
|
_voxelProcessor.threadRoutine();
|
|
_voxelEditSender.threadRoutine();
|
|
}
|
|
|
|
//loop through all the other avatars and simulate them...
|
|
NodeList* nodeList = NodeList::getInstance();
|
|
for(NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
|
|
node->lock();
|
|
if (node->getLinkedData() != NULL) {
|
|
Avatar *avatar = (Avatar *)node->getLinkedData();
|
|
if (!avatar->isInitialized()) {
|
|
avatar->init();
|
|
}
|
|
avatar->simulate(deltaTime, NULL, 0.f);
|
|
avatar->setMouseRay(mouseRayOrigin, mouseRayDirection);
|
|
}
|
|
node->unlock();
|
|
}
|
|
|
|
// Simulate myself
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::Gravity)) {
|
|
_myAvatar.setGravity(_environment.getGravity(_myAvatar.getPosition()));
|
|
}
|
|
else {
|
|
_myAvatar.setGravity(glm::vec3(0.0f, 0.0f, 0.0f));
|
|
}
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::TransmitterDrive) && _myTransmitter.isConnected()) {
|
|
_myAvatar.simulate(deltaTime, &_myTransmitter, Menu::getInstance()->getGyroCameraSensitivity());
|
|
} else {
|
|
_myAvatar.simulate(deltaTime, NULL, Menu::getInstance()->getGyroCameraSensitivity());
|
|
}
|
|
|
|
// no transmitter drive implies transmitter pick
|
|
if (!Menu::getInstance()->isOptionChecked(MenuOption::TransmitterDrive) && _myTransmitter.isConnected()) {
|
|
_transmitterPickStart = _myAvatar.getSkeleton().joint[AVATAR_JOINT_CHEST].position;
|
|
glm::vec3 direction = _myAvatar.getOrientation() *
|
|
glm::quat(glm::radians(_myTransmitter.getEstimatedRotation())) * IDENTITY_FRONT;
|
|
|
|
// check against voxels, avatars
|
|
const float MAX_PICK_DISTANCE = 100.0f;
|
|
float minDistance = MAX_PICK_DISTANCE;
|
|
VoxelDetail detail;
|
|
float distance;
|
|
BoxFace face;
|
|
if (_voxels.findRayIntersection(_transmitterPickStart, direction, detail, distance, face)) {
|
|
minDistance = min(minDistance, distance);
|
|
}
|
|
for(NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
|
|
node->lock();
|
|
if (node->getLinkedData() != NULL) {
|
|
Avatar *avatar = (Avatar*)node->getLinkedData();
|
|
if (!avatar->isInitialized()) {
|
|
avatar->init();
|
|
}
|
|
if (avatar->findRayIntersection(_transmitterPickStart, direction, distance)) {
|
|
minDistance = min(minDistance, distance);
|
|
}
|
|
}
|
|
node->unlock();
|
|
}
|
|
_transmitterPickEnd = _transmitterPickStart + direction * minDistance;
|
|
|
|
} else {
|
|
_transmitterPickStart = _transmitterPickEnd = glm::vec3();
|
|
}
|
|
|
|
if (!OculusManager::isConnected()) {
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::Mirror)) {
|
|
if (_myCamera.getMode() != CAMERA_MODE_MIRROR) {
|
|
_myCamera.setMode(CAMERA_MODE_MIRROR);
|
|
_myCamera.setModeShiftRate(100.0f);
|
|
}
|
|
} else if (Menu::getInstance()->isOptionChecked(MenuOption::FirstPerson)) {
|
|
if (_myCamera.getMode() != CAMERA_MODE_FIRST_PERSON) {
|
|
_myCamera.setMode(CAMERA_MODE_FIRST_PERSON);
|
|
_myCamera.setModeShiftRate(1.0f);
|
|
}
|
|
} else {
|
|
if (_myCamera.getMode() != CAMERA_MODE_THIRD_PERSON) {
|
|
_myCamera.setMode(CAMERA_MODE_THIRD_PERSON);
|
|
_myCamera.setModeShiftRate(1.0f);
|
|
}
|
|
}
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::OffAxisProjection)) {
|
|
float xSign = Menu::getInstance()->isOptionChecked(MenuOption::Mirror) ? 1.0f : -1.0f;
|
|
if (_faceshift.isActive()) {
|
|
const float EYE_OFFSET_SCALE = 0.025f;
|
|
glm::vec3 position = _faceshift.getHeadTranslation() * EYE_OFFSET_SCALE;
|
|
_myCamera.setEyeOffsetPosition(glm::vec3(position.x * xSign, position.y, position.z));
|
|
updateProjectionMatrix();
|
|
|
|
} else if (_webcam.isActive()) {
|
|
const float EYE_OFFSET_SCALE = 0.5f;
|
|
glm::vec3 position = _webcam.getEstimatedPosition() * EYE_OFFSET_SCALE;
|
|
_myCamera.setEyeOffsetPosition(glm::vec3(position.x * xSign, -position.y, position.z));
|
|
updateProjectionMatrix();
|
|
}
|
|
}
|
|
}
|
|
|
|
// Update bandwidth dialog, if any
|
|
BandwidthDialog* bandwidthDialog = Menu::getInstance()->getBandwidthDialog();
|
|
if (bandwidthDialog) {
|
|
bandwidthDialog->update();
|
|
}
|
|
|
|
VoxelStatsDialog* voxelStatsDialog = Menu::getInstance()->getVoxelStatsDialog();
|
|
if (voxelStatsDialog) {
|
|
voxelStatsDialog->update();
|
|
}
|
|
|
|
// Update audio stats for procedural sounds
|
|
#ifndef _WIN32
|
|
_audio.setLastAcceleration(_myAvatar.getThrust());
|
|
_audio.setLastVelocity(_myAvatar.getVelocity());
|
|
_audio.eventuallyAnalyzePing();
|
|
#endif
|
|
}
|
|
|
|
void Application::updateAvatar(float deltaTime) {
|
|
|
|
// rotate body yaw for yaw received from multitouch
|
|
_myAvatar.setOrientation(_myAvatar.getOrientation()
|
|
* glm::quat(glm::vec3(0, _yawFromTouch * deltaTime, 0)));
|
|
_yawFromTouch = 0.f;
|
|
|
|
// Update my avatar's state from gyros and/or webcam
|
|
_myAvatar.updateFromGyrosAndOrWebcam(Menu::getInstance()->isOptionChecked(MenuOption::GyroLook),
|
|
_pitchFromTouch);
|
|
|
|
if (_serialHeadSensor.isActive()) {
|
|
|
|
// Grab latest readings from the gyros
|
|
float measuredPitchRate = _serialHeadSensor.getLastPitchRate();
|
|
float measuredYawRate = _serialHeadSensor.getLastYawRate();
|
|
|
|
// Update gyro-based mouse (X,Y on screen)
|
|
const float MIN_MOUSE_RATE = 3.0;
|
|
const float HORIZONTAL_PIXELS_PER_DEGREE = 2880.f / 45.f;
|
|
const float VERTICAL_PIXELS_PER_DEGREE = 1800.f / 30.f;
|
|
if (powf(measuredYawRate * measuredYawRate +
|
|
measuredPitchRate * measuredPitchRate, 0.5) > MIN_MOUSE_RATE) {
|
|
_headMouseX -= measuredYawRate * HORIZONTAL_PIXELS_PER_DEGREE * deltaTime;
|
|
_headMouseY -= measuredPitchRate * VERTICAL_PIXELS_PER_DEGREE * deltaTime;
|
|
}
|
|
_headMouseX = max(_headMouseX, 0);
|
|
_headMouseX = min(_headMouseX, _glWidget->width());
|
|
_headMouseY = max(_headMouseY, 0);
|
|
_headMouseY = min(_headMouseY, _glWidget->height());
|
|
|
|
const float MIDPOINT_OF_SCREEN = 0.5;
|
|
|
|
// Only use gyro to set lookAt if mouse hasn't selected an avatar
|
|
if (!_isLookingAtOtherAvatar) {
|
|
|
|
// Set lookAtPosition if an avatar is at the center of the screen
|
|
glm::vec3 screenCenterRayOrigin, screenCenterRayDirection;
|
|
_viewFrustum.computePickRay(MIDPOINT_OF_SCREEN, MIDPOINT_OF_SCREEN, screenCenterRayOrigin, screenCenterRayDirection);
|
|
|
|
glm::vec3 eyePosition;
|
|
_isLookingAtOtherAvatar = isLookingAtOtherAvatar(screenCenterRayOrigin, screenCenterRayDirection, eyePosition);
|
|
if (_isLookingAtOtherAvatar) {
|
|
glm::vec3 myLookAtFromMouse(eyePosition);
|
|
_myAvatar.getHead().setLookAtPosition(myLookAtFromMouse);
|
|
}
|
|
}
|
|
|
|
}
|
|
|
|
if (OculusManager::isConnected()) {
|
|
float yaw, pitch, roll;
|
|
OculusManager::getEulerAngles(yaw, pitch, roll);
|
|
|
|
_myAvatar.getHead().setYaw(yaw + _yawFromTouch);
|
|
_myAvatar.getHead().setPitch(pitch + _pitchFromTouch);
|
|
_myAvatar.getHead().setRoll(roll);
|
|
}
|
|
|
|
// Get audio loudness data from audio input device
|
|
#ifndef _WIN32
|
|
_myAvatar.getHead().setAudioLoudness(_audio.getLastInputLoudness());
|
|
#endif
|
|
|
|
// Update Avatar with latest camera and view frustum data...
|
|
// NOTE: we get this from the view frustum, to make it simpler, since the
|
|
// loadViewFrumstum() method will get the correct details from the camera
|
|
// We could optimize this to not actually load the viewFrustum, since we don't
|
|
// actually need to calculate the view frustum planes to send these details
|
|
// to the server.
|
|
loadViewFrustum(_myCamera, _viewFrustum);
|
|
_myAvatar.setCameraPosition(_viewFrustum.getPosition());
|
|
_myAvatar.setCameraOrientation(_viewFrustum.getOrientation());
|
|
_myAvatar.setCameraFov(_viewFrustum.getFieldOfView());
|
|
_myAvatar.setCameraAspectRatio(_viewFrustum.getAspectRatio());
|
|
_myAvatar.setCameraNearClip(_viewFrustum.getNearClip());
|
|
_myAvatar.setCameraFarClip(_viewFrustum.getFarClip());
|
|
_myAvatar.setCameraEyeOffsetPosition(_viewFrustum.getEyeOffsetPosition());
|
|
|
|
NodeList* nodeList = NodeList::getInstance();
|
|
if (nodeList->getOwnerID() != UNKNOWN_NODE_ID) {
|
|
// if I know my ID, send head/hand data to the avatar mixer and voxel server
|
|
unsigned char broadcastString[MAX_PACKET_SIZE];
|
|
unsigned char* endOfBroadcastStringWrite = broadcastString;
|
|
|
|
endOfBroadcastStringWrite += populateTypeAndVersion(endOfBroadcastStringWrite, PACKET_TYPE_HEAD_DATA);
|
|
|
|
endOfBroadcastStringWrite += packNodeId(endOfBroadcastStringWrite, nodeList->getOwnerID());
|
|
|
|
endOfBroadcastStringWrite += _myAvatar.getBroadcastData(endOfBroadcastStringWrite);
|
|
|
|
const char nodeTypesOfInterest[] = { NODE_TYPE_VOXEL_SERVER, NODE_TYPE_AVATAR_MIXER };
|
|
controlledBroadcastToNodes(broadcastString, endOfBroadcastStringWrite - broadcastString,
|
|
nodeTypesOfInterest, sizeof(nodeTypesOfInterest));
|
|
|
|
// once in a while, send my voxel url
|
|
const float AVATAR_VOXEL_URL_SEND_INTERVAL = 1.0f; // seconds
|
|
if (shouldDo(AVATAR_VOXEL_URL_SEND_INTERVAL, deltaTime)) {
|
|
Avatar::sendAvatarVoxelURLMessage(_myAvatar.getVoxels()->getVoxelURL());
|
|
}
|
|
}
|
|
}
|
|
|
|
/////////////////////////////////////////////////////////////////////////////////////
|
|
// loadViewFrustum()
|
|
//
|
|
// Description: this will load the view frustum bounds for EITHER the head
|
|
// or the "myCamera".
|
|
//
|
|
void Application::loadViewFrustum(Camera& camera, ViewFrustum& viewFrustum) {
    // Copy the camera's pose and lens parameters straight into the view frustum,
    // then have the frustum recompute its planes/corners.

    // pose: where the camera is and which way it faces
    viewFrustum.setPosition(camera.getPosition());
    viewFrustum.setOrientation(camera.getRotation());

    // lens: field of view, aspect, clip distances, and any eye offset
    viewFrustum.setAspectRatio(camera.getAspectRatio());
    viewFrustum.setFieldOfView(camera.getFieldOfView());
    viewFrustum.setNearClip(camera.getNearClip());
    viewFrustum.setFarClip(camera.getFarClip());
    viewFrustum.setEyeOffsetPosition(camera.getEyeOffsetPosition());
    viewFrustum.setEyeOffsetOrientation(camera.getEyeOffsetOrientation());

    // Ask the ViewFrustum class to calculate our corners
    viewFrustum.calculate();
}
|
|
|
|
// this shader is an adaptation (HLSL -> GLSL, removed conditional) of the one in the Oculus sample
|
|
// code (Samples/OculusRoomTiny/RenderTiny_D3D1X_Device.cpp), which is under the Apache license
|
|
// (http://www.apache.org/licenses/LICENSE-2.0)
|
|
// Barrel-distortion fragment shader applied as a full-screen pass per eye:
// hmdWarp() maps each output texcoord back into the source texture using the
// polynomial warp parameters in hmdWarpParam; texels that fall outside the
// eye's half of the screen (the step() tests below) are painted black.
static const char* DISTORTION_FRAGMENT_SHADER =
    "#version 120\n"
    "uniform sampler2D texture;"
    "uniform vec2 lensCenter;"
    "uniform vec2 screenCenter;"
    "uniform vec2 scale;"
    "uniform vec2 scaleIn;"
    "uniform vec4 hmdWarpParam;"
    "vec2 hmdWarp(vec2 in01) {"
    "   vec2 theta = (in01 - lensCenter) * scaleIn;"
    "   float rSq = theta.x * theta.x + theta.y * theta.y;"
    "   vec2 theta1 = theta * (hmdWarpParam.x + hmdWarpParam.y * rSq + "
    "                 hmdWarpParam.z * rSq * rSq + hmdWarpParam.w * rSq * rSq * rSq);"
    "   return lensCenter + scale * theta1;"
    "}"
    "void main(void) {"
    "   vec2 tc = hmdWarp(gl_TexCoord[0].st);"
    "   vec2 below = step(screenCenter.st + vec2(-0.25, -0.5), tc.st);"
    "   vec2 above = vec2(1.0, 1.0) - step(screenCenter.st + vec2(0.25, 0.5), tc.st);"
    "   gl_FragColor = mix(vec4(0.0, 0.0, 0.0, 1.0), texture2D(texture, tc), "
    "       above.s * above.t * below.s * below.t);"
    "}";
|
|
|
|
void Application::displayOculus(Camera& whichCamera) {
    // Renders the scene twice (left/right eye, side by side), then runs the result
    // through the Oculus barrel-distortion shader to pre-warp it for the HMD lenses.
    _glowEffect.prepare();

    // magic numbers ahoy! in order to avoid pulling in the Oculus utility library that calculates
    // the rendering parameters from the hardware stats, i just folded their calculations into
    // constants using the stats for the current-model hardware as contained in the SDK file
    // LibOVR/Src/Util/Util_Render_Stereo.cpp

    // eye

    // render the left eye view to the left side of the screen
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();
    glTranslatef(0.151976, 0, 0); // +h, see Oculus SDK docs p. 26
    gluPerspective(whichCamera.getFieldOfView(), whichCamera.getAspectRatio(),
                   whichCamera.getNearClip(), whichCamera.getFarClip());

    glViewport(0, 0, _glWidget->width() / 2, _glWidget->height());
    glMatrixMode(GL_MODELVIEW);
    glPushMatrix();
    glLoadIdentity();
    glTranslatef(0.032, 0, 0); // dip/2, see p. 27

    displaySide(whichCamera);

    // and the right eye to the right side (same constants, mirrored sign)
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glTranslatef(-0.151976, 0, 0); // -h
    gluPerspective(whichCamera.getFieldOfView(), whichCamera.getAspectRatio(),
                   whichCamera.getNearClip(), whichCamera.getFarClip());

    glViewport(_glWidget->width() / 2, 0, _glWidget->width() / 2, _glWidget->height());
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glTranslatef(-0.032, 0, 0);

    displaySide(whichCamera);

    glPopMatrix();

    // restore our normal viewport
    glViewport(0, 0, _glWidget->width(), _glWidget->height());

    // resolve the glow effect into a texture that feeds the distortion pass below
    QOpenGLFramebufferObject* fbo = _glowEffect.render(true);
    glBindTexture(GL_TEXTURE_2D, fbo->texture());

    // lazily compile/link the distortion program and cache its uniform locations
    if (_oculusProgram == 0) {
        _oculusProgram = new ProgramObject();
        _oculusProgram->addShaderFromSourceCode(QGLShader::Fragment, DISTORTION_FRAGMENT_SHADER);
        _oculusProgram->link();

        _textureLocation = _oculusProgram->uniformLocation("texture");
        _lensCenterLocation = _oculusProgram->uniformLocation("lensCenter");
        _screenCenterLocation = _oculusProgram->uniformLocation("screenCenter");
        _scaleLocation = _oculusProgram->uniformLocation("scale");
        _scaleInLocation = _oculusProgram->uniformLocation("scaleIn");
        _hmdWarpParamLocation = _oculusProgram->uniformLocation("hmdWarpParam");
    }

    // switch to a 2D orthographic projection for the full-screen distortion pass
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0, _glWidget->width(), 0, _glWidget->height());
    glDisable(GL_DEPTH_TEST);

    // for reference on setting these values, see SDK file Samples/OculusRoomTiny/RenderTiny_Device.cpp

    float scaleFactor = 1.0 / _oculusDistortionScale;
    float aspectRatio = (_glWidget->width() * 0.5) / _glWidget->height();

    glDisable(GL_BLEND);
    _oculusProgram->bind();
    _oculusProgram->setUniformValue(_textureLocation, 0);
    _oculusProgram->setUniformValue(_lensCenterLocation, 0.287994, 0.5); // see SDK docs, p. 29
    _oculusProgram->setUniformValue(_screenCenterLocation, 0.25, 0.5);
    _oculusProgram->setUniformValue(_scaleLocation, 0.25 * scaleFactor, 0.5 * scaleFactor * aspectRatio);
    _oculusProgram->setUniformValue(_scaleInLocation, 4, 2 / aspectRatio);
    _oculusProgram->setUniformValue(_hmdWarpParamLocation, 1.0, 0.22, 0.24, 0);

    // left-eye quad: left half of the window, left half of the texture
    glColor3f(1, 0, 1);
    glBegin(GL_QUADS);
    glTexCoord2f(0, 0);
    glVertex2f(0, 0);
    glTexCoord2f(0.5, 0);
    glVertex2f(_glWidget->width()/2, 0);
    glTexCoord2f(0.5, 1);
    glVertex2f(_glWidget->width() / 2, _glWidget->height());
    glTexCoord2f(0, 1);
    glVertex2f(0, _glWidget->height());
    glEnd();

    // right-eye quad: same program, lens/screen centers shifted to the right half
    _oculusProgram->setUniformValue(_lensCenterLocation, 0.787994, 0.5);
    _oculusProgram->setUniformValue(_screenCenterLocation, 0.75, 0.5);

    glBegin(GL_QUADS);
    glTexCoord2f(0.5, 0);
    glVertex2f(_glWidget->width() / 2, 0);
    glTexCoord2f(1, 0);
    glVertex2f(_glWidget->width(), 0);
    glTexCoord2f(1, 1);
    glVertex2f(_glWidget->width(), _glWidget->height());
    glTexCoord2f(0.5, 1);
    glVertex2f(_glWidget->width() / 2, _glWidget->height());
    glEnd();

    // restore the GL state we changed for the distortion pass
    glEnable(GL_BLEND);
    glBindTexture(GL_TEXTURE_2D, 0);
    _oculusProgram->release();

    glPopMatrix();
}
|
|
|
|
// RGBA specular colors shared by the lighting/material setup in the render passes below.
const GLfloat WHITE_SPECULAR_COLOR[] = { 1.0f, 1.0f, 1.0f, 1.0f };
const GLfloat NO_SPECULAR_COLOR[] = { 0.0f, 0.0f, 0.0f, 1.0f };
|
|
|
|
void Application::setupWorldLight(Camera& whichCamera) {
    // Configures GL_LIGHT0 as a sun-directional world light plus the default
    // material, relative to the given camera's position.

    // Setup 3D lights (after the camera transform, so that they are positioned in world space)
    glEnable(GL_COLOR_MATERIAL);
    glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);

    glm::vec3 relativeSunLoc = glm::normalize(_environment.getClosestData(whichCamera.getPosition()).getSunLocation() -
        whichCamera.getPosition());
    // w == 0.0 makes GL_LIGHT0 a directional light
    GLfloat light_position0[] = { relativeSunLoc.x, relativeSunLoc.y, relativeSunLoc.z, 0.0 };
    glLightfv(GL_LIGHT0, GL_POSITION, light_position0);
    // glLightfv with GL_AMBIENT/GL_DIFFUSE reads four floats (RGBA); the previous
    // three-element arrays caused an out-of-bounds read for the alpha component,
    // so an explicit alpha of 1.0 is now supplied.
    GLfloat ambient_color[] = { 0.7f, 0.7f, 0.8f, 1.0f };
    glLightfv(GL_LIGHT0, GL_AMBIENT, ambient_color);
    GLfloat diffuse_color[] = { 0.8f, 0.7f, 0.7f, 1.0f };
    glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse_color);

    glLightfv(GL_LIGHT0, GL_SPECULAR, WHITE_SPECULAR_COLOR);
    glMaterialfv(GL_FRONT, GL_SPECULAR, WHITE_SPECULAR_COLOR);
    glMateriali(GL_FRONT, GL_SHININESS, 96);
}
|
|
|
|
void Application::computeOffAxisFrustum(float& left, float& right, float& bottom, float& top, float& near,
|
|
float& far, glm::vec4& nearClipPlane, glm::vec4& farClipPlane) const {
|
|
|
|
_viewFrustum.computeOffAxisFrustum(left, right, bottom, top, near, far, nearClipPlane, farClipPlane);
|
|
|
|
// when mirrored, we must flip left and right
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::Mirror)) {
|
|
float tmp = left;
|
|
left = -right;
|
|
right = -tmp;
|
|
}
|
|
}
|
|
|
|
void Application::displaySide(Camera& whichCamera) {
    // Renders one full view of the world for the given camera: camera/eye-offset
    // transform, stars, atmosphere, ground, voxels, edit-mode indicators, avatars,
    // and assorted debug overlays. Called once per eye in Oculus mode.

    // transform by eye offset

    // flip x if in mirror mode (also requires reversing winding order for backface culling)
    if (Menu::getInstance()->isOptionChecked(MenuOption::Mirror)) {
        glScalef(-1.0f, 1.0f, 1.0f);
        glFrontFace(GL_CW);

    } else {
        glFrontFace(GL_CCW);
    }

    // apply the inverse of the camera's eye offset (rotate, then translate)
    glm::vec3 eyeOffsetPos = whichCamera.getEyeOffsetPosition();
    glm::quat eyeOffsetOrient = whichCamera.getEyeOffsetOrientation();
    glm::vec3 eyeOffsetAxis = glm::axis(eyeOffsetOrient);
    glRotatef(-glm::angle(eyeOffsetOrient), eyeOffsetAxis.x, eyeOffsetAxis.y, eyeOffsetAxis.z);
    glTranslatef(-eyeOffsetPos.x, -eyeOffsetPos.y, -eyeOffsetPos.z);

    // transform view according to whichCamera
    // could be myCamera (if in normal mode)
    // or could be viewFrustumOffsetCamera if in offset mode

    glm::quat rotation = whichCamera.getRotation();
    glm::vec3 axis = glm::axis(rotation);
    glRotatef(-glm::angle(rotation), axis.x, axis.y, axis.z);

    glTranslatef(-whichCamera.getPosition().x, -whichCamera.getPosition().y, -whichCamera.getPosition().z);

    // Setup 3D lights (after the camera transform, so that they are positioned in world space)
    setupWorldLight(whichCamera);

    if (Menu::getInstance()->isOptionChecked(MenuOption::Stars)) {
        // lazily load star data on first use
        if (!_stars.getFileLoaded()) {
            _stars.readInput(STAR_FILE, STAR_CACHE_FILE, 0);
        }
        // should be the first rendering pass - w/o depth buffer / lighting

        // compute starfield alpha based on distance from atmosphere:
        // fully hidden inside the inner radius, fading in up to the outer radius
        float alpha = 1.0f;
        if (Menu::getInstance()->isOptionChecked(MenuOption::Atmosphere)) {
            const EnvironmentData& closestData = _environment.getClosestData(whichCamera.getPosition());
            float height = glm::distance(whichCamera.getPosition(), closestData.getAtmosphereCenter());
            if (height < closestData.getAtmosphereInnerRadius()) {
                alpha = 0.0f;

            } else if (height < closestData.getAtmosphereOuterRadius()) {
                alpha = (height - closestData.getAtmosphereInnerRadius()) /
                    (closestData.getAtmosphereOuterRadius() - closestData.getAtmosphereInnerRadius());
            }
        }

        // finally render the starfield
        _stars.render(whichCamera.getFieldOfView(), whichCamera.getAspectRatio(), whichCamera.getNearClip(), alpha);
    }

    // draw the sky dome
    if (Menu::getInstance()->isOptionChecked(MenuOption::Atmosphere)) {
        _environment.renderAtmospheres(whichCamera);
    }

    glEnable(GL_LIGHTING);
    glEnable(GL_DEPTH_TEST);

    // Enable to show line from me to the voxel I am touching
    //renderLineToTouchedVoxel();
    //renderThrustAtVoxel(_voxelThrust);

    // draw a red sphere
    // NOTE(review): this sphere is drawn at the current (world-origin) transform
    // every frame — presumably an origin marker; confirm whether it is still wanted.
    float sphereRadius = 0.25f;
    glColor3f(1,0,0);
    glPushMatrix();
    glutSolidSphere(sphereRadius, 15, 15);
    glPopMatrix();

    // disable specular lighting for ground and voxels
    glMaterialfv(GL_FRONT, GL_SPECULAR, NO_SPECULAR_COLOR);

    //draw a grid ground plane....
    if (Menu::getInstance()->isOptionChecked(MenuOption::GroundPlane)) {
        // draw grass plane with fog
        glEnable(GL_FOG);
        glEnable(GL_NORMALIZE);
        const float FOG_COLOR[] = { 1.0f, 1.0f, 1.0f, 1.0f };
        glFogfv(GL_FOG_COLOR, FOG_COLOR);
        glFogi(GL_FOG_MODE, GL_EXP2);
        glFogf(GL_FOG_DENSITY, 0.025f);
        glPushMatrix();
        const float GRASS_PLANE_SIZE = 256.0f;
        // center the plane on the origin, slightly below y=0 to avoid z-fighting
        glTranslatef(-GRASS_PLANE_SIZE * 0.5f, -0.01f, GRASS_PLANE_SIZE * 0.5f);
        glScalef(GRASS_PLANE_SIZE, 1.0f, GRASS_PLANE_SIZE);
        glRotatef(-90.0f, 1.0f, 0.0f, 0.0f);
        glColor3ub(70, 134, 74);
        const int GRASS_DIVISIONS = 40;
        _geometryCache.renderSquare(GRASS_DIVISIONS, GRASS_DIVISIONS);
        glPopMatrix();
        glDisable(GL_FOG);
        glDisable(GL_NORMALIZE);

        //renderGroundPlaneGrid(EDGE_SIZE_GROUND_PLANE, _audio.getCollisionSoundMagnitude());
    }
    // Draw voxels
    if (Menu::getInstance()->isOptionChecked(MenuOption::Voxels)) {
        _voxels.render(Menu::getInstance()->isOptionChecked(MenuOption::VoxelTextures));
    }

    // restore default, white specular
    glMaterialfv(GL_FRONT, GL_SPECULAR, WHITE_SPECULAR_COLOR);

    // indicate what we'll be adding/removing in mouse mode, if anything
    // (_mouseVoxel.s == 0 means no voxel is currently targeted)
    if (_mouseVoxel.s != 0) {
        glDisable(GL_LIGHTING);
        glPushMatrix();
        // _mouseVoxel coordinates are in normalized tree space; scale up to world units
        glScalef(TREE_SCALE, TREE_SCALE, TREE_SCALE);
        if (_nudgeStarted) {
            renderNudgeGuide(_nudgeGuidePosition.x, _nudgeGuidePosition.y, _nudgeGuidePosition.z, _nudgeVoxel.s);
            renderNudgeGrid(_nudgeVoxel.x, _nudgeVoxel.y, _nudgeVoxel.z, _nudgeVoxel.s, _mouseVoxel.s);
            glPushMatrix();
            // wire cube centered on the voxel being nudged
            glTranslatef(_nudgeVoxel.x + _nudgeVoxel.s * 0.5f,
                         _nudgeVoxel.y + _nudgeVoxel.s * 0.5f,
                         _nudgeVoxel.z + _nudgeVoxel.s * 0.5f);
            glColor3ub(255, 255, 255);
            glLineWidth(4.0f);
            glutWireCube(_nudgeVoxel.s);
            glPopMatrix();
        } else {
            renderMouseVoxelGrid(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);
        }

        if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelAddMode)) {
            // use a contrasting color so that we can see what we're doing
            glColor3ub(_mouseVoxel.red + 128, _mouseVoxel.green + 128, _mouseVoxel.blue + 128);
        } else {
            glColor3ub(_mouseVoxel.red, _mouseVoxel.green, _mouseVoxel.blue);
        }

        if (_nudgeStarted) {
            // render nudge guide cube
            glTranslatef(_nudgeGuidePosition.x + _nudgeVoxel.s*0.5f,
                         _nudgeGuidePosition.y + _nudgeVoxel.s*0.5f,
                         _nudgeGuidePosition.z + _nudgeVoxel.s*0.5f);
            glLineWidth(4.0f);
            glutWireCube(_nudgeVoxel.s);
        } else {
            // wire cube centered on the voxel under the mouse
            glTranslatef(_mouseVoxel.x + _mouseVoxel.s*0.5f,
                         _mouseVoxel.y + _mouseVoxel.s*0.5f,
                         _mouseVoxel.z + _mouseVoxel.s*0.5f);
            glLineWidth(4.0f);
            glutWireCube(_mouseVoxel.s);
        }
        glLineWidth(1.0f);
        glPopMatrix();
        glEnable(GL_LIGHTING);
    }

    // paste preview: render the shared voxel system at the mouse voxel position
    if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelSelectMode) && _pasteMode) {
        glPushMatrix();
        glTranslatef(_mouseVoxel.x * TREE_SCALE,
                     _mouseVoxel.y * TREE_SCALE,
                     _mouseVoxel.z * TREE_SCALE);
        // NOTE(review): the translation is scaled by TREE_SCALE but the scale below
        // is not — confirm whether the preview size is intentionally in tree units.
        glScalef(_mouseVoxel.s,
                 _mouseVoxel.s,
                 _mouseVoxel.s);

        _sharedVoxelSystem.render(true);
        glPopMatrix();
    }

    _myAvatar.renderScreenTint(SCREEN_TINT_BEFORE_AVATARS, whichCamera);

    if (Menu::getInstance()->isOptionChecked(MenuOption::Avatars)) {
        // Render avatars of other nodes
        NodeList* nodeList = NodeList::getInstance();

        for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
            // lock the node while reading/initializing its linked avatar data
            node->lock();

            if (node->getLinkedData() != NULL && node->getType() == NODE_TYPE_AGENT) {
                Avatar *avatar = (Avatar *)node->getLinkedData();
                if (!avatar->isInitialized()) {
                    avatar->init();
                }
                // Set lookAt to myCamera on client side if other avatars are looking at client
                if (isLookingAtMyAvatar(avatar)) {
                    avatar->getHead().setLookAtPosition(_myCamera.getPosition());
                }
                avatar->render(false, Menu::getInstance()->isOptionChecked(MenuOption::AvatarAsBalls));
                avatar->setDisplayingLookatVectors(Menu::getInstance()->isOptionChecked(MenuOption::LookAtVectors));
            }

            node->unlock();
        }

        // Render my own Avatar
        if (_myCamera.getMode() == CAMERA_MODE_MIRROR && !_faceshift.isActive()) {
            _myAvatar.getHead().setLookAtPosition(_myCamera.getPosition());
        }
        _myAvatar.render(Menu::getInstance()->isOptionChecked(MenuOption::Mirror),
                         Menu::getInstance()->isOptionChecked(MenuOption::AvatarAsBalls));
        _myAvatar.setDisplayingLookatVectors(Menu::getInstance()->isOptionChecked(MenuOption::LookAtVectors));

        if (Menu::getInstance()->isOptionChecked(MenuOption::LookAtIndicator) && _isLookingAtOtherAvatar) {
            renderLookatIndicator(_lookatOtherPosition, whichCamera);
        }
    }

    _myAvatar.renderScreenTint(SCREEN_TINT_AFTER_AVATARS, whichCamera);

    // Render the world box
    if (!Menu::getInstance()->isOptionChecked(MenuOption::Mirror) && Menu::getInstance()->isOptionChecked(MenuOption::Stats)) {
        renderWorldBox();
    }

    // render the ambient occlusion effect if enabled
    if (Menu::getInstance()->isOptionChecked(MenuOption::AmbientOcclusion)) {
        _ambientOcclusionEffect.render();
    }

    // brad's frustum for debugging
    if (Menu::getInstance()->isOptionChecked(MenuOption::DisplayFrustum)) {
        renderViewFrustum(_viewFrustum);
    }

    // render voxel fades if they exist, erasing each fade once it finishes
    if (_voxelFades.size() > 0) {
        for(std::vector<VoxelFade>::iterator fade = _voxelFades.begin(); fade != _voxelFades.end();) {
            fade->render();
            if(fade->isDone()) {
                // erase() returns the iterator to the next element
                fade = _voxelFades.erase(fade);
            } else {
                ++fade;
            }
        }
    }

    renderFollowIndicator();

    // render transmitter pick ray, if non-empty
    if (_transmitterPickStart != _transmitterPickEnd) {
        Glower glower;
        const float TRANSMITTER_PICK_COLOR[] = { 1.0f, 1.0f, 0.0f };
        glColor3fv(TRANSMITTER_PICK_COLOR);
        glLineWidth(3.0f);
        glBegin(GL_LINES);
        glVertex3f(_transmitterPickStart.x, _transmitterPickStart.y, _transmitterPickStart.z);
        glVertex3f(_transmitterPickEnd.x, _transmitterPickEnd.y, _transmitterPickEnd.z);
        glEnd();
        glLineWidth(1.0f);

        // small sphere marking the end of the pick ray
        glPushMatrix();
        glTranslatef(_transmitterPickEnd.x, _transmitterPickEnd.y, _transmitterPickEnd.z);

        const float PICK_END_RADIUS = 0.025f;
        glutSolidSphere(PICK_END_RADIUS, 8, 8);

        glPopMatrix();
    }
}
|
|
|
|
void Application::displayOverlay() {
|
|
// Render 2D overlay: I/O level bar graphs and text
|
|
glMatrixMode(GL_PROJECTION);
|
|
glPushMatrix();
|
|
glLoadIdentity();
|
|
gluOrtho2D(0, _glWidget->width(), _glWidget->height(), 0);
|
|
glDisable(GL_DEPTH_TEST);
|
|
glDisable(GL_LIGHTING);
|
|
|
|
// Display a single screen-size quad to create an alpha blended 'collision' flash
|
|
float collisionSoundMagnitude = _audio.getCollisionSoundMagnitude();
|
|
const float VISIBLE_COLLISION_SOUND_MAGNITUDE = 0.5f;
|
|
if (collisionSoundMagnitude > VISIBLE_COLLISION_SOUND_MAGNITUDE) {
|
|
renderCollisionOverlay(_glWidget->width(), _glWidget->height(), _audio.getCollisionSoundMagnitude());
|
|
}
|
|
|
|
#ifndef _WIN32
|
|
_audio.render(_glWidget->width(), _glWidget->height());
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::Oscilloscope)) {
|
|
_audioScope.render(20, _glWidget->height() - 200);
|
|
}
|
|
#endif
|
|
|
|
//noiseTest(_glWidget->width(), _glWidget->height());
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::HeadMouse)
|
|
&& !Menu::getInstance()->isOptionChecked(MenuOption::Mirror)
|
|
&& USING_INVENSENSE_MPU9150) {
|
|
// Display small target box at center or head mouse target that can also be used to measure LOD
|
|
glColor3f(1.0, 1.0, 1.0);
|
|
glDisable(GL_LINE_SMOOTH);
|
|
const int PIXEL_BOX = 20;
|
|
glBegin(GL_LINE_STRIP);
|
|
glVertex2f(_headMouseX - PIXEL_BOX/2, _headMouseY - PIXEL_BOX/2);
|
|
glVertex2f(_headMouseX + PIXEL_BOX/2, _headMouseY - PIXEL_BOX/2);
|
|
glVertex2f(_headMouseX + PIXEL_BOX/2, _headMouseY + PIXEL_BOX/2);
|
|
glVertex2f(_headMouseX - PIXEL_BOX/2, _headMouseY + PIXEL_BOX/2);
|
|
glVertex2f(_headMouseX - PIXEL_BOX/2, _headMouseY - PIXEL_BOX/2);
|
|
glEnd();
|
|
glEnable(GL_LINE_SMOOTH);
|
|
}
|
|
|
|
// Show detected levels from the serial I/O ADC channel sensors
|
|
if (_displayLevels) _serialHeadSensor.renderLevels(_glWidget->width(), _glWidget->height());
|
|
|
|
// Show hand transmitter data if detected
|
|
if (_myTransmitter.isConnected()) {
|
|
_myTransmitter.renderLevels(_glWidget->width(), _glWidget->height());
|
|
}
|
|
// Display stats and log text onscreen
|
|
glLineWidth(1.0f);
|
|
glPointSize(1.0f);
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::Stats)) {
|
|
displayStats();
|
|
}
|
|
|
|
// testing rendering coverage map
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::CoverageMapV2)) {
|
|
renderCoverageMapV2();
|
|
}
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::CoverageMap)) {
|
|
renderCoverageMap();
|
|
}
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::Bandwidth)) {
|
|
_bandwidthMeter.render(_glWidget->width(), _glWidget->height());
|
|
}
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::Log)) {
|
|
LogDisplay::instance.render(_glWidget->width(), _glWidget->height());
|
|
}
|
|
|
|
// Show chat entry field
|
|
if (_chatEntryOn) {
|
|
_chatEntry.render(_glWidget->width(), _glWidget->height());
|
|
}
|
|
|
|
// Show on-screen msec timer
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::FrameTimer)) {
|
|
char frameTimer[10];
|
|
uint64_t mSecsNow = floor(usecTimestampNow() / 1000.0 + 0.5);
|
|
sprintf(frameTimer, "%d\n", (int)(mSecsNow % 1000));
|
|
drawtext(_glWidget->width() - 100, _glWidget->height() - 20, 0.30, 0, 1.0, 0, frameTimer, 0, 0, 0);
|
|
drawtext(_glWidget->width() - 102, _glWidget->height() - 22, 0.30, 0, 1.0, 0, frameTimer, 1, 1, 1);
|
|
}
|
|
|
|
// Stats at upper right of screen about who domain server is telling us about
|
|
glPointSize(1.0f);
|
|
char nodes[100];
|
|
|
|
NodeList* nodeList = NodeList::getInstance();
|
|
int totalAvatars = 0, totalServers = 0;
|
|
|
|
for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
|
|
node->getType() == NODE_TYPE_AGENT ? totalAvatars++ : totalServers++;
|
|
}
|
|
|
|
sprintf(nodes, "Servers: %d, Avatars: %d\n", totalServers, totalAvatars);
|
|
drawtext(_glWidget->width() - 150, 20, 0.10, 0, 1.0, 0, nodes, 1, 0, 0);
|
|
|
|
// render the webcam input frame
|
|
_webcam.renderPreview(_glWidget->width(), _glWidget->height());
|
|
|
|
_palette.render(_glWidget->width(), _glWidget->height());
|
|
|
|
QAction* paintColorAction = NULL;
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelGetColorMode)
|
|
&& (paintColorAction = Menu::getInstance()->getActionForOption(MenuOption::VoxelPaintColor))->data().value<QColor>()
|
|
!= _swatch.getColor()) {
|
|
QColor color = paintColorAction->data().value<QColor>();
|
|
TextRenderer textRenderer(SANS_FONT_FAMILY, 11, 50);
|
|
const char line1[] = "Assign this color to a swatch";
|
|
const char line2[] = "by choosing a key from 1 to 8.";
|
|
|
|
int left = (_glWidget->width() - POPUP_WIDTH - 2 * POPUP_MARGIN) / 2;
|
|
int top = _glWidget->height() / 40;
|
|
|
|
glBegin(GL_POLYGON);
|
|
glColor3f(0.0f, 0.0f, 0.0f);
|
|
for (double a = M_PI; a < 1.5f * M_PI; a += POPUP_STEP) {
|
|
glVertex2f(left + POPUP_MARGIN * cos(a) , top + POPUP_MARGIN * sin(a));
|
|
}
|
|
for (double a = 1.5f * M_PI; a < 2.0f * M_PI; a += POPUP_STEP) {
|
|
glVertex2f(left + POPUP_WIDTH + POPUP_MARGIN * cos(a), top + POPUP_MARGIN * sin(a));
|
|
}
|
|
for (double a = 0.0f; a < 0.5f * M_PI; a += POPUP_STEP) {
|
|
glVertex2f(left + POPUP_WIDTH + POPUP_MARGIN * cos(a), top + POPUP_HEIGHT + POPUP_MARGIN * sin(a));
|
|
}
|
|
for (double a = 0.5f * M_PI; a < 1.0f * M_PI; a += POPUP_STEP) {
|
|
glVertex2f(left + POPUP_MARGIN * cos(a) , top + POPUP_HEIGHT + POPUP_MARGIN * sin(a));
|
|
}
|
|
glEnd();
|
|
|
|
glBegin(GL_QUADS);
|
|
glColor3f(color.redF(),
|
|
color.greenF(),
|
|
color.blueF());
|
|
glVertex2f(left , top);
|
|
glVertex2f(left + SWATCH_WIDTH, top);
|
|
glVertex2f(left + SWATCH_WIDTH, top + SWATCH_HEIGHT);
|
|
glVertex2f(left , top + SWATCH_HEIGHT);
|
|
glEnd();
|
|
|
|
glColor3f(1.0f, 1.0f, 1.0f);
|
|
textRenderer.draw(left + SWATCH_WIDTH + POPUP_MARGIN, top + FIRST_LINE_OFFSET , line1);
|
|
textRenderer.draw(left + SWATCH_WIDTH + POPUP_MARGIN, top + SECOND_LINE_OFFSET, line2);
|
|
}
|
|
else {
|
|
_swatch.checkColor();
|
|
}
|
|
|
|
if (_pieMenu.isDisplayed()) {
|
|
_pieMenu.render();
|
|
}
|
|
|
|
glPopMatrix();
|
|
}
|
|
|
|
void Application::displayStats() {
|
|
int statsVerticalOffset = 8;
|
|
|
|
char stats[200];
|
|
sprintf(stats, "%3.0f FPS, %d Pkts/sec, %3.2f Mbps ",
|
|
_fps, _packetsPerSecond, (float)_bytesPerSecond * 8.f / 1000000.f);
|
|
drawtext(10, statsVerticalOffset + 15, 0.10f, 0, 1.0, 0, stats);
|
|
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::TestPing)) {
|
|
int pingAudio = 0, pingAvatar = 0, pingVoxel = 0, pingVoxelMax = 0;
|
|
|
|
NodeList* nodeList = NodeList::getInstance();
|
|
Node* audioMixerNode = nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER);
|
|
Node* avatarMixerNode = nodeList->soloNodeOfType(NODE_TYPE_AVATAR_MIXER);
|
|
|
|
pingAudio = audioMixerNode ? audioMixerNode->getPingMs() : 0;
|
|
pingAvatar = avatarMixerNode ? avatarMixerNode->getPingMs() : 0;
|
|
|
|
|
|
// Now handle voxel servers, since there could be more than one, we average their ping times
|
|
unsigned long totalPingVoxel = 0;
|
|
int voxelServerCount = 0;
|
|
for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
|
|
if (node->getType() == NODE_TYPE_VOXEL_SERVER) {
|
|
totalPingVoxel += node->getPingMs();
|
|
voxelServerCount++;
|
|
if (pingVoxelMax < node->getPingMs()) {
|
|
pingVoxelMax = node->getPingMs();
|
|
}
|
|
}
|
|
}
|
|
if (voxelServerCount) {
|
|
pingVoxel = totalPingVoxel/voxelServerCount;
|
|
}
|
|
|
|
char pingStats[200];
|
|
sprintf(pingStats, "Ping audio/avatar/voxel: %d / %d / %d avg %d max ", pingAudio, pingAvatar, pingVoxel, pingVoxelMax);
|
|
drawtext(10, statsVerticalOffset + 35, 0.10f, 0, 1.0, 0, pingStats);
|
|
}
|
|
|
|
char avatarStats[200];
|
|
glm::vec3 avatarPos = _myAvatar.getPosition();
|
|
sprintf(avatarStats, "Avatar position: %.3f, %.3f, %.3f, yaw = %.2f", avatarPos.x, avatarPos.y, avatarPos.z, _myAvatar.getBodyYaw());
|
|
drawtext(10, statsVerticalOffset + 55, 0.10f, 0, 1.0, 0, avatarStats);
|
|
|
|
|
|
std::stringstream voxelStats;
|
|
voxelStats.precision(4);
|
|
voxelStats << "Voxels Rendered: " << _voxels.getVoxelsRendered() / 1000.f << "K Updated: " << _voxels.getVoxelsUpdated()/1000.f << "K";
|
|
drawtext(10, statsVerticalOffset + 230, 0.10f, 0, 1.0, 0, (char *)voxelStats.str().c_str());
|
|
|
|
voxelStats.str("");
|
|
char* voxelDetails = _voxelSceneStats.getItemValue(VoxelSceneStats::ITEM_VOXELS);
|
|
voxelStats << "Voxels Sent from Server: " << voxelDetails;
|
|
drawtext(10, statsVerticalOffset + 250, 0.10f, 0, 1.0, 0, (char *)voxelStats.str().c_str());
|
|
|
|
voxelStats.str("");
|
|
voxelDetails = _voxelSceneStats.getItemValue(VoxelSceneStats::ITEM_ELAPSED);
|
|
voxelStats << "Scene Send Time from Server: " << voxelDetails;
|
|
drawtext(10, statsVerticalOffset + 270, 0.10f, 0, 1.0, 0, (char *)voxelStats.str().c_str());
|
|
|
|
voxelStats.str("");
|
|
voxelDetails = _voxelSceneStats.getItemValue(VoxelSceneStats::ITEM_ENCODE);
|
|
voxelStats << "Encode Time on Server: " << voxelDetails;
|
|
drawtext(10, statsVerticalOffset + 290, 0.10f, 0, 1.0, 0, (char *)voxelStats.str().c_str());
|
|
|
|
voxelStats.str("");
|
|
voxelDetails = _voxelSceneStats.getItemValue(VoxelSceneStats::ITEM_MODE);
|
|
voxelStats << "Sending Mode: " << voxelDetails;
|
|
drawtext(10, statsVerticalOffset + 310, 0.10f, 0, 1.0, 0, (char *)voxelStats.str().c_str());
|
|
|
|
Node *avatarMixer = NodeList::getInstance()->soloNodeOfType(NODE_TYPE_AVATAR_MIXER);
|
|
char avatarMixerStats[200];
|
|
|
|
if (avatarMixer) {
|
|
sprintf(avatarMixerStats, "Avatar Mixer: %.f kbps, %.f pps",
|
|
roundf(avatarMixer->getAverageKilobitsPerSecond()),
|
|
roundf(avatarMixer->getAveragePacketsPerSecond()));
|
|
} else {
|
|
sprintf(avatarMixerStats, "No Avatar Mixer");
|
|
}
|
|
|
|
drawtext(10, statsVerticalOffset + 330, 0.10f, 0, 1.0, 0, avatarMixerStats);
|
|
drawtext(10, statsVerticalOffset + 450, 0.10f, 0, 1.0, 0, (char *)LeapManager::statusString().c_str());
|
|
|
|
if (_perfStatsOn) {
|
|
// Get the PerfStats group details. We need to allocate and array of char* long enough to hold 1+groups
|
|
char** perfStatLinesArray = new char*[PerfStat::getGroupCount()+1];
|
|
int lines = PerfStat::DumpStats(perfStatLinesArray);
|
|
int atZ = 150; // arbitrary place on screen that looks good
|
|
for (int line=0; line < lines; line++) {
|
|
drawtext(10, statsVerticalOffset + atZ, 0.10f, 0, 1.0, 0, perfStatLinesArray[line]);
|
|
delete perfStatLinesArray[line]; // we're responsible for cleanup
|
|
perfStatLinesArray[line]=NULL;
|
|
atZ+=20; // height of a line
|
|
}
|
|
delete []perfStatLinesArray; // we're responsible for cleanup
|
|
}
|
|
}
|
|
|
|
// Visualize the avatar's thrust as a red line anchored at the voxel currently
// being dragged. Drawn only while the mouse button is held down.
void Application::renderThrustAtVoxel(const glm::vec3& thrust) {
    if (!_mousePressed) {
        return;
    }
    glColor3f(1, 0, 0);
    glLineWidth(2.0f);
    glBegin(GL_LINES);
    glm::vec3 anchor = getMouseVoxelWorldCoordinates(_mouseVoxelDragging);
    glm::vec3 tip = anchor + thrust;
    glVertex3f(anchor.x, anchor.y, anchor.z);
    glVertex3f(tip.x, tip.y, tip.z);
    glEnd();
}
|
|
|
|
void Application::renderLineToTouchedVoxel() {
|
|
// Draw a teal line to the voxel I am currently dragging on
|
|
if (_mousePressed) {
|
|
glColor3f(0, 1, 1);
|
|
glLineWidth(2.0f);
|
|
glBegin(GL_LINES);
|
|
glm::vec3 voxelTouched = getMouseVoxelWorldCoordinates(_mouseVoxelDragging);
|
|
glVertex3f(voxelTouched.x, voxelTouched.y, voxelTouched.z);
|
|
glm::vec3 headPosition = _myAvatar.getHeadJointPosition();
|
|
glVertex3fv(&headPosition.x);
|
|
glEnd();
|
|
}
|
|
}
|
|
|
|
|
|
// Map a point from normalized device coordinates ([-1,1] x [-1,1], y-up,
// origin at screen center) into window pixel coordinates (origin top-left,
// y-down):
//
//   NDC (-1,-1) -> pixel (0, windowHeight)
//   NDC ( 1, 1) -> pixel (windowWidth, 0)
glm::vec2 Application::getScaledScreenPoint(glm::vec2 projectedPoint) {
    float halfWidth = _glWidget->width() / 2.0f;
    float halfHeight = _glWidget->height() / 2.0f;

    // shift x from [-1,1] to [0,2] then scale to pixels
    float screenX = (projectedPoint.x + 1.0) * halfWidth;
    // same for y, but flipped so that +1 in NDC lands at the top of the window
    float screenY = ((projectedPoint.y + 1.0) * -halfHeight) + _glWidget->height();

    return glm::vec2(screenX, screenY);
}
|
|
|
|
// render the coverage map on screen
|
|
// Debug overlay: draw the V2 occlusion coverage map as cyan line segments.
void Application::renderCoverageMapV2() {
    // lines only — no lighting, and a single glBegin/glEnd pair shared by the
    // whole recursive traversal
    glDisable(GL_LIGHTING);
    glLineWidth(2.0);
    glBegin(GL_LINES);
    glColor3f(0,1,1);

    renderCoverageMapsV2Recursively(&_voxels.myCoverageMapV2);

    glEnd();
    glEnable(GL_LIGHTING);
}
|
|
|
|
// Recursively emit line-segment vertices for the V2 coverage map: a covered
// node draws its own bounding-box outline; an uncovered node descends into
// its children. Must be called between glBegin(GL_LINES)/glEnd().
void Application::renderCoverageMapsV2Recursively(CoverageMapV2* map) {
    if (map->isCovered()) {
        BoundingBox box = map->getBoundingBox();

        glm::vec2 start = getScaledScreenPoint(box.getVertex(0));
        glm::vec2 previous(start);

        // connect consecutive vertices of the box outline
        for (int vertexIndex = 1; vertexIndex < box.getVertexCount(); vertexIndex++) {
            glm::vec2 current = getScaledScreenPoint(box.getVertex(vertexIndex));

            glVertex2f(previous.x, previous.y);
            glVertex2f(current.x, current.y);
            previous = current;
        }

        // close the loop back to the first vertex
        glVertex2f(previous.x, previous.y);
        glVertex2f(start.x, start.y);
    } else {
        // not covered at this level — recurse into whichever children exist
        for (int childIndex = 0; childIndex < CoverageMapV2::NUMBER_OF_CHILDREN; childIndex++) {
            CoverageMapV2* child = map->getChild(childIndex);
            if (child) {
                renderCoverageMapsV2Recursively(child);
            }
        }
    }
}
|
|
|
|
// render the coverage map on screen
|
|
// Debug overlay: draw the (V1) occlusion coverage map as blue line segments.
void Application::renderCoverageMap() {
    // lines only — no lighting, and a single glBegin/glEnd pair shared by the
    // whole recursive traversal
    glDisable(GL_LIGHTING);
    glLineWidth(2.0);
    glBegin(GL_LINES);
    glColor3f(0,0,1);

    renderCoverageMapsRecursively(&_voxels.myCoverageMap);

    glEnd();
    glEnable(GL_LIGHTING);
}
|
|
|
|
// Recursively emit line-segment vertices outlining every projected polygon
// stored in the (V1) coverage map, then descend into child maps.
// Each polygon's outline color encodes which combination of view-frustum
// faces it was projected from. Must be called between glBegin(GL_LINES)/glEnd().
void Application::renderCoverageMapsRecursively(CoverageMap* map) {
    for (int i = 0; i < map->getPolygonCount(); i++) {

        VoxelProjectedPolygon* polygon = map->getPolygon(i);

        // color-code by projection type (combination of frustum faces)
        if (polygon->getProjectionType() == (PROJECTION_RIGHT | PROJECTION_NEAR | PROJECTION_BOTTOM)) {
            glColor3f(.5,0,0); // dark red
        } else if (polygon->getProjectionType() == (PROJECTION_NEAR | PROJECTION_RIGHT)) {
            glColor3f(.5,.5,0); // dark yellow
        } else if (polygon->getProjectionType() == (PROJECTION_NEAR | PROJECTION_LEFT)) {
            glColor3f(.5,.5,.5); // gray
        } else if (polygon->getProjectionType() == (PROJECTION_NEAR | PROJECTION_LEFT | PROJECTION_BOTTOM)) {
            glColor3f(.5,0,.5); // dark magenta
        } else if (polygon->getProjectionType() == (PROJECTION_NEAR | PROJECTION_BOTTOM)) {
            glColor3f(.75,0,0); // red
        } else if (polygon->getProjectionType() == (PROJECTION_NEAR | PROJECTION_TOP)) {
            glColor3f(1,0,1); // magenta
        } else if (polygon->getProjectionType() == (PROJECTION_NEAR | PROJECTION_LEFT | PROJECTION_TOP)) {
            glColor3f(0,0,1); // Blue
        } else if (polygon->getProjectionType() == (PROJECTION_NEAR | PROJECTION_RIGHT | PROJECTION_TOP)) {
            glColor3f(0,1,0); // green
        } else if (polygon->getProjectionType() == (PROJECTION_NEAR)) {
            glColor3f(1,1,0); // yellow
        } else if (polygon->getProjectionType() == (PROJECTION_FAR | PROJECTION_RIGHT | PROJECTION_BOTTOM)) {
            glColor3f(0,.5,.5); // dark cyan
        } else {
            glColor3f(1,0,0); // fallback for any unlisted combination
        }

        glm::vec2 firstPoint = getScaledScreenPoint(polygon->getVertex(0));
        glm::vec2 lastPoint(firstPoint);

        // connect consecutive vertices of the polygon outline
        // (loop index renamed from "i", which shadowed the polygon index above)
        for (int vertexIndex = 1; vertexIndex < polygon->getVertexCount(); vertexIndex++) {
            glm::vec2 thisPoint = getScaledScreenPoint(polygon->getVertex(vertexIndex));

            glVertex2f(lastPoint.x, lastPoint.y);
            glVertex2f(thisPoint.x, thisPoint.y);
            lastPoint = thisPoint;
        }

        // close the loop back to the first vertex
        glVertex2f(lastPoint.x, lastPoint.y);
        glVertex2f(firstPoint.x, firstPoint.y);
    }

    // iterate our children and call render on them.
    // NOTE: this tree is made of CoverageMap nodes, so use CoverageMap's own
    // child count (previously used CoverageMapV2::NUMBER_OF_CHILDREN, which
    // only worked because the two constants happened to agree).
    for (int i = 0; i < CoverageMap::NUMBER_OF_CHILDREN; i++) {
        CoverageMap* childMap = map->getChild(i);
        if (childMap) {
            renderCoverageMapsRecursively(childMap);
        }
    }
}
|
|
|
|
|
|
|
|
/////////////////////////////////////////////////////////////////////////////////////
|
|
// renderViewFrustum()
|
|
//
|
|
// Description: this will render the view frustum bounds for EITHER the head
|
|
// or the "myCamera".
|
|
//
|
|
// Frustum rendering mode. For debug purposes, we allow drawing the frustum in a couple of different ways.
|
|
// We can draw it with each of these parts:
|
|
// * Origin Direction/Up/Right vectors - these will be drawn at the point of the camera
|
|
// * Near plane - this plane is drawn very close to the origin point.
|
|
// * Right/Left planes - these two planes are drawn between the near and far planes.
|
|
// * Far plane - the plane is drawn in the distance.
|
|
// Modes - the following modes, will draw the following parts.
|
|
// * All - draws all the parts listed above
|
|
// * Planes - draws the planes but not the origin vectors
|
|
// * Origin Vectors - draws the origin vectors ONLY
|
|
// * Near Plane - draws only the near plane
|
|
// * Far Plane - draws only the far plane
|
|
void Application::renderViewFrustum(ViewFrustum& viewFrustum) {
    // Debug visualization of the view frustum. Which parts are drawn depends
    // on Menu's frustum draw mode (ALL / PLANES / VECTORS / NEAR / FAR /
    // KEYHOLE) — see the descriptive comment block above this function.
    // NOTE: the order of glBegin/glEnd, color changes, and vertex emission is
    // significant; all line drawing happens inside one glBegin(GL_LINES) pair.

    // Load it with the latest details!
    loadViewFrustum(_myCamera, viewFrustum);

    // camera-offset origin and basis vectors of the frustum
    glm::vec3 position = viewFrustum.getOffsetPosition();
    glm::vec3 direction = viewFrustum.getOffsetDirection();
    glm::vec3 up = viewFrustum.getOffsetUp();
    glm::vec3 right = viewFrustum.getOffsetRight();

    // Get ready to draw some lines
    glDisable(GL_LIGHTING);
    glColor4f(1.0, 1.0, 1.0, 1.0);
    glLineWidth(1.0);
    glBegin(GL_LINES);

    if (Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_ALL
        || Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_VECTORS) {
        // Calculate the origin direction vectors (short 0.2-unit segments from the camera position)
        glm::vec3 lookingAt = position + (direction * 0.2f);
        glm::vec3 lookingAtUp = position + (up * 0.2f);
        glm::vec3 lookingAtRight = position + (right * 0.2f);

        // Looking At = white
        glColor3f(1,1,1);
        glVertex3f(position.x, position.y, position.z);
        glVertex3f(lookingAt.x, lookingAt.y, lookingAt.z);

        // Looking At Up = purple
        glColor3f(1,0,1);
        glVertex3f(position.x, position.y, position.z);
        glVertex3f(lookingAtUp.x, lookingAtUp.y, lookingAtUp.z);

        // Looking At Right = cyan
        glColor3f(0,1,1);
        glVertex3f(position.x, position.y, position.z);
        glVertex3f(lookingAtRight.x, lookingAtRight.y, lookingAtRight.z);
    }

    if (Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_ALL
        || Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_PLANES
        || Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_NEAR_PLANE) {
        // Drawing the bounds of the frustum — near plane rectangle in red
        // viewFrustum.getNear plane - bottom edge
        glColor3f(1,0,0);
        glVertex3f(viewFrustum.getNearBottomLeft().x, viewFrustum.getNearBottomLeft().y, viewFrustum.getNearBottomLeft().z);
        glVertex3f(viewFrustum.getNearBottomRight().x, viewFrustum.getNearBottomRight().y, viewFrustum.getNearBottomRight().z);

        // viewFrustum.getNear plane - top edge
        glVertex3f(viewFrustum.getNearTopLeft().x, viewFrustum.getNearTopLeft().y, viewFrustum.getNearTopLeft().z);
        glVertex3f(viewFrustum.getNearTopRight().x, viewFrustum.getNearTopRight().y, viewFrustum.getNearTopRight().z);

        // viewFrustum.getNear plane - right edge
        glVertex3f(viewFrustum.getNearBottomRight().x, viewFrustum.getNearBottomRight().y, viewFrustum.getNearBottomRight().z);
        glVertex3f(viewFrustum.getNearTopRight().x, viewFrustum.getNearTopRight().y, viewFrustum.getNearTopRight().z);

        // viewFrustum.getNear plane - left edge
        glVertex3f(viewFrustum.getNearBottomLeft().x, viewFrustum.getNearBottomLeft().y, viewFrustum.getNearBottomLeft().z);
        glVertex3f(viewFrustum.getNearTopLeft().x, viewFrustum.getNearTopLeft().y, viewFrustum.getNearTopLeft().z);
    }

    if (Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_ALL
        || Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_PLANES
        || Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_FAR_PLANE) {
        // far plane rectangle in green
        // viewFrustum.getFar plane - bottom edge
        glColor3f(0,1,0); // GREEN!!!
        glVertex3f(viewFrustum.getFarBottomLeft().x, viewFrustum.getFarBottomLeft().y, viewFrustum.getFarBottomLeft().z);
        glVertex3f(viewFrustum.getFarBottomRight().x, viewFrustum.getFarBottomRight().y, viewFrustum.getFarBottomRight().z);

        // viewFrustum.getFar plane - top edge
        glVertex3f(viewFrustum.getFarTopLeft().x, viewFrustum.getFarTopLeft().y, viewFrustum.getFarTopLeft().z);
        glVertex3f(viewFrustum.getFarTopRight().x, viewFrustum.getFarTopRight().y, viewFrustum.getFarTopRight().z);

        // viewFrustum.getFar plane - right edge
        glVertex3f(viewFrustum.getFarBottomRight().x, viewFrustum.getFarBottomRight().y, viewFrustum.getFarBottomRight().z);
        glVertex3f(viewFrustum.getFarTopRight().x, viewFrustum.getFarTopRight().y, viewFrustum.getFarTopRight().z);

        // viewFrustum.getFar plane - left edge
        glVertex3f(viewFrustum.getFarBottomLeft().x, viewFrustum.getFarBottomLeft().y, viewFrustum.getFarBottomLeft().z);
        glVertex3f(viewFrustum.getFarTopLeft().x, viewFrustum.getFarTopLeft().y, viewFrustum.getFarTopLeft().z);
    }

    if (Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_ALL
        || Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_PLANES) {
        // side planes (near-to-far connecting edges) plus the focal plane
        // RIGHT PLANE IS CYAN
        // right plane - bottom edge - viewFrustum.getNear to distant
        glColor3f(0,1,1);
        glVertex3f(viewFrustum.getNearBottomRight().x, viewFrustum.getNearBottomRight().y, viewFrustum.getNearBottomRight().z);
        glVertex3f(viewFrustum.getFarBottomRight().x, viewFrustum.getFarBottomRight().y, viewFrustum.getFarBottomRight().z);

        // right plane - top edge - viewFrustum.getNear to distant
        glVertex3f(viewFrustum.getNearTopRight().x, viewFrustum.getNearTopRight().y, viewFrustum.getNearTopRight().z);
        glVertex3f(viewFrustum.getFarTopRight().x, viewFrustum.getFarTopRight().y, viewFrustum.getFarTopRight().z);

        // LEFT PLANE IS BLUE
        // left plane - bottom edge - viewFrustum.getNear to distant
        glColor3f(0,0,1);
        glVertex3f(viewFrustum.getNearBottomLeft().x, viewFrustum.getNearBottomLeft().y, viewFrustum.getNearBottomLeft().z);
        glVertex3f(viewFrustum.getFarBottomLeft().x, viewFrustum.getFarBottomLeft().y, viewFrustum.getFarBottomLeft().z);

        // left plane - top edge - viewFrustum.getNear to distant
        glVertex3f(viewFrustum.getNearTopLeft().x, viewFrustum.getNearTopLeft().y, viewFrustum.getNearTopLeft().z);
        glVertex3f(viewFrustum.getFarTopLeft().x, viewFrustum.getFarTopLeft().y, viewFrustum.getFarTopLeft().z);

        // focal plane (magenta): interpolated between near and far by the
        // fraction of the clip range at which the focal length falls
        // focal plane - bottom edge
        glColor3f(1.0f, 0.0f, 1.0f);
        float focalProportion = (viewFrustum.getFocalLength() - viewFrustum.getNearClip()) /
            (viewFrustum.getFarClip() - viewFrustum.getNearClip());
        glm::vec3 focalBottomLeft = glm::mix(viewFrustum.getNearBottomLeft(), viewFrustum.getFarBottomLeft(), focalProportion);
        glm::vec3 focalBottomRight = glm::mix(viewFrustum.getNearBottomRight(),
            viewFrustum.getFarBottomRight(), focalProportion);
        glVertex3f(focalBottomLeft.x, focalBottomLeft.y, focalBottomLeft.z);
        glVertex3f(focalBottomRight.x, focalBottomRight.y, focalBottomRight.z);

        // focal plane - top edge
        glm::vec3 focalTopLeft = glm::mix(viewFrustum.getNearTopLeft(), viewFrustum.getFarTopLeft(), focalProportion);
        glm::vec3 focalTopRight = glm::mix(viewFrustum.getNearTopRight(), viewFrustum.getFarTopRight(), focalProportion);
        glVertex3f(focalTopLeft.x, focalTopLeft.y, focalTopLeft.z);
        glVertex3f(focalTopRight.x, focalTopRight.y, focalTopRight.z);

        // focal plane - left edge
        glVertex3f(focalBottomLeft.x, focalBottomLeft.y, focalBottomLeft.z);
        glVertex3f(focalTopLeft.x, focalTopLeft.y, focalTopLeft.z);

        // focal plane - right edge
        glVertex3f(focalBottomRight.x, focalBottomRight.y, focalBottomRight.z);
        glVertex3f(focalTopRight.x, focalTopRight.y, focalTopRight.z);
    }
    glEnd();
    glEnable(GL_LIGHTING);

    if (Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_ALL
        || Menu::getInstance()->getFrustumDrawMode() == FRUSTUM_DRAW_MODE_KEYHOLE) {
        // Draw the keyhole (a yellow wire sphere at the camera position);
        // drawn outside glBegin/glEnd because glutWireSphere issues its own geometry
        float keyholeRadius = viewFrustum.getKeyholeRadius();
        if (keyholeRadius > 0.0f) {
            glPushMatrix();
            glColor4f(1, 1, 0, 1);
            glTranslatef(position.x, position.y, position.z); // where we actually want it!
            glutWireSphere(keyholeRadius, 20, 20);
            glPopMatrix();
        }
    }
}
|
|
|
|
// Play a synthesized sound effect at the position of the voxel that was just
// added. Large voxels get an FM "dubstep" sweep; small voxels get an 808-style
// sine whose pitch depends on the voxel size. The commented-out blocks below
// are earlier waveform experiments kept for reference.
void Application::injectVoxelAddedSoundEffect() {
    // 11025 samples == half a second at the injector's 22050 Hz-style rate used here
    AudioInjector* voxelInjector = AudioInjectionManager::injectorWithCapacity(11025);

    if (voxelInjector) {
        // emit the sound from where the voxel was placed
        voxelInjector->setPosition(glm::vec3(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z));
        //voxelInjector->setBearing(-1 * _myAvatar.getAbsoluteHeadYaw());
        // volume scales with the square of the voxel size (255 is max, and also default value)
        voxelInjector->setVolume (16 * pow (_mouseVoxel.s, 2) / .0000001); //255 is max, and also default value

        /* earlier experiment: simple pulse wave
        for (int i = 0; i
         < 22050; i++) {
         if (i % 4 == 0) {
         voxelInjector->addSample(4000);
         } else if (i % 4 == 1) {
         voxelInjector->addSample(0);
         } else if (i % 4 == 2) {
         voxelInjector->addSample(-4000);
         } else {
         voxelInjector->addSample(0);
         }
         */

        // voxels larger than this get the "big" sound variant
        const float BIG_VOXEL_MIN_SIZE = .01f;

        for (int i = 0; i < 11025; i++) {

            /*
             A440 square wave
             if (sin(i * 2 * PIE / 50)>=0) {
             voxelInjector->addSample(4000);
             } else {
             voxelInjector->addSample(-4000);
             }
             */

            if (_mouseVoxel.s > BIG_VOXEL_MIN_SIZE) {
                // FM sweep for big voxels
                voxelInjector->addSample(20000 * sin((i * 2 * PIE) / (500 * sin((i + 1) / 200))));
            } else {
                // 808-style tone for small voxels; pitch varies with log of voxel size
                voxelInjector->addSample(16000 * sin(i / (1.5 * log (_mouseVoxel.s / .0001) * ((i + 11025) / 5512.5)))); //808
            }
        }

        // reference library of other waveforms tried during development:
        //voxelInjector->addSample(32500 * sin(i/(2 * 1 * ((i+5000)/5512.5)))); //80
        //voxelInjector->addSample(20000 * sin(i/(6 * (_mouseVoxel.s/.001) *((i+5512.5)/5512.5)))); //808
        //voxelInjector->addSample(20000 * sin(i/(6 * ((i+5512.5)/5512.5)))); //808
        //voxelInjector->addSample(4000 * sin(i * 2 * PIE /50)); //A440 sine wave
        //voxelInjector->addSample(4000 * sin(i * 2 * PIE /50) * sin (i/500)); //A440 sine wave with amplitude modulation

        //FM library
        //voxelInjector->addSample(20000 * sin((i * 2 * PIE) /(500*sin((i+1)/200)))); //FM 1 dubstep
        //voxelInjector->addSample(20000 * sin((i * 2 * PIE) /(300*sin((i+1)/5.0)))); //FM 2 flange sweep
        //voxelInjector->addSample(10000 * sin((i * 2 * PIE) /(500*sin((i+1)/500.0)))); //FM 3 resonant pulse

        // hand the filled injector off to the injection manager's thread
        AudioInjectionManager::threadInjector(voxelInjector);
    }
}
|
|
|
|
bool Application::maybeEditVoxelUnderCursor() {
|
|
if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelAddMode)
|
|
|| Menu::getInstance()->isOptionChecked(MenuOption::VoxelColorMode)) {
|
|
if (_mouseVoxel.s != 0) {
|
|
PACKET_TYPE message = Menu::getInstance()->isOptionChecked(MenuOption::DestructiveAddVoxel)
|
|
? PACKET_TYPE_SET_VOXEL_DESTRUCTIVE
|
|
: PACKET_TYPE_SET_VOXEL;
|
|
_voxelEditSender.sendVoxelEditMessage(message, _mouseVoxel);
|
|
|
|
// create the voxel locally so it appears immediately
|
|
_voxels.createVoxel(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s,
|
|
_mouseVoxel.red, _mouseVoxel.green, _mouseVoxel.blue,
|
|
Menu::getInstance()->isOptionChecked(MenuOption::DestructiveAddVoxel));
|
|
|
|
// Implement voxel fade effect
|
|
VoxelFade fade(VoxelFade::FADE_OUT, 1.0f, 1.0f, 1.0f);
|
|
const float VOXEL_BOUNDS_ADJUST = 0.01f;
|
|
float slightlyBigger = _mouseVoxel.s * VOXEL_BOUNDS_ADJUST;
|
|
fade.voxelDetails.x = _mouseVoxel.x - slightlyBigger;
|
|
fade.voxelDetails.y = _mouseVoxel.y - slightlyBigger;
|
|
fade.voxelDetails.z = _mouseVoxel.z - slightlyBigger;
|
|
fade.voxelDetails.s = _mouseVoxel.s + slightlyBigger + slightlyBigger;
|
|
_voxelFades.push_back(fade);
|
|
|
|
// inject a sound effect
|
|
injectVoxelAddedSoundEffect();
|
|
|
|
// remember the position for drag detection
|
|
_justEditedVoxel = true;
|
|
|
|
}
|
|
} else if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelDeleteMode)) {
|
|
deleteVoxelUnderCursor();
|
|
VoxelFade fade(VoxelFade::FADE_OUT, 1.0f, 1.0f, 1.0f);
|
|
const float VOXEL_BOUNDS_ADJUST = 0.01f;
|
|
float slightlyBigger = _mouseVoxel.s * VOXEL_BOUNDS_ADJUST;
|
|
fade.voxelDetails.x = _mouseVoxel.x - slightlyBigger;
|
|
fade.voxelDetails.y = _mouseVoxel.y - slightlyBigger;
|
|
fade.voxelDetails.z = _mouseVoxel.z - slightlyBigger;
|
|
fade.voxelDetails.s = _mouseVoxel.s + slightlyBigger + slightlyBigger;
|
|
_voxelFades.push_back(fade);
|
|
|
|
} else if (Menu::getInstance()->isOptionChecked(MenuOption::VoxelGetColorMode)) {
|
|
eyedropperVoxelUnderCursor();
|
|
} else {
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
void Application::deleteVoxelUnderCursor() {
|
|
if (_mouseVoxel.s != 0) {
|
|
// sending delete to the server is sufficient, server will send new version so we see updates soon enough
|
|
_voxelEditSender.sendVoxelEditMessage(PACKET_TYPE_ERASE_VOXEL, _mouseVoxel);
|
|
AudioInjector* voxelInjector = AudioInjectionManager::injectorWithCapacity(5000);
|
|
|
|
if (voxelInjector) {
|
|
voxelInjector->setPosition(glm::vec3(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z));
|
|
//voxelInjector->setBearing(0); //straight down the z axis
|
|
voxelInjector->setVolume (255); //255 is max, and also default value
|
|
|
|
|
|
for (int i = 0; i < 5000; i++) {
|
|
voxelInjector->addSample(10000 * sin((i * 2 * PIE) / (500 * sin((i + 1) / 500.0)))); //FM 3 resonant pulse
|
|
//voxelInjector->addSample(20000 * sin((i) /((4 / _mouseVoxel.s) * sin((i)/(20 * _mouseVoxel.s / .001))))); //FM 2 comb filter
|
|
}
|
|
|
|
AudioInjectionManager::threadInjector(voxelInjector);
|
|
}
|
|
}
|
|
// remember the position for drag detection
|
|
_justEditedVoxel = true;
|
|
}
|
|
|
|
void Application::eyedropperVoxelUnderCursor() {
|
|
VoxelNode* selectedNode = _voxels.getVoxelAt(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);
|
|
if (selectedNode && selectedNode->isColored()) {
|
|
QColor selectedColor(selectedNode->getColor()[RED_INDEX],
|
|
selectedNode->getColor()[GREEN_INDEX],
|
|
selectedNode->getColor()[BLUE_INDEX]);
|
|
|
|
if (selectedColor.isValid()) {
|
|
QAction* voxelPaintColorAction = Menu::getInstance()->getActionForOption(MenuOption::VoxelPaintColor);
|
|
voxelPaintColorAction->setData(selectedColor);
|
|
voxelPaintColorAction->setIcon(Swatch::createIcon(selectedColor));
|
|
}
|
|
}
|
|
}
|
|
|
|
void Application::toggleFollowMode() {
|
|
glm::vec3 mouseRayOrigin, mouseRayDirection;
|
|
_viewFrustum.computePickRay(_pieMenu.getX() / (float)_glWidget->width(),
|
|
_pieMenu.getY() / (float)_glWidget->height(),
|
|
mouseRayOrigin, mouseRayDirection);
|
|
glm::vec3 eyePositionIgnored;
|
|
uint16_t nodeIDIgnored;
|
|
Avatar* leadingAvatar = isLookingAtOtherAvatar(mouseRayOrigin,
|
|
mouseRayDirection,
|
|
eyePositionIgnored,
|
|
nodeIDIgnored);
|
|
|
|
_myAvatar.follow(leadingAvatar);
|
|
}
|
|
|
|
void Application::resetSensors() {
|
|
_headMouseX = _mouseX = _glWidget->width() / 2;
|
|
_headMouseY = _mouseY = _glWidget->height() / 2;
|
|
|
|
if (_serialHeadSensor.isActive()) {
|
|
_serialHeadSensor.resetAverages();
|
|
}
|
|
_webcam.reset();
|
|
_faceshift.reset();
|
|
QCursor::setPos(_headMouseX, _headMouseY);
|
|
_myAvatar.reset();
|
|
_myTransmitter.resetLevels();
|
|
_myAvatar.setVelocity(glm::vec3(0,0,0));
|
|
_myAvatar.setThrust(glm::vec3(0,0,0));
|
|
}
|
|
|
|
// Recursively adjust the context of "plain key" shortcuts (those without a
// Ctrl/Alt/Meta modifier) on a widget and all of its descendant widgets, so
// they don't steal ordinary keystrokes while e.g. a text entry has focus.
static void setShortcutsEnabled(QWidget* widget, bool enabled) {
    foreach (QAction* action, widget->actions()) {
        QKeySequence shortcut = action->shortcut();
        if (shortcut.isEmpty()) {
            continue;
        }
        if ((shortcut[0] & (Qt::CTRL | Qt::ALT | Qt::META)) == 0) {
            // it's a shortcut that may coincide with a "regular" key, so switch its context
            action->setShortcutContext(enabled ? Qt::WindowShortcut : Qt::WidgetShortcut);
        }
    }
    // descend into child widgets
    foreach (QObject* child, widget->children()) {
        if (child->isWidgetType()) {
            setShortcutsEnabled(static_cast<QWidget*>(child), enabled);
        }
    }
}
|
|
|
|
void Application::setMenuShortcutsEnabled(bool enabled) {
|
|
setShortcutsEnabled(_window->menuBar(), enabled);
|
|
}
|
|
|
|
// Hide the mouse cursor when running fullscreen with an Oculus connected;
// otherwise show the standard arrow.
void Application::updateCursor() {
    bool hideCursor = OculusManager::isConnected()
        && _window->windowState().testFlag(Qt::WindowFullScreen);
    _glWidget->setCursor(hideCursor ? Qt::BlankCursor : Qt::ArrowCursor);
}
|
|
|
|
// Lazily attach an Avatar as a node's linked data the first time it's needed.
void Application::attachNewHeadToNode(Node* newNode) {
    if (!newNode->getLinkedData()) {
        newNode->setLinkedData(new Avatar(newNode));
    }
}
|
|
|
|
// NodeList hook for newly-added nodes. Intentionally empty: per-node state
// (an Avatar) is attached lazily via attachNewHeadToNode() rather than here.
void Application::nodeAdded(Node* node) {
}
|
|
|
|
// NodeList hook for departing nodes: when a voxel server we had heard from
// goes away, queue a fade-out effect over its jurisdiction's root region.
void Application::nodeKilled(Node* node) {
    if (node->getType() != NODE_TYPE_VOXEL_SERVER) {
        return;
    }
    uint16_t nodeID = node->getNodeID();
    // ignore servers we never received jurisdiction details from
    if (_voxelServerJurisdictions.find(nodeID) == _voxelServerJurisdictions.end()) {
        return;
    }

    unsigned char* rootCode = _voxelServerJurisdictions[nodeID].getRootOctalCode();
    VoxelPositionSize rootDetails;
    voxelDetailsForCode(rootCode, rootDetails);

    printf("voxel server going away...... v[%f, %f, %f, %f]\n",
        rootDetails.x, rootDetails.y, rootDetails.z, rootDetails.s);

    // Add the jurisdiction's root box to the list of "fade outs", shrunk a
    // touch so the fade doesn't z-fight with neighboring jurisdictions
    VoxelFade fade(VoxelFade::FADE_OUT, NODE_KILLED_RED, NODE_KILLED_GREEN, NODE_KILLED_BLUE);
    fade.voxelDetails = rootDetails;
    const float slightly_smaller = 0.99;
    fade.voxelDetails.s = fade.voxelDetails.s * slightly_smaller;
    _voxelFades.push_back(fade);
}
|
|
|
|
// Handle an incoming voxel-stats packet: unpack it into our (shared) scene
// stats accumulator, and track the sending server's jurisdiction so we can
// show fade-in effects for new servers and fade-outs when they die.
// Returns the number of bytes consumed from the message.
int Application::parseVoxelStats(unsigned char* messageData, ssize_t messageLength, sockaddr senderAddress) {

    // parse the incoming stats data, and stick it into our averaging stats object for now... even though this
    // means mixing in stats from potentially multiple servers.
    int statsMessageLength = _voxelSceneStats.unpackFromMessage(messageData, messageLength);

    // But, also identify the sender, and keep track of the contained jurisdiction root for this server
    Node* voxelServer = NodeList::getInstance()->nodeWithAddress(&senderAddress);

    // quick fix for crash... why would voxelServer be NULL?
    // (stats silently dropped if we can't resolve the sender to a known node)
    if (voxelServer) {
        uint16_t nodeID = voxelServer->getNodeID();

        VoxelPositionSize rootDetails;
        voxelDetailsForCode(_voxelSceneStats.getJurisdictionRoot(), rootDetails);

        // see if this is the first we've heard of this node...
        if (_voxelServerJurisdictions.find(nodeID) == _voxelServerJurisdictions.end()) {
            printf("stats from new voxel server... v[%f, %f, %f, %f]\n",
                rootDetails.x, rootDetails.y, rootDetails.z, rootDetails.s);

            // Add the jurisditionDetails object to the list of "fade outs"
            // (shrunk slightly so the fade box doesn't z-fight with neighbors)
            VoxelFade fade(VoxelFade::FADE_OUT, NODE_ADDED_RED, NODE_ADDED_GREEN, NODE_ADDED_BLUE);
            fade.voxelDetails = rootDetails;
            const float slightly_smaller = 0.99;
            fade.voxelDetails.s = fade.voxelDetails.s * slightly_smaller;
            _voxelFades.push_back(fade);
        }
        // store jurisdiction details for later use
        // This is bit of fiddling is because JurisdictionMap assumes it is the owner of the values used to construct it
        // but VoxelSceneStats thinks it's just returning a reference to it's contents. So we need to make a copy of the
        // details from the VoxelSceneStats to construct the JurisdictionMap
        JurisdictionMap jurisdictionMap;
        jurisdictionMap.copyContents(_voxelSceneStats.getJurisdictionRoot(), _voxelSceneStats.getJurisdictionEndNodes());
        _voxelServerJurisdictions[nodeID] = jurisdictionMap;
    }
    return statsMessageLength;
}
|
|
|
|
// Receive packets from other nodes/servers and decide what to do with them!
|
|
void* Application::networkReceive(void* args) {
|
|
sockaddr senderAddress;
|
|
ssize_t bytesReceived;
|
|
|
|
Application* app = Application::getInstance();
|
|
while (!app->_stopNetworkReceiveThread) {
|
|
if (NodeList::getInstance()->getNodeSocket()->receive(&senderAddress, app->_incomingPacket, &bytesReceived)) {
|
|
|
|
app->_packetCount++;
|
|
app->_bytesCount += bytesReceived;
|
|
|
|
if (packetVersionMatch(app->_incomingPacket)) {
|
|
// only process this packet if we have a match on the packet version
|
|
switch (app->_incomingPacket[0]) {
|
|
case PACKET_TYPE_TRANSMITTER_DATA_V2:
|
|
// V2 = IOS transmitter app
|
|
app->_myTransmitter.processIncomingData(app->_incomingPacket, bytesReceived);
|
|
|
|
break;
|
|
case PACKET_TYPE_MIXED_AUDIO:
|
|
app->_audio.addReceivedAudioToBuffer(app->_incomingPacket, bytesReceived);
|
|
break;
|
|
case PACKET_TYPE_VOXEL_DATA:
|
|
case PACKET_TYPE_VOXEL_DATA_MONOCHROME:
|
|
case PACKET_TYPE_Z_COMMAND:
|
|
case PACKET_TYPE_ERASE_VOXEL:
|
|
case PACKET_TYPE_VOXEL_STATS:
|
|
case PACKET_TYPE_ENVIRONMENT_DATA: {
|
|
// add this packet to our list of voxel packets and process them on the voxel processing
|
|
app->_voxelProcessor.queueReceivedPacket(senderAddress, app->_incomingPacket, bytesReceived);
|
|
break;
|
|
}
|
|
case PACKET_TYPE_BULK_AVATAR_DATA:
|
|
NodeList::getInstance()->processBulkNodeData(&senderAddress,
|
|
app->_incomingPacket,
|
|
bytesReceived);
|
|
getInstance()->_bandwidthMeter.inputStream(BandwidthMeter::AVATARS).updateValue(bytesReceived);
|
|
break;
|
|
case PACKET_TYPE_AVATAR_VOXEL_URL:
|
|
processAvatarVoxelURLMessage(app->_incomingPacket, bytesReceived);
|
|
break;
|
|
case PACKET_TYPE_AVATAR_FACE_VIDEO:
|
|
processAvatarFaceVideoMessage(app->_incomingPacket, bytesReceived);
|
|
break;
|
|
default:
|
|
NodeList::getInstance()->processNodeData(&senderAddress, app->_incomingPacket, bytesReceived);
|
|
break;
|
|
}
|
|
}
|
|
} else if (!app->_enableNetworkThread) {
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (app->_enableNetworkThread) {
|
|
pthread_exit(0);
|
|
}
|
|
return NULL;
|
|
}
|
|
|
|
// Callback invoked after a packet is sent on our behalf; credits the packet's
// size against the voxel output-bandwidth channel for the on-screen meter.
void Application::packetSentNotification(ssize_t length) {
    auto& voxelOutputStream = _bandwidthMeter.outputStream(BandwidthMeter::VOXELS);
    voxelOutputStream.updateValue(length);
}