Mirror of https://github.com/overte-org/overte.git

commit 3d6752ad90: merge upstream/master into andrew/inertia
Conflicts: interface/src/Application.cpp

37 changed files with 451 additions and 159 deletions
BUILD.md (7 changes)

@@ -24,9 +24,12 @@ In order for CMake to find the Qt5 find modules, you will need to set an ENV var
 For example, a Qt5 5.2.0 installation to /usr/local/qt5 would require that QT_CMAKE_PREFIX_PATH be set with the following command. This can either be entered directly into your shell session before you build or in your shell profile (e.g.: ~/.bash_profile, ~/.bashrc, ~/.zshrc - this depends on your shell and environment).
 
     export QT_CMAKE_PREFIX_PATH=/usr/local/qt/5.2.0/clang_64/lib/cmake/
 
-The path it needs to be set to will depend on where and how Qt5 was installed.
+The path it needs to be set to will depend on where and how Qt5 was installed. e.g.
+    export QT_CMAKE_PREFIX_PATH=/usr/local/qt/5.2.0/clang_64/lib/cmake/
+    export QT_CMAKE_PREFIX_PATH=/usr/local/Cellar/qt5/5.2.1/lib/cmake
+    export QT_CMAKE_PREFIX_PATH=/usr/local/opt/qt5/lib/cmake
 
 ####Generating build files
 
 Create a build directory in the root of your checkout and then run the CMake build from there. This will keep the rest of the directory clean.
@@ -32,14 +32,6 @@ set(CMAKE_INCLUDE_CURRENT_DIR ON)
 # Instruct CMake to run moc automatically when needed.
 set(CMAKE_AUTOMOC ON)
 
-if (APPLE)
-  exec_program(uname ARGS -v OUTPUT_VARIABLE DARWIN_VERSION)
-  string(REGEX MATCH "[0-9]+" DARWIN_VERSION ${DARWIN_VERSION})
-  if (DARWIN_VERSION GREATER 12)
-    set(CMAKE_CXX_FLAGS "-stdlib=libstdc++")
-  endif (DARWIN_VERSION GREATER 12)
-endif (APPLE)
-
 # targets not supported on windows
 if (NOT WIN32)
   add_subdirectory(animation-server)
@@ -138,11 +138,14 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
         // this was a used buffer, push the output pointer forwards
         PositionalAudioRingBuffer* audioBuffer = *i;
 
+        const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 100;
+
         if (audioBuffer->willBeAddedToMix()) {
             audioBuffer->shiftReadPosition(audioBuffer->getSamplesPerFrame());
             audioBuffer->setWillBeAddedToMix(false);
         } else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector
-                && audioBuffer->hasStarted() && audioBuffer->isStarved()) {
+                && audioBuffer->hasStarted() && audioBuffer->isStarved()
+                && audioBuffer->getConsecutiveNotMixedCount() > INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD) {
             // this is an empty audio buffer that has starved, safe to delete
             // also delete its sequence number stats
             QUuid streamIdentifier = ((InjectedAudioRingBuffer*)audioBuffer)->getStreamIdentifier();
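Assembled from the hunk above, a sketch of the combined deletion guard (the final erase-and-delete step is paraphrased, not quoted from the commit):

    // An injected buffer is deleted only after it has starved AND the mixer has
    // skipped it for more than 100 consecutive frames, so briefly quiet
    // injectors are no longer torn down prematurely.
    const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 100;
    if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector
            && audioBuffer->hasStarted() && audioBuffer->isStarved()
            && audioBuffer->getConsecutiveNotMixedCount() > INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD) {
        QUuid streamIdentifier = ((InjectedAudioRingBuffer*)audioBuffer)->getStreamIdentifier();
        // ... remove the stats entry for streamIdentifier and delete audioBuffer ...
    }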
@@ -87,13 +87,14 @@ void MetavoxelServer::sendDeltas() {
     int elapsed = now - _lastSend;
     _lastSend = now;
 
-    _sendTimer.start(qMax(0, 2 * SEND_INTERVAL - elapsed));
+    _sendTimer.start(qMax(0, 2 * SEND_INTERVAL - qMax(elapsed, SEND_INTERVAL)));
 }
 
 MetavoxelSession::MetavoxelSession(const SharedNodePointer& node, MetavoxelServer* server) :
     Endpoint(node, new PacketRecord(), NULL),
     _server(server),
-    _reliableDeltaChannel(NULL) {
+    _reliableDeltaChannel(NULL),
+    _reliableDeltaID(0) {
 
     connect(&_sequencer, SIGNAL(receivedHighPriorityMessage(const QVariant&)), SLOT(handleMessage(const QVariant&)));
     connect(&_sequencer, SIGNAL(sendAcknowledged(int)), SLOT(checkReliableDeltaReceived()));
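The new qMax clamp keeps the send timer from over-waiting when the previous send fired early. A quick sketch of the arithmetic, using an illustrative SEND_INTERVAL of 50 ms (the real constant is defined elsewhere in the server source):

    // wait = max(0, 2*I - max(elapsed, I)) never exceeds one SEND_INTERVAL:
    //   elapsed = 30 ms  -> old formula: 100 - 30 = 70 ms; new: 100 - 50 = 50 ms
    //   elapsed = 120 ms -> old: max(0, -20) = 0 ms;       new: max(0, -20) = 0 ms
    int nextWait = qMax(0, 2 * SEND_INTERVAL - qMax(elapsed, SEND_INTERVAL));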
@@ -108,9 +109,7 @@ void MetavoxelSession::update() {
     }
     // if we're sending a reliable delta, wait until it's acknowledged
     if (_reliableDeltaChannel) {
-        Bitstream& out = _sequencer.startPacket();
-        out << QVariant::fromValue(MetavoxelDeltaPendingMessage());
-        _sequencer.endPacket();
+        sendPacketGroup();
         return;
     }
     Bitstream& out = _sequencer.startPacket();

@@ -134,12 +133,16 @@ void MetavoxelSession::update() {
 
         // go back to the beginning with the current packet and note that there's a delta pending
         _sequencer.getOutputStream().getUnderlying().device()->seek(start);
-        out << QVariant::fromValue(MetavoxelDeltaPendingMessage());
+        MetavoxelDeltaPendingMessage msg = { ++_reliableDeltaID };
+        out << QVariant::fromValue(msg);
         _sequencer.endPacket();
 
     } else {
         _sequencer.endPacket();
     }
 
+    // perhaps send additional packets to fill out the group
+    sendPacketGroup(1);
 }
 
 void MetavoxelSession::handleMessage(const QVariant& message, Bitstream& in) {

@@ -176,3 +179,17 @@ void MetavoxelSession::checkReliableDeltaReceived() {
     _reliableDeltaData = MetavoxelData();
     _reliableDeltaChannel = NULL;
 }
+
+void MetavoxelSession::sendPacketGroup(int alreadySent) {
+    int additionalPackets = _sequencer.notePacketGroup() - alreadySent;
+    for (int i = 0; i < additionalPackets; i++) {
+        Bitstream& out = _sequencer.startPacket();
+        if (_reliableDeltaChannel) {
+            MetavoxelDeltaPendingMessage msg = { _reliableDeltaID };
+            out << QVariant::fromValue(msg);
+        } else {
+            out << QVariant();
+        }
+        _sequencer.endPacket();
+    }
+}
@@ -74,6 +74,8 @@ private slots:
 
 private:
 
+    void sendPacketGroup(int alreadySent = 0);
+
     MetavoxelServer* _server;
 
     MetavoxelLOD _lod;

@@ -83,6 +85,7 @@ private:
     MetavoxelData _reliableDeltaData;
    MetavoxelLOD _reliableDeltaLOD;
     Bitstream::WriteMappings _reliableDeltaWriteMappings;
+    int _reliableDeltaID;
 };
 
 #endif // hifi_MetavoxelServer_h
@@ -26,8 +26,8 @@ else ()
   set(RTMIDI_SEARCH_DIRS "${RTMIDI_ROOT_DIR}" "$ENV{HIFI_LIB_DIR}/rtmidi")
 
   find_path(RTMIDI_INCLUDE_DIR RtMidi.h PATH_SUFFIXES include HINTS ${RTMIDI_SEARCH_DIRS})
-  find_file(RTMIDI_CPP NAMES RtMidi.cpp PATH_SUFFIXES src HINTS ${RTMIDI_SEARCH_DIRS})
+  find_library(RTMIDI_LIBRARY NAMES rtmidi PATH_SUFFIXES lib HINTS ${RTMIDI_SEARCH_DIRS})
 
   include(FindPackageHandleStandardArgs)
-  find_package_handle_standard_args(RTMIDI DEFAULT_MSG RTMIDI_INCLUDE_DIR RTMIDI_CPP)
+  find_package_handle_standard_args(RTMIDI DEFAULT_MSG RTMIDI_INCLUDE_DIR RTMIDI_LIBRARY)
 endif ()
examples/clap.js (173 changes)
@@ -1,72 +1,147 @@
 //
-//  cameraExample.js
+//  clap.js
+//  examples
 //
 //  Copyright 2014 High Fidelity, Inc.
 //
-//  This sample script watches your hydra hands and makes clapping sound when they come close together fast
+//  This sample script watches your hydra hands and makes clapping sound when they come close together fast,
+//  and also watches for the 'shift' key and claps when that key is pressed. Clapping multiple times by pressing
+//  the shift key again makes the animation and sound match your pace of clapping.
 //
 //  Distributed under the Apache License, Version 2.0.
 //  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
 
-function length(v) {
-    return Math.sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
-}
+var clapAnimation = "https://s3-us-west-1.amazonaws.com/highfidelity-public/animations/ClapAnimations/ClapHands_Standing.fbx";
+var ANIMATION_FRAMES_PER_CLAP = 10.0;
+var startEndFrames = [];
+startEndFrames.push({ start: 0, end: 10});
+startEndFrames.push({ start: 10, end: 20});
+startEndFrames.push({ start: 20, end: 30});
+startEndFrames.push({ start: 30, end: 40});
+startEndFrames.push({ start: 41, end: 51});
+startEndFrames.push({ start: 53, end: 0});
+
+var lastClapFrame = 0;
+var lastAnimFrame = 0;
 
 function printVector(v) {
     print(v.x + ", " + v.y + ", " + v.z + "\n");
 }
 
+var claps = [];
+claps.push(new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/claps/BClap1Rvb.wav"));
+claps.push(new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/claps/BClap2Rvb.wav"));
+claps.push(new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/claps/BClap3Rvb.wav"));
+claps.push(new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/claps/BClap4Rvb.wav"));
+claps.push(new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/claps/BClap5Rvb.wav"));
+claps.push(new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/claps/BClap6Rvb.wav"));
+claps.push(new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/claps/BClap7Rvb.wav"));
+claps.push(new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/claps/BClap8Rvb.wav"));
+claps.push(new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/claps/BClap9Rvb.wav"));
+claps.push(new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/claps/BClap10Rvb.wav"));
+var numberOfSounds = claps.length;
 
-function vMinus(a, b) {
-    var rval = { x: a.x - b.x, y: a.y - b.y, z: a.z - b.z };
-    return rval;
-}
+var clappingNow = false;
+var collectedClicks = 0;
 
-// First, load the clap sound from a URL
-var clap1 = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/claps/clap1.raw");
-var clap2 = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/claps/clap2.raw");
-var clap3 = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/claps/clap3.raw");
-var clap4 = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/claps/clap4.raw");
-var clap5 = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/claps/clap5.raw");
-var clap6 = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/claps/clap6.raw");
-
-var clapping = new Array();
-clapping[0] = false;
-clapping[1] = false;
+var clickStartTime, clickEndTime;
+var clickClappingNow = false;
+var CLAP_START_RATE = 15.0;
+var clapRate = CLAP_START_RATE;
+var startedTimer = false;
 
 function maybePlaySound(deltaTime) {
-    // Set the location and other info for the sound to play
-    var palm1Position = Controller.getSpatialControlPosition(0);
-    var palm2Position = Controller.getSpatialControlPosition(2);
-    var distanceBetween = length(vMinus(palm1Position, palm2Position));
-
-    for (var palm = 0; palm < 2; palm++) {
-        var palmVelocity = Controller.getSpatialControlVelocity(palm * 2 + 1);
-        var speed = length(palmVelocity);
-
-        const CLAP_SPEED = 0.2;
-        const CLAP_DISTANCE = 0.2;
-
-        if (!clapping[palm] && (distanceBetween < CLAP_DISTANCE) && (speed > CLAP_SPEED)) {
-            var options = new AudioInjectionOptions();
-            options.position = palm1Position;
-            options.volume = speed / 2.0;
-            if (options.volume > 1.0) options.volume = 1.0;
-            which = Math.floor((Math.random() * 6) + 1);
-            if (which == 1) { Audio.playSound(clap1, options); }
-            else if (which == 2) { Audio.playSound(clap2, options); }
-            else if (which == 3) { Audio.playSound(clap3, options); }
-            else if (which == 4) { Audio.playSound(clap4, options); }
-            else if (which == 5) { Audio.playSound(clap5, options); }
-            else { Audio.playSound(clap6, options); }
-            Audio.playSound(clap, options);
-            clapping[palm] = true;
-        } else if (clapping[palm] && (speed < (CLAP_SPEED / 4.0))) {
-            clapping[palm] = false;
-        }
-    }
+    var animationDetails = MyAvatar.getAnimationDetails(clapAnimation);
+    var frame = Math.floor(animationDetails.frameIndex);
+
+    if (frame != lastAnimFrame) {
+        lastAnimFrame = frame;
+    }
+
+    for (var i = 0; i < startEndFrames.length; i++) {
+        if (frame == startEndFrames[i].start && (frame != lastClapFrame)) {
+            playClap(1.0, Camera.getPosition());
+            lastClapFrame = frame;
+        }
+    }
+
+    var palm1Position = MyAvatar.getLeftPalmPosition();
+    var palm2Position = MyAvatar.getRightPalmPosition();
+    var distanceBetween = Vec3.length(Vec3.subtract(palm1Position, palm2Position));
+
+    var palm1Velocity = Controller.getSpatialControlVelocity(1);
+    var palm2Velocity = Controller.getSpatialControlVelocity(3);
+    var closingVelocity = Vec3.length(Vec3.subtract(palm1Velocity, palm2Velocity));
+
+    const CLAP_SPEED = 0.7;
+    const CLAP_DISTANCE = 0.15;
+
+    if ((closingVelocity > CLAP_SPEED) && (distanceBetween < CLAP_DISTANCE) && !clappingNow) {
+        var volume = closingVelocity / 2.0;
+        if (volume > 1.0) volume = 1.0;
+        playClap(volume, palm1Position);
+        clappingNow = true;
+    } else if (clappingNow && (distanceBetween > CLAP_DISTANCE * 1.2)) {
+        clappingNow = false;
+    }
 }
 
+function playClap(volume, position) {
+    var options = new AudioInjectionOptions();
+    options.position = position;
+    options.volume = 1.0;
+    var clip = Math.floor(Math.random() * numberOfSounds);
+    Audio.playSound(claps[clip], options);
+}
+
+var FASTEST_CLAP_INTERVAL = 100.0;
+var SLOWEST_CLAP_INTERVAL = 2000.0;
+
+Controller.keyPressEvent.connect(function(event) {
+    if(event.text == "SHIFT") {
+        if (!clickClappingNow) {
+            clickClappingNow = true;
+            clickStartTime = new Date();
+            playClap(1.0, Camera.getPosition());
+            lastClapFrame = 0;
+            MyAvatar.startAnimation(clapAnimation, clapRate, 1.0, true, false);
+        } else {
+            //  Adjust animation speed for measured clicking interval
+            clickEndTime = new Date();
+            var milliseconds = clickEndTime - clickStartTime;
+            clickStartTime = new Date();
+            if ((milliseconds < SLOWEST_CLAP_INTERVAL) && (milliseconds > FASTEST_CLAP_INTERVAL)) {
+                clapRate = ANIMATION_FRAMES_PER_CLAP * (1000.0 / milliseconds);
+                playClap(1.0, Camera.getPosition());
+                MyAvatar.stopAnimation(clapAnimation);
+                MyAvatar.startAnimation(clapAnimation, clapRate, 1.0, true, false);
+            }
+            collectedClicks = collectedClicks + 1;
+        }
+    }
+});
+
+var CLAP_END_WAIT_MSECS = 300;
+Controller.keyReleaseEvent.connect(function(event) {
+    if (event.text == "SHIFT") {
+        collectedClicks = 0;
+        if (!startedTimer) {
+            collectedClicks = 0;
+            Script.setTimeout(stopClapping, CLAP_END_WAIT_MSECS);
+            startedTimer = true;
+        }
+    }
+});
+
+function stopClapping() {
+    if (collectedClicks == 0) {
+        startedTimer = false;
+        MyAvatar.stopAnimation(clapAnimation);
+        clapRate = CLAP_START_RATE;
+        clickClappingNow = false;
+    } else {
+        startedTimer = false;
+    }
+}
+
 //  Connect a call back that happens every frame
@@ -18,13 +18,16 @@ var RIGHT = 1;
 var lastLeftFrame = 0;
 var lastRightFrame = 0;
 
-var LAST_FRAME = 11.0; // What is the number of the last frame we want to use in the animation?
-var SMOOTH_FACTOR = 0.80;
+var leftDirection = true;
+var rightDirection = true;
+
+var LAST_FRAME = 15.0; // What is the number of the last frame we want to use in the animation?
+var SMOOTH_FACTOR = 0.0;
+var MAX_FRAMES = 30.0;
 
 Script.update.connect(function(deltaTime) {
-    var leftTriggerValue = Math.sqrt(Controller.getTriggerValue(LEFT));
-    var rightTriggerValue = Math.sqrt(Controller.getTriggerValue(RIGHT));
+    var leftTriggerValue = Controller.getTriggerValue(LEFT);
+    var rightTriggerValue = Controller.getTriggerValue(RIGHT);
 
     var leftFrame, rightFrame;
 

@@ -32,10 +35,31 @@ Script.update.connect(function(deltaTime) {
     leftFrame = (leftTriggerValue * LAST_FRAME) * (1.0 - SMOOTH_FACTOR) + lastLeftFrame * SMOOTH_FACTOR;
     rightFrame = (rightTriggerValue * LAST_FRAME) * (1.0 - SMOOTH_FACTOR) + lastRightFrame * SMOOTH_FACTOR;
 
+    if (!leftDirection) {
+        leftFrame = MAX_FRAMES - leftFrame;
+    }
+    if (!rightDirection) {
+        rightFrame = MAX_FRAMES - rightFrame;
+    }
+
+    if ((leftTriggerValue == 1.0) && (leftDirection == true)) {
+        leftDirection = false;
+        lastLeftFrame = MAX_FRAMES - leftFrame;
+    } else if ((leftTriggerValue == 0.0) && (leftDirection == false)) {
+        leftDirection = true;
+        lastLeftFrame = leftFrame;
+    }
+    if ((rightTriggerValue == 1.0) && (rightDirection == true)) {
+        rightDirection = false;
+        lastRightFrame = MAX_FRAMES - rightFrame;
+    } else if ((rightTriggerValue == 0.0) && (rightDirection == false)) {
+        rightDirection = true;
+        lastRightFrame = rightFrame;
+    }
+
     if ((leftFrame != lastLeftFrame) && leftHandAnimation.length){
         MyAvatar.stopAnimation(leftHandAnimation);
         MyAvatar.startAnimation(leftHandAnimation, 30.0, 1.0, false, true, leftFrame, leftFrame);
     }
     if ((rightFrame != lastRightFrame) && rightHandAnimation.length) {
         MyAvatar.stopAnimation(rightHandAnimation);
@@ -111,16 +111,6 @@ if (APPLE)
   SET(INTERFACE_SRCS ${INTERFACE_SRCS} "${CMAKE_CURRENT_SOURCE_DIR}/interface.icns")
 endif()
 
-# RtMidi for scripted MIDI control
-find_package(RtMidi)
-
-if (RTMIDI_FOUND AND NOT DISABLE_RTMIDI)
-  add_definitions(-DHAVE_RTMIDI)
-  include_directories(SYSTEM ${RTMIDI_INCLUDE_DIR})
-
-  set(INTERFACE_SRCS ${INTERFACE_SRCS} "${RTMIDI_CPP}")
-endif ()
-
 # create the executable, make it a bundle on OS X
 add_executable(${TARGET_NAME} MACOSX_BUNDLE ${INTERFACE_SRCS} ${QM})

@@ -151,6 +141,7 @@ find_package(Sixense)
 find_package(Visage)
 find_package(ZLIB)
 find_package(Qxmpp)
+find_package(RtMidi)
 
 # include the Sixense library for Razer Hydra if available
 if (SIXENSE_FOUND AND NOT DISABLE_SIXENSE)

@@ -223,11 +214,18 @@ if (QXMPP_FOUND AND NOT DISABLE_QXMPP)
   target_link_libraries(${TARGET_NAME} "${QXMPP_LIBRARY}")
 endif (QXMPP_FOUND AND NOT DISABLE_QXMPP)
 
-# link CoreMIDI if we're using RtMidi
-if (RTMIDI_FOUND AND APPLE)
-  find_library(CoreMIDI CoreMIDI)
-  add_definitions(-D__MACOSX_CORE__)
-  target_link_libraries(${TARGET_NAME} ${CoreMIDI})
+# and with RtMidi for RtMidi control
+if (RTMIDI_FOUND AND NOT DISABLE_RTMIDI)
+
+  add_definitions(-DHAVE_RTMIDI)
+  include_directories(SYSTEM ${RTMIDI_INCLUDE_DIR})
+  target_link_libraries(${TARGET_NAME} "${RTMIDI_LIBRARY}")
+
+  if (APPLE)
+    find_library(CoreMIDI CoreMIDI)
+    add_definitions(-D__MACOSX_CORE__)
+    target_link_libraries(${TARGET_NAME} ${CoreMIDI})
+  endif()
+endif()
 
 # include headers for interface and InterfaceConfig.
interface/external/rtmidi/readme.txt (vendored, 4 changes)
@@ -7,7 +7,9 @@ Stephen Birarda, June 30, 2014
 
 2. Copy RtMidi.h to externals/rtmidi/include.
 
-3. Copy RtMidi.cpp to externals/rtmidi/src
+3. Compile the RtMidi library.
+
+3. Copy either librtmidi.dylib (dynamic) or librtmidi.a (static) to externals/rtmidi/lib
 
 4. Delete your build directory, run cmake and build, and you should be all set.
@@ -354,6 +354,9 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
 
     // Set the sixense filtering
     _sixenseManager.setFilter(Menu::getInstance()->isOptionChecked(MenuOption::FilterSixense));
+
+    // Set hand controller velocity filtering
+    _sixenseManager.setLowVelocityFilter(Menu::getInstance()->isOptionChecked(MenuOption::LowVelocityFilter));
 
     checkVersion();

@@ -601,9 +604,19 @@ void Application::paintGL() {
 
     } else if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
-        _myCamera.setTightness(0.0f);
-        _myCamera.setDistance(MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
-        _myCamera.setTargetRotation(_myAvatar->getWorldAlignedOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
-        _myCamera.setTargetPosition(_myAvatar->getHead()->calculateAverageEyePosition() + glm::vec3(0, _raiseMirror * _myAvatar->getScale(), 0));
+        //Only behave like a true mirror when in the OR
+        if (OculusManager::isConnected()) {
+            _myCamera.setDistance(MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
+            _myCamera.setTargetRotation(_myAvatar->getWorldAlignedOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
+            _myCamera.setTargetPosition(_myAvatar->getHead()->calculateAverageEyePosition() + glm::vec3(0, _raiseMirror * _myAvatar->getScale(), 0));
+        } else {
+            _myCamera.setTightness(0.0f);
+            glm::vec3 eyePosition = _myAvatar->getHead()->calculateAverageEyePosition();
+            float headHeight = eyePosition.y - _myAvatar->getPosition().y;
+            _myCamera.setDistance(MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
+            _myCamera.setTargetPosition(_myAvatar->getPosition() + glm::vec3(0, headHeight + (_raiseMirror * _myAvatar->getScale()), 0));
+            _myCamera.setTargetRotation(_myAvatar->getWorldAlignedOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
+        }
     }
 
     // Update camera position

@@ -683,12 +696,9 @@ void Application::paintGL() {
 
     {
         PerformanceTimer perfTimer("renderOverlay");
-        //If alpha is 1, we can render directly to the screen.
-        if (_applicationOverlay.getAlpha() == 1.0f) {
-            _applicationOverlay.renderOverlay();
-        } else {
-            //Render to to texture so we can fade it
-            _applicationOverlay.renderOverlay(true);
+        // PrioVR will only work if renderOverlay is called, calibration is connected to Application::renderingOverlay()
+        _applicationOverlay.renderOverlay(true);
+        if (Menu::getInstance()->isOptionChecked(MenuOption::UserInterface)) {
             _applicationOverlay.displayOverlayTexture();
         }
     }

@@ -713,6 +723,7 @@ void Application::resizeGL(int width, int height) {
     resetCamerasOnResizeGL(_myCamera, width, height);
 
     glViewport(0, 0, width, height); // shouldn't this account for the menu???
+    _applicationOverlay.resize();
 
     updateProjectionMatrix();
     glLoadIdentity();

@@ -1010,6 +1021,9 @@ void Application::keyPressEvent(QKeyEvent* event) {
                 Menu::getInstance()->triggerOption(MenuOption::FullscreenMirror);
             }
             break;
+        case Qt::Key_Slash:
+            Menu::getInstance()->triggerOption(MenuOption::UserInterface);
+            break;
         case Qt::Key_F:
             if (isShifted) {
                 Menu::getInstance()->triggerOption(MenuOption::DisplayFrustum);

@@ -1029,7 +1043,7 @@ void Application::keyPressEvent(QKeyEvent* event) {
             }
             break;
-        case Qt::Key_Slash:
+        case Qt::Key_Percent:
             Menu::getInstance()->triggerOption(MenuOption::Stats);
             break;
         case Qt::Key_Plus:

@@ -1426,6 +1440,10 @@ void Application::setRenderVoxels(bool voxelRender) {
     }
 }
 
+void Application::setLowVelocityFilter(bool lowVelocityFilter) {
+    getSixenseManager()->setLowVelocityFilter(lowVelocityFilter);
+}
+
 void Application::doKillLocalVoxels() {
     _wantToKillLocalVoxels = true;
 }

@@ -2778,7 +2796,8 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
 
     if (!selfAvatarOnly) {
         // Render the world box
-        if (whichCamera.getMode() != CAMERA_MODE_MIRROR && Menu::getInstance()->isOptionChecked(MenuOption::Stats)) {
+        if (whichCamera.getMode() != CAMERA_MODE_MIRROR && Menu::getInstance()->isOptionChecked(MenuOption::Stats) &&
+                Menu::getInstance()->isOptionChecked(MenuOption::UserInterface)) {
             PerformanceTimer perfTimer("worldBox");
             renderWorldBox();
         }
@@ -317,6 +317,7 @@ public slots:
     void nudgeVoxelsByVector(const VoxelDetail& sourceVoxel, const glm::vec3& nudgeVec);
 
     void setRenderVoxels(bool renderVoxels);
+    void setLowVelocityFilter(bool lowVelocityFilter);
     void doKillLocalVoxels();
     void loadDialog();
     void loadScriptURLDialog();
@@ -67,7 +67,7 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _proceduralAudioOutput(NULL),
     _proceduralOutputDevice(NULL),
     _inputRingBuffer(0),
-    _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO),
+    _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, false, 100),
     _isStereoInput(false),
     _averagedLatency(0.0),
     _measuredJitter(0),

@@ -869,14 +869,16 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
         _numFramesDisplayStarve = 10;
    }
 
-    // if there is anything in the ring buffer, decide what to do
-    if (_ringBuffer.samplesAvailable() > 0) {
-
-        int numNetworkOutputSamples = _ringBuffer.samplesAvailable();
-        int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
-
-        QByteArray outputBuffer;
-        outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
+    int numNetworkOutputSamples;
+    if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) {
+        numNetworkOutputSamples = _ringBuffer.samplesAvailable();
+    } else {
+        int numSamplesAudioOutputRoomFor = _audioOutput->bytesFree() / sizeof(int16_t);
+        numNetworkOutputSamples = std::min(_ringBuffer.samplesAvailable(), (int)(numSamplesAudioOutputRoomFor * networkOutputToOutputRatio));
+    }
+
+    // if there is data in the ring buffer and room in the audio output, decide what to do
+    if (numNetworkOutputSamples > 0) {
 
         int numSamplesNeededToStartPlayback = std::min(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2),
                                                        _ringBuffer.getSampleCapacity());
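To make the new clamp concrete, a worked sketch with illustrative numbers (not from the commit): with 2048 bytes free in the output device and a network-to-device ratio of 0.5, at most 512 network samples are pulled, however many the ring buffer holds:

    int bytesFree = 2048;                        // assumed device headroom
    float networkOutputToOutputRatio = 0.5f;     // assumed rate ratio
    int samplesAvailable = 4096;                 // assumed ring-buffer backlog
    int numSamplesAudioOutputRoomFor = bytesFree / (int)sizeof(int16_t);   // 1024
    int numNetworkOutputSamples = std::min(samplesAvailable,
        (int)(numSamplesAudioOutputRoomFor * networkOutputToOutputRatio)); // 512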
@@ -885,6 +887,11 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
             // We are still waiting for enough samples to begin playback
             // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback;
         } else {
+            int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
+
+            QByteArray outputBuffer;
+            outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
+
             // We are either already playing back, or we have enough audio to start playing back.
             //qDebug() << "pushing " << numNetworkOutputSamples;
             _ringBuffer.setIsStarved(false);
interface/src/Hair.cpp (new file, 35 lines)
@@ -0,0 +1,35 @@
+//
+//  Hair.cpp
+//  interface/src
+//
+//  Created by Philip on June 26, 2014
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+//  Creates single flexible vertlet-integrated strands that can be used for hair/fur/grass
+
+#include "Hair.h"
+
+#include "Util.h"
+#include "world.h"
+
+
+Hair::Hair() {
+    qDebug() << "Creating Hair";
+}
+
+void Hair::simulate(float deltaTime) {
+}
+
+void Hair::render() {
+    //
+    //  Before calling this function, translate/rotate to the origin of the owning object
+    glPushMatrix();
+    glColor3f(1.0f, 1.0f, 0.0f);
+    glutSolidSphere(1.0f, 15, 15);
+    glPopMatrix();
+}
+
interface/src/Hair.h (new file, 35 lines)
@@ -0,0 +1,35 @@
+//
+//  Hair.h
+//  interface/src
+//
+//  Created by Philip on June 26, 2014
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_Hair_h
+#define hifi_Hair_h
+
+#include <iostream>
+
+#include <glm/glm.hpp>
+#include <SharedUtil.h>
+
+#include "GeometryUtil.h"
+#include "InterfaceConfig.h"
+#include "Util.h"
+
+
+class Hair {
+public:
+    Hair();
+    void simulate(float deltaTime);
+    void render();
+
+private:
+
+};
+
+#endif // hifi_Hair_h
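Hair itself is a stub so far; per the Avatar and MyAvatar hunks further down, owners keep a QVector<Hair*> and tick each strand every frame, and render() expects the caller to have already translated/rotated to the owning object's origin. A sketch of that driving pattern (the wrapper names are hypothetical):

    QVector<Hair*> _hairs;

    void simulateHairs(float deltaTime) {   // hypothetical wrapper
        foreach (Hair* hair, _hairs) {
            hair->simulate(deltaTime);
        }
    }

    void renderHairs() {                    // hypothetical wrapper
        // caller must already be at the owner's origin (see Hair::render above)
        foreach (Hair* hair, _hairs) {
            hair->render();
        }
    }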
@@ -276,6 +276,7 @@ Menu::Menu() :
     addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::Mirror, Qt::SHIFT | Qt::Key_H, true);
     addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::FullscreenMirror, Qt::Key_H, false,
                                            appInstance, SLOT(cameraMenuChanged()));
+    addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::UserInterface, Qt::Key_Slash, true);
 
     addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::EnableVRMode, 0,
                                            false,

@@ -326,7 +327,7 @@ Menu::Menu() :
 
 
     addDisabledActionAndSeparator(viewMenu, "Stats");
-    addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::Stats, Qt::Key_Slash);
+    addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::Stats, Qt::Key_Percent);
     addActionToQMenuAndActionHash(viewMenu, MenuOption::Log, Qt::CTRL | Qt::Key_L, appInstance, SLOT(toggleLogDialog()));
     addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::Bandwidth, 0, true);
     addActionToQMenuAndActionHash(viewMenu, MenuOption::BandwidthDetails, 0, this, SLOT(bandwidthDetails()));

@@ -407,9 +408,6 @@ Menu::Menu() :
     addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::GlowWhenSpeaking, 0, true);
     addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::ChatCircling, 0, false);
 
-    QMenu* oculusOptionsMenu = developerMenu->addMenu("Oculus Options");
-    addCheckableActionToQMenuAndActionHash(oculusOptionsMenu, MenuOption::DisplayOculusOverlays, 0, true);
-
     QMenu* sixenseOptionsMenu = developerMenu->addMenu("Sixense Options");
     addCheckableActionToQMenuAndActionHash(sixenseOptionsMenu, MenuOption::SixenseMouseInput, 0, true);

@@ -421,6 +419,13 @@ Menu::Menu() :
                                            true,
                                            appInstance->getSixenseManager(),
                                            SLOT(setFilter(bool)));
+    addCheckableActionToQMenuAndActionHash(handOptionsMenu,
+                                           MenuOption::LowVelocityFilter,
+                                           0,
+                                           true,
+                                           appInstance,
+                                           SLOT(setLowVelocityFilter(bool)));
 
     addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::DisplayHands, 0, true);
     addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::DisplayHandTargets, 0, false);
     addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::HandsCollideWithSelf, 0, false);

@@ -575,6 +580,8 @@ Menu::Menu() :
                                   Qt::CTRL | Qt::SHIFT | Qt::Key_U,
                                   false);
 
+    addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, false);
+
     addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
                                   Qt::CTRL | Qt::SHIFT | Qt::Key_V,
                                   this,
@@ -343,13 +343,13 @@ namespace MenuOption {
     const QString DecreaseVoxelSize = "Decrease Voxel Size";
     const QString DisableAutoAdjustLOD = "Disable Automatically Adjusting LOD";
     const QString DisableNackPackets = "Disable NACK Packets";
+    const QString DisableQAudioOutputOverflowCheck = "Disable QAudioOutput Overflow Check";
     const QString DisplayFrustum = "Display Frustum";
     const QString DisplayHands = "Display Hands";
     const QString DisplayHandTargets = "Display Hand Targets";
     const QString DisplayModelBounds = "Display Model Bounds";
     const QString DisplayModelElementProxy = "Display Model Element Bounds";
     const QString DisplayModelElementChildProxies = "Display Model Element Children";
-    const QString DisplayOculusOverlays = "Display Oculus Overlays";
     const QString DisplayTimingDetails = "Display Timing Details";
     const QString DontFadeOnVoxelServerChanges = "Don't Fade In/Out on Voxel Server Changes";
     const QString EchoLocalAudio = "Echo Local Audio";

@@ -367,6 +367,7 @@ namespace MenuOption {
     const QString Faceplus = "Faceplus";
     const QString Faceshift = "Faceshift";
     const QString FilterSixense = "Smooth Sixense Movement";
+    const QString LowVelocityFilter = "Low Velocity Filter";
     const QString FirstPerson = "First Person";
     const QString FrameTimer = "Show Timer";
     const QString FrustumRenderMode = "Render Mode";

@@ -438,6 +439,7 @@ namespace MenuOption {
     const QString UploadAttachment = "Upload Attachment Model";
     const QString UploadHead = "Upload Head Model";
     const QString UploadSkeleton = "Upload Skeleton Model";
+    const QString UserInterface = "User Interface";
     const QString Visage = "Visage";
     const QString VoxelMode = "Cycle Voxel Mode";
     const QString Voxels = "Voxels";
@@ -150,6 +150,11 @@ void Avatar::simulate(float deltaTime) {
         PerformanceTimer perfTimer("hair");
         simulateHair(deltaTime);
     }
+
+    foreach (Hair* hair, _hairs) {
+        hair->simulate(deltaTime);
+    }
+
     }
 
     // update position by velocity, and subtract the change added earlier for gravity

@@ -387,6 +392,9 @@ void Avatar::renderBody(RenderMode renderMode, float glowLevel) {
         getHead()->render(1.0f, modelRenderMode);
         if (Menu::getInstance()->isOptionChecked(MenuOption::StringHair)) {
             renderHair();
+            foreach (Hair* hair, _hairs) {
+                hair->render();
+            }
         }
     }

@@ -610,7 +618,6 @@ void Avatar::initializeHair() {
 
         }
     }
-    qDebug() << "Initialize Hair";
 }
 
 bool Avatar::shouldRenderHead(const glm::vec3& cameraPosition, RenderMode renderMode) const {
@@ -19,6 +19,7 @@
 
 #include <AvatarData.h>
 
+#include "Hair.h"
 #include "Hand.h"
 #include "Head.h"
 #include "InterfaceConfig.h"

@@ -159,6 +160,7 @@ signals:
     void collisionWithAvatar(const QUuid& myUUID, const QUuid& theirUUID, const CollisionInfo& collision);
 
 protected:
+    QVector<Hair*> _hairs;
     SkeletonModel _skeletonModel;
     QVector<Model*> _attachmentModels;
     float _bodyYawDelta;
@@ -187,6 +187,9 @@ void MyAvatar::simulate(float deltaTime) {
         PerformanceTimer perfTimer("hair");
         if (Menu::getInstance()->isOptionChecked(MenuOption::StringHair)) {
             simulateHair(deltaTime);
+            foreach (Hair* hair, _hairs) {
+                hair->simulate(deltaTime);
+            }
         }
     }

@@ -884,6 +887,9 @@ void MyAvatar::renderBody(RenderMode renderMode, float glowLevel) {
         getHead()->render(1.0f, modelRenderMode);
         if (Menu::getInstance()->isOptionChecked(MenuOption::StringHair)) {
             renderHair();
+            foreach (Hair* hair, _hairs) {
+                hair->render();
+            }
         }
     }
     getHand()->render(true, modelRenderMode);
@@ -269,7 +269,7 @@ void OculusManager::display(const glm::quat &bodyOrientation, const glm::vec3 &p
     // We only need to render the overlays to a texture once, then we just render the texture on the hemisphere
     // PrioVR will only work if renderOverlay is called, calibration is connected to Application::renderingOverlay()
     applicationOverlay.renderOverlay(true);
-    const bool displayOverlays = Menu::getInstance()->isOptionChecked(MenuOption::DisplayOculusOverlays);
+    const bool displayOverlays = Menu::getInstance()->isOptionChecked(MenuOption::UserInterface);
 
     //Bind our framebuffer object. If we are rendering the glow effect, we let the glow effect shader take care of it
     if (Menu::getInstance()->isOptionChecked(MenuOption::EnableGlowEffect)) {
@@ -32,6 +32,7 @@ SixenseManager::SixenseManager() {
 #ifdef HAVE_SIXENSE
     _lastMovement = 0;
     _amountMoved = glm::vec3(0.0f);
+    _lowVelocityFilter = false;
 
     _calibrationState = CALIBRATION_STATE_IDLE;
     // By default we assume the _neckBase (in orb frame) is as high above the orb

@@ -60,10 +61,8 @@ SixenseManager::~SixenseManager() {
 void SixenseManager::setFilter(bool filter) {
 #ifdef HAVE_SIXENSE
     if (filter) {
-        qDebug("Sixense Filter ON");
         sixenseSetFilterEnabled(1);
     } else {
-        qDebug("Sixense Filter OFF");
         sixenseSetFilterEnabled(0);
     }
 #endif

@@ -160,17 +159,21 @@ void SixenseManager::update(float deltaTime) {
         }
         palm->setRawVelocity(rawVelocity);   //  meters/sec
 
-        // Use a velocity sensitive filter to damp small motions and preserve large ones with
-        // no latency.
-        float velocityFilter = glm::clamp(1.0f - glm::length(rawVelocity), 0.0f, 1.0f);
-        palm->setRawPosition(palm->getRawPosition() * velocityFilter + position * (1.0f - velocityFilter));
-
         // adjustment for hydra controllers fit into hands
         float sign = (i == 0) ? -1.0f : 1.0f;
         rotation *= glm::angleAxis(sign * PI/4.0f, glm::vec3(0.0f, 0.0f, 1.0f));
 
-        palm->setRawRotation(safeMix(palm->getRawRotation(), rotation, 1.0f - velocityFilter));
-
+        if (_lowVelocityFilter) {
+            // Use a velocity sensitive filter to damp small motions and preserve large ones with
+            // no latency.
+            float velocityFilter = glm::clamp(1.0f - glm::length(rawVelocity), 0.0f, 1.0f);
+            palm->setRawPosition(palm->getRawPosition() * velocityFilter + position * (1.0f - velocityFilter));
+            palm->setRawRotation(safeMix(palm->getRawRotation(), rotation, 1.0f - velocityFilter));
+        } else {
+            palm->setRawPosition(position);
+            palm->setRawRotation(rotation);
+        }
+
         // use the velocity to determine whether there's any movement (if the hand isn't new)
         const float MOVEMENT_DISTANCE_THRESHOLD = 0.003f;
         _amountMoved += rawVelocity * deltaTime;
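The filter this hunk makes optional blends the previous pose toward the new one by speed. A sketch of the blend at a few sample speeds (helper name hypothetical; the math is copied from the hunk):

    // f = clamp(1 - |v|, 0, 1): at 0 m/s f = 1 (hold the old pose), at 0.5 m/s
    // f = 0.5 (average old and new), at >= 1 m/s f = 0 (raw pose, zero latency).
    glm::vec3 filteredPosition(const glm::vec3& oldPos, const glm::vec3& newPos,
                               const glm::vec3& rawVelocity) {
        float f = glm::clamp(1.0f - glm::length(rawVelocity), 0.0f, 1.0f);
        return oldPos * f + newPos * (1.0f - f);
    }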
@@ -47,6 +47,7 @@ public:
 public slots:
 
     void setFilter(bool filter);
+    void setLowVelocityFilter(bool lowVelocityFilter) { _lowVelocityFilter = lowVelocityFilter; };
 
 private:
 #ifdef HAVE_SIXENSE

@@ -80,6 +81,8 @@ private:
     bool _bumperPressed[2];
     int _oldX[2];
     int _oldY[2];
+
+    bool _lowVelocityFilter;
 };
 
 #endif // hifi_SixenseManager_h
@@ -100,6 +100,7 @@ void TV3DManager::display(Camera& whichCamera) {
     // We only need to render the overlays to a texture once, then we just render the texture as a quad
     // PrioVR will only work if renderOverlay is called, calibration is connected to Application::renderingOverlay()
     applicationOverlay.renderOverlay(true);
+    const bool displayOverlays = Menu::getInstance()->isOptionChecked(MenuOption::UserInterface);
 
     if (glowEnabled) {
         Application::getInstance()->getGlowEffect()->prepare();

@@ -128,7 +129,9 @@ void TV3DManager::display(Camera& whichCamera) {
         glLoadIdentity();
         Application::getInstance()->displaySide(whichCamera);
 
-        applicationOverlay.displayOverlayTexture3DTV(whichCamera, _aspect, fov);
+        if (displayOverlays) {
+            applicationOverlay.displayOverlayTexture3DTV(whichCamera, _aspect, fov);
+        }
     }
     glPopMatrix();
     glDisable(GL_SCISSOR_TEST);

@@ -154,7 +157,9 @@ void TV3DManager::display(Camera& whichCamera) {
         glLoadIdentity();
         Application::getInstance()->displaySide(whichCamera);
 
-        applicationOverlay.displayOverlayTexture3DTV(whichCamera, _aspect, fov);
+        if (displayOverlays) {
+            applicationOverlay.displayOverlayTexture3DTV(whichCamera, _aspect, fov);
+        }
     }
     glPopMatrix();
     glDisable(GL_SCISSOR_TEST);
@@ -40,7 +40,6 @@ ApplicationOverlay::ApplicationOverlay() :
     _framebufferObject(NULL),
     _textureFov(DEFAULT_OCULUS_UI_ANGULAR_SIZE * RADIANS_PER_DEGREE),
     _alpha(1.0f),
-    _active(true),
     _crosshairTexture(0) {
 
     memset(_reticleActive, 0, sizeof(_reticleActive));

@@ -70,8 +69,8 @@ void ApplicationOverlay::renderOverlay(bool renderToTexture) {
     QGLWidget* glWidget = application->getGLWidget();
     MyAvatar* myAvatar = application->getAvatar();
 
-    //Handle fadeing and deactivation/activation of UI
-    if (_active) {
+    //Handle fading and deactivation/activation of UI
+    if (Menu::getInstance()->isOptionChecked(MenuOption::UserInterface)) {
         _alpha += FADE_SPEED;
         if (_alpha > 1.0f) {
             _alpha = 1.0f;

@@ -485,7 +484,8 @@ void ApplicationOverlay::renderControllerPointers() {
         if (palmData->getTrigger() == 1.0f) {
             if (!triggerPressed[index]) {
                 if (bumperPressed[index]) {
-                    _active = !_active;
+                    Menu::getInstance()->setIsOptionChecked(MenuOption::UserInterface,
+                                                            !Menu::getInstance()->isOptionChecked(MenuOption::UserInterface));
                 }
                 triggerPressed[index] = true;
             }

@@ -495,7 +495,8 @@ void ApplicationOverlay::renderControllerPointers() {
         if ((controllerButtons & BUTTON_FWD)) {
             if (!bumperPressed[index]) {
                 if (triggerPressed[index]) {
-                    _active = !_active;
+                    Menu::getInstance()->setIsOptionChecked(MenuOption::UserInterface,
+                                                            !Menu::getInstance()->isOptionChecked(MenuOption::UserInterface));
                 }
                 bumperPressed[index] = true;
             }

@@ -998,6 +999,14 @@ void ApplicationOverlay::renderTexturedHemisphere() {
 
 }
 
+void ApplicationOverlay::resize() {
+    if (_framebufferObject != NULL) {
+        delete _framebufferObject;
+        _framebufferObject = NULL;
+    }
+    // _framebufferObject is recreated at the correct size the next time it is accessed via getFramebufferObject().
+}
+
 QOpenGLFramebufferObject* ApplicationOverlay::getFramebufferObject() {
     if (!_framebufferObject) {
         _framebufferObject = new QOpenGLFramebufferObject(Application::getInstance()->getGLWidget()->size());
@@ -32,6 +32,7 @@ public:
     void displayOverlayTexture3DTV(Camera& whichCamera, float aspectRatio, float fov);
     void computeOculusPickRay(float x, float y, glm::vec3& direction) const;
     void getClickLocation(int &x, int &y) const;
+    void resize();
 
     // Getters
     QOpenGLFramebufferObject* getFramebufferObject();

@@ -68,7 +69,6 @@ private:
     float _magSizeMult[NUMBER_OF_MAGNIFIERS];
 
     float _alpha;
-    bool _active;
 
     GLuint _crosshairTexture;
 };
@@ -99,7 +99,8 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::
     _listenerUnattenuatedZone(NULL),
     _desiredJitterBufferFrames(1),
     _currentJitterBufferFrames(-1),
-    _dynamicJitterBuffers(dynamicJitterBuffers)
+    _dynamicJitterBuffers(dynamicJitterBuffers),
+    _consecutiveNotMixedCount(0)
 {
 }
 

@@ -129,7 +130,7 @@ int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
         numSilentSamples = getSamplesPerFrame();
 
         if (numSilentSamples > 0) {
-            if (_currentJitterBufferFrames > _desiredJitterBufferFrames) {
+            if (_dynamicJitterBuffers && _currentJitterBufferFrames > _desiredJitterBufferFrames) {
                 // our current jitter buffer size exceeds its desired value, so ignore some silent
                 // frames to get that size as close to desired as possible
                 int samplesPerFrame = getSamplesPerFrame();

@@ -206,11 +207,12 @@ bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
     if (!isNotStarvedOrHasMinimumSamples(samplesPerFrame + desiredJitterBufferSamples)) {
         // if the buffer was starved, allow it to accrue at least the desired number of
         // jitter buffer frames before we start taking frames from it for mixing
 
         if (_shouldOutputStarveDebug) {
             _shouldOutputStarveDebug = false;
         }
 
+        _consecutiveNotMixedCount++;
         return false;
     } else if (samplesAvailable() < samplesPerFrame) {
         // if the buffer doesn't have a full frame of samples to take for mixing, it is starved

@@ -222,6 +224,7 @@ bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
         // reset our _shouldOutputStarveDebug to true so the next is printed
         _shouldOutputStarveDebug = true;
 
+        _consecutiveNotMixedCount++;
         return false;
     }
 

@@ -231,6 +234,7 @@ bool PositionalAudioRingBuffer::shouldBeAddedToMix() {
         // minus one (since a frame will be read immediately after this) is the length of the jitter buffer
         _currentJitterBufferFrames = samplesAvailable() / samplesPerFrame - 1;
         _isStarved = false;
+        _consecutiveNotMixedCount = 0;
     }
 
     // since we've read data from ring buffer at least once - we've started
@@ -83,6 +83,8 @@ public:
     int getDesiredJitterBufferFrames() const { return _desiredJitterBufferFrames; }
     int getCurrentJitterBufferFrames() const { return _currentJitterBufferFrames; }
 
+    int getConsecutiveNotMixedCount() const { return _consecutiveNotMixedCount; }
+
 protected:
     // disallow copying of PositionalAudioRingBuffer objects
     PositionalAudioRingBuffer(const PositionalAudioRingBuffer&);

@@ -107,9 +109,7 @@ protected:
     bool _dynamicJitterBuffers;
 
-    // extra stats
-    int _starveCount;
-    int _silentFramesDropped;
+    int _consecutiveNotMixedCount;
 };
 
 #endif // hifi_PositionalAudioRingBuffer_h
@@ -79,7 +79,7 @@ ReliableChannel* DatagramSequencer::getReliableInputChannel(int index) {
     return channel;
 }
 
-int DatagramSequencer::startPacketGroup(int desiredPackets) {
+int DatagramSequencer::notePacketGroup(int desiredPackets) {
     // figure out how much data we have enqueued and increase the number of packets desired
     int totalAvailable = 0;
     foreach (ReliableChannel* channel, _reliableOutputChannels) {
@@ -108,10 +108,10 @@ public:
     /// Returns the intput channel at the specified index, creating it if necessary.
     ReliableChannel* getReliableInputChannel(int index = 0);
 
-    /// Starts a packet group.
+    /// Notes that we're sending a group of packets.
     /// \param desiredPackets the number of packets we'd like to write in the group
     /// \return the number of packets to write in the group
-    int startPacketGroup(int desiredPackets = 1);
+    int notePacketGroup(int desiredPackets = 1);
 
     /// Starts a new packet for transmission.
     /// \return a reference to the Bitstream to use for writing to the packet
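Callers follow the pattern in the Endpoint hunk below: ask notePacketGroup() how many packets the group should contain (it can exceed desiredPackets when reliable channels have queued data), then write exactly that many:

    int packetsToSend = _sequencer.notePacketGroup();   // was startPacketGroup()
    for (int i = 0; i < packetsToSend; i++) {
        Bitstream& out = _sequencer.startPacket();
        // ... write this packet's message ...
        _sequencer.endPacket();
    }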
@@ -39,9 +39,12 @@ Endpoint::~Endpoint() {
 }
 
 void Endpoint::update() {
-    Bitstream& out = _sequencer.startPacket();
-    writeUpdateMessage(out);
-    _sequencer.endPacket();
+    int packetsToSend = _sequencer.notePacketGroup();
+    for (int i = 0; i < packetsToSend; i++) {
+        Bitstream& out = _sequencer.startPacket();
+        writeUpdateMessage(out);
+        _sequencer.endPacket();
+    }
 }
 
 int Endpoint::parseData(const QByteArray& packet) {
@@ -87,7 +87,8 @@ void MetavoxelClientManager::updateClient(MetavoxelClient* client) {
 MetavoxelClient::MetavoxelClient(const SharedNodePointer& node, MetavoxelClientManager* manager) :
     Endpoint(node, new PacketRecord(), new PacketRecord()),
     _manager(manager),
-    _reliableDeltaChannel(NULL) {
+    _reliableDeltaChannel(NULL),
+    _reliableDeltaID(0) {
 
     connect(_sequencer.getReliableInputChannel(RELIABLE_DELTA_CHANNEL_INDEX),
         SIGNAL(receivedMessage(const QVariant&, Bitstream&)), SLOT(handleMessage(const QVariant&, Bitstream&)));

@@ -139,10 +140,16 @@ void MetavoxelClient::handleMessage(const QVariant& message, Bitstream& in) {
             }
         }
     } else if (userType == MetavoxelDeltaPendingMessage::Type) {
-        if (!_reliableDeltaChannel) {
+        // check the id to make sure this is not a delta we've already processed
+        int id = message.value<MetavoxelDeltaPendingMessage>().id;
+        if (id > _reliableDeltaID) {
+            _reliableDeltaID = id;
             _reliableDeltaChannel = _sequencer.getReliableInputChannel(RELIABLE_DELTA_CHANNEL_INDEX);
             _reliableDeltaChannel->getBitstream().copyPersistentMappings(_sequencer.getInputStream());
             _reliableDeltaLOD = getLastAcknowledgedSendRecord()->getLOD();
+            PacketRecord* receiveRecord = getLastAcknowledgedReceiveRecord();
+            _remoteDataLOD = receiveRecord->getLOD();
+            _remoteData = receiveRecord->getData();
         }
     } else {
         Endpoint::handleMessage(message, in);
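Why the id check matters: sendPacketGroup() repeats the same MetavoxelDeltaPendingMessage in every packet of a group, so the notice can arrive several times for one delta. The server increments the id once per delta (++_reliableDeltaID), and the client only acts on an id it has not yet seen; a condensed restatement of the hunk above:

    int id = message.value<MetavoxelDeltaPendingMessage>().id;
    if (id > _reliableDeltaID) {    // first notice for this delta
        _reliableDeltaID = id;
        // open the reliable channel and prime the LOD / remote data once
    }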
@@ -74,6 +74,7 @@ private:
 
     ReliableChannel* _reliableDeltaChannel;
     MetavoxelLOD _reliableDeltaLOD;
+    int _reliableDeltaID;
 };
 
 #endif // hifi_MetavoxelClientManager_h
@@ -64,6 +64,10 @@ DECLARE_STREAMABLE_METATYPE(MetavoxelDeltaMessage)
 /// A message indicating that metavoxel delta information is being sent on a reliable channel.
 class MetavoxelDeltaPendingMessage {
     STREAMABLE
+
+public:
+
+    STREAM int id;
 };
 
 DECLARE_STREAMABLE_METATYPE(MetavoxelDeltaPendingMessage)
@@ -515,8 +515,6 @@ void ScriptEngine::run() {
 
         qint64 now = usecTimestampNow();
         float deltaTime = (float) (now - lastUpdate) / (float) USECS_PER_SECOND;
-        emit update(deltaTime);
-        lastUpdate = now;
 
         if (_engine.hasUncaughtException()) {
             int line = _engine.uncaughtExceptionLineNumber();

@@ -524,6 +522,9 @@ void ScriptEngine::run() {
             emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + _engine.uncaughtException().toString());
             _engine.clearExceptions();
         }
+
+        emit update(deltaTime);
+        lastUpdate = now;
     }
     emit scriptEnding();
@@ -647,7 +647,8 @@ TestEndpoint::TestEndpoint(Mode mode) :
     _mode(mode),
     _highPriorityMessagesToSend(0.0f),
     _reliableMessagesToSend(0.0f),
-    _reliableDeltaChannel(NULL) {
+    _reliableDeltaChannel(NULL),
+    _reliableDeltaID(0) {
 
     connect(&_sequencer, SIGNAL(receivedHighPriorityMessage(const QVariant&)),
         SLOT(handleHighPriorityMessage(const QVariant&)));

@@ -858,7 +859,7 @@ bool TestEndpoint::simulate(int iterationNumber) {
         bytesReceived += datagram.size();
         _remainingPipelineCapacity += datagram.size();
     }
-    int packetCount = _sequencer.startPacketGroup();
+    int packetCount = _sequencer.notePacketGroup();
     groupsSent++;
     maxPacketsPerGroup = qMax(maxPacketsPerGroup, packetCount);
     for (int i = 0; i < packetCount; i++) {

@@ -908,7 +909,8 @@ bool TestEndpoint::simulate(int iterationNumber) {
     // if we're sending a reliable delta, wait until it's acknowledged
     if (_reliableDeltaChannel) {
         Bitstream& out = _sequencer.startPacket();
-        out << QVariant::fromValue(MetavoxelDeltaPendingMessage());
+        MetavoxelDeltaPendingMessage msg = { _reliableDeltaID };
+        out << QVariant::fromValue(msg);
         _sequencer.endPacket();
         return false;
     }

@@ -932,7 +934,8 @@ bool TestEndpoint::simulate(int iterationNumber) {
         _reliableDeltaLOD = _lod;
 
         _sequencer.getOutputStream().getUnderlying().device()->seek(start);
-        out << QVariant::fromValue(MetavoxelDeltaPendingMessage());
+        MetavoxelDeltaPendingMessage msg = { ++_reliableDeltaID };
+        out << QVariant::fromValue(msg);
         _sequencer.endPacket();
 
     } else {

@@ -1081,15 +1084,22 @@ void TestEndpoint::handleMessage(const QVariant& message, Bitstream& in) {
 
     } else if (userType == MetavoxelDeltaMessage::Type) {
         PacketRecord* receiveRecord = getLastAcknowledgedReceiveRecord();
-        _data.readDelta(receiveRecord->getData(), receiveRecord->getLOD(), in,
-            _dataLOD = getLastAcknowledgedSendRecord()->getLOD());
+        _remoteData.readDelta(receiveRecord->getData(), receiveRecord->getLOD(), in,
+            _remoteDataLOD = getLastAcknowledgedSendRecord()->getLOD());
         in.reset();
+        _data = _remoteData;
         compareMetavoxelData();
 
     } else if (userType == MetavoxelDeltaPendingMessage::Type) {
-        if (!_reliableDeltaChannel) {
+        int id = message.value<MetavoxelDeltaPendingMessage>().id;
+        if (id > _reliableDeltaID) {
+            _reliableDeltaID = id;
             _reliableDeltaChannel = _sequencer.getReliableInputChannel(RELIABLE_DELTA_CHANNEL_INDEX);
             _reliableDeltaChannel->getBitstream().copyPersistentMappings(_sequencer.getInputStream());
             _reliableDeltaLOD = getLastAcknowledgedSendRecord()->getLOD();
+            PacketRecord* receiveRecord = getLastAcknowledgedReceiveRecord();
+            _remoteDataLOD = receiveRecord->getLOD();
+            _remoteData = receiveRecord->getData();
         }
     } else if (userType == QMetaType::QVariantList) {
         foreach (const QVariant& element, message.toList()) {

@@ -1107,7 +1117,7 @@ PacketRecord* TestEndpoint::maybeCreateSendRecord() const {
 }
 
 PacketRecord* TestEndpoint::maybeCreateReceiveRecord() const {
-    return new TestReceiveRecord(_dataLOD, (_mode == METAVOXEL_SERVER_MODE) ? MetavoxelData() : _data, _remoteState);
+    return new TestReceiveRecord(_remoteDataLOD, _remoteData, _remoteState);
 }
 
 void TestEndpoint::handleHighPriorityMessage(const QVariant& message) {

@@ -1127,9 +1137,10 @@ void TestEndpoint::handleHighPriorityMessage(const QVariant& message) {
 void TestEndpoint::handleReliableMessage(const QVariant& message, Bitstream& in) {
     if (message.userType() == MetavoxelDeltaMessage::Type) {
         PacketRecord* receiveRecord = getLastAcknowledgedReceiveRecord();
-        _data.readDelta(receiveRecord->getData(), receiveRecord->getLOD(), in, _dataLOD = _reliableDeltaLOD);
+        _remoteData.readDelta(receiveRecord->getData(), receiveRecord->getLOD(), in, _remoteDataLOD = _reliableDeltaLOD);
         _sequencer.getInputStream().persistReadMappings(in.getAndResetReadMappings());
         in.clearPersistentMappings();
+        _data = _remoteData;
         compareMetavoxelData();
         _reliableDeltaChannel = NULL;
         return;
@ -79,6 +79,8 @@ private:
|
|||
|
||||
MetavoxelData _data;
|
||||
MetavoxelLOD _dataLOD;
|
||||
MetavoxelData _remoteData;
|
||||
MetavoxelLOD _remoteDataLOD;
|
||||
MetavoxelLOD _lod;
|
||||
|
||||
SharedObjectPointer _sphere;
|
||||
|
@ -104,6 +106,7 @@ private:
|
|||
MetavoxelData _reliableDeltaData;
|
||||
MetavoxelLOD _reliableDeltaLOD;
|
||||
Bitstream::WriteMappings _reliableDeltaWriteMappings;
|
||||
int _reliableDeltaID;
|
||||
};
|
||||
|
||||
/// A simple shared object.
|
||||
|
|