Merge: master -> panel-attachable-improvements.

Zander Otavka 2015-08-06 16:29:38 -07:00
commit be4e827281
206 changed files with 2831 additions and 4644 deletions
BUILD.md, BUILD_WIN.md, CMakeLists.txt
cmake
domain-server/resources
examples
interface
libraries


@ -5,6 +5,7 @@
* [OpenSSL](https://www.openssl.org/related/binaries.html) ~> 1.0.1m
* IMPORTANT: Using the recommended version of OpenSSL is critical to avoid security vulnerabilities.
* [VHACD](https://github.com/virneo/v-hacd)(clone this repository)(Optional)
* [zlib](http://www.zlib.net/)
####CMake External Project Dependencies


@ -75,6 +75,16 @@ To prevent these problems, install OpenSSL yourself. Download the following bina
Install OpenSSL into the Windows system directory, to make sure that Qt uses the version that you've just installed, and not some other version.
####zlib
Install zlib from
[Zlib for Windows](http://gnuwin32.sourceforge.net/packages/zlib.htm)
and fix a header file, as described here:
[zlib zconf.h bug](http://sourceforge.net/p/gnuwin32/bugs/169/)
###Build High Fidelity using Visual Studio
Follow the same build steps from the CMake section of [BUILD.md](BUILD.md), but pass a different generator to CMake.


@ -186,13 +186,10 @@ option(GET_POLYVOX "Get polyvox library automatically as external project" 1)
option(GET_OPENVR "Get OpenVR library automatically as external project" 1)
option(GET_BOOSTCONFIG "Get Boost-config library automatically as external project" 1)
option(GET_OGLPLUS "Get OGLplus library automatically as external project" 1)
option(GET_GLEW "Get GLEW library automatically as external project" 1)
option(USE_NSIGHT "Attempt to find the nSight libraries" 1)
if (WIN32)
option(GET_GLEW "Get GLEW library automatically as external project" 1)
endif ()
option(GET_SDL2 "Get SDL2 library automatically as external project" 0)
if (WIN32)


@ -1,34 +1,34 @@
if (WIN32)
set(EXTERNAL_NAME glew)
set(EXTERNAL_NAME glew)
include(ExternalProject)
ExternalProject_Add(
${EXTERNAL_NAME}
URL http://hifi-public.s3.amazonaws.com/dependencies/glew-1.10.0-win32.zip
URL_MD5 37514e4e595a3b3dc587eee8f7e8ec2f
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
LOG_DOWNLOAD 1
)
if (ANDROID)
set(ANDROID_CMAKE_ARGS "-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}" "-DANDROID_NATIVE_API_LEVEL=19")
endif ()
# Hide this external target (for ide users)
set_target_properties(${EXTERNAL_NAME} PROPERTIES FOLDER "hidden/externals")
include(ExternalProject)
ExternalProject_Add(
${EXTERNAL_NAME}
URL http://hifi-public.s3.amazonaws.com/dependencies/glew_simple.zip
URL_MD5 0507dc08337a82a5e7ecbc5417f92cc1
CONFIGURE_COMMAND CMAKE_ARGS ${ANDROID_CMAKE_ARGS} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
LOG_DOWNLOAD 1
LOG_CONFIGURE 1
LOG_BUILD 1
)
ExternalProject_Get_Property(${EXTERNAL_NAME} SOURCE_DIR)
# Hide this external target (for ide users)
set_target_properties(${EXTERNAL_NAME} PROPERTIES FOLDER "hidden/externals")
string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER)
set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${SOURCE_DIR}/include CACHE PATH "List of glew include directories")
ExternalProject_Get_Property(${EXTERNAL_NAME} INSTALL_DIR)
if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
set(_LIB_DIR ${SOURCE_DIR}/lib/Release/x64)
set(${EXTERNAL_NAME_UPPER}_DLL_PATH ${SOURCE_DIR}/bin/Release/x64 CACHE FILEPATH "Location of GLEW DLL")
else()
set(_LIB_DIR ${SOURCE_DIR}/lib/Release/Win32)
set(${EXTERNAL_NAME_UPPER}_DLL_PATH ${SOURCE_DIR}/bin/Release/Win32 CACHE FILEPATH "Location of GLEW DLL")
endif()
string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER)
set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${INSTALL_DIR}/include CACHE PATH "List of glew include directories")
set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${_LIB_DIR}/glew32.lib CACHE FILEPATH "Location of GLEW release library")
set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG "" CACHE FILEPATH "Location of GLEW debug library")
if (UNIX)
set(LIB_PREFIX "lib")
set(LIB_EXT "a")
elseif (WIN32)
set(LIB_EXT "lib")
endif ()
endif ()
set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG ${INSTALL_DIR}/lib/${LIB_PREFIX}glew_d.${LIB_EXT} CACHE FILEPATH "Path to glew debug library")
set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${INSTALL_DIR}/lib/${LIB_PREFIX}glew.${LIB_EXT} CACHE FILEPATH "Path to glew release library")

cmake/externals/zlib/CMakeLists.txt (new file, 28 lines, vendored)

@ -0,0 +1,28 @@
if (WIN32)
set(EXTERNAL_NAME zlib)
string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER)
include(ExternalProject)
ExternalProject_Add(
${EXTERNAL_NAME}
URL http://zlib.net/zlib128.zip
URL_MD5 126f8676442ffbd97884eb4d6f32afb4
INSTALL_COMMAND ""
LOG_DOWNLOAD 1
)
# Hide this external target (for ide users)
set_target_properties(${EXTERNAL_NAME} PROPERTIES FOLDER "hidden/externals")
ExternalProject_Get_Property(${EXTERNAL_NAME} SOURCE_DIR)
set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${SOURCE_DIR}/include CACHE PATH "List of zlib include directories")
ExternalProject_Get_Property(${EXTERNAL_NAME} BINARY_DIR)
set(${EXTERNAL_NAME_UPPER}_DLL_PATH ${BINARY_DIR}/Release CACHE FILEPATH "Location of ZLib DLL")
set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${BINARY_DIR}/Release/zlib.lib CACHE FILEPATH "Location of ZLib release library")
set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG "" CACHE FILEPATH "Location of ZLib debug library")
endif ()


@ -12,7 +12,7 @@ macro(SETUP_HIFI_LIBRARY)
project(${TARGET_NAME})
# grab the implementation and header files
file(GLOB_RECURSE LIB_SRCS "src/*.h" "src/*.cpp")
file(GLOB_RECURSE LIB_SRCS "src/*.h" "src/*.cpp" "src/*.c")
list(APPEND ${TARGET_NAME}_SRCS ${LIB_SRCS})
# create a library and set the property so it can be referenced later


@ -10,11 +10,6 @@ macro(SETUP_HIFI_OPENGL)
elseif (WIN32)
add_dependency_external_projects(glew)
find_package(GLEW REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLEW_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${GLEW_LIBRARIES} opengl32.lib)
if (USE_NSIGHT)
# try to find the Nsight package and add it to the build if we find it
find_package(NSIGHT)


@ -0,0 +1,38 @@
#
# Find3DConnexionClient.cmake
#
# Once done this will define
# 3DCONNEXIONCLIENT_FOUND - system found 3DConnexion
# 3DCONNEXIONCLIENT_INCLUDE_DIRS - the 3DConnexion include directory
# 3DCONNEXIONCLIENT_LIBRARY - Link this to use 3DConnexion
#
# Created on 10/06/2015 by Marcel Verhagen
# Copyright 2015 High Fidelity, Inc.
#
# Distributed under the Apache License, Version 2.0.
# See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
#
include("${MACRO_DIR}/HifiLibrarySearchHints.cmake")
hifi_library_search_hints("connexionclient")
if (APPLE)
find_library(3DCONNEXIONCLIENT_LIBRARIES NAMES 3DConnexionClient HINTS 3DCONNEXIONCLIENT_SEARCH_DIRS)
if(EXISTS ${3DConnexionClient})
set(3DCONNEXIONCLIENT_FOUND true)
set(3DCONNEXIONCLIENT_INCLUDE_DIRS ${3DConnexionClient})
set(3DCONNEXIONCLIENT_LIBRARY ${3DConnexionClient})
message(STATUS "Found 3DConnexion at " ${3DConnexionClient})
mark_as_advanced(3DCONNEXIONCLIENT_INCLUDE_DIR 3DCONNEXIONCLIENT_LIBRARY)
else ()
message(STATUS "Could NOT find 3DConnexionClient")
endif()
elseif (WIN32)
find_path(3DCONNEXIONCLIENT_INCLUDE_DIRS I3dMouseParams.h PATH_SUFFIXES include HINTS ${3DCONNEXIONCLIENT_SEARCH_DIRS})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(3DConnexionClient DEFAULT_MSG 3DCONNEXIONCLIENT_INCLUDE_DIRS)
mark_as_advanced(3DCONNEXIONCLIENT_INCLUDE_DIRS 3DCONNEXIONCLIENT_SEARCH_DIRS)
endif()


@ -1,7 +1,7 @@
#
# FindGLEW.cmake
#
# Try to find GLEW library and include path.
# Try to find GLEW library and include path. Note that this only handles static GLEW.
# Once done this will define
#
# GLEW_FOUND
@ -18,39 +18,18 @@
# See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
#
if (WIN32)
include("${MACRO_DIR}/HifiLibrarySearchHints.cmake")
hifi_library_search_hints("glew")
find_path(GLEW_INCLUDE_DIRS GL/glew.h PATH_SUFFIXES include HINTS ${GLEW_SEARCH_DIRS})
find_library(GLEW_LIBRARY_RELEASE glew32 PATH_SUFFIXES "lib/Release/Win32" "lib" HINTS ${GLEW_SEARCH_DIRS})
find_library(GLEW_LIBRARY_DEBUG glew32d PATH_SUFFIXES "lib/Debug/Win32" "lib" HINTS ${GLEW_SEARCH_DIRS})
find_path(GLEW_DLL_PATH glew32.dll PATH_SUFFIXES "bin/Release/Win32" HINTS ${GLEW_SEARCH_DIRS})
include(SelectLibraryConfigurations)
select_library_configurations(GLEW)
set(GLEW_LIBRARIES ${GLEW_LIBRARY})
include("${MACRO_DIR}/HifiLibrarySearchHints.cmake")
hifi_library_search_hints("glew")
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GLEW DEFAULT_MSG GLEW_INCLUDE_DIRS GLEW_LIBRARIES GLEW_DLL_PATH)
add_paths_to_fixup_libs(${GLEW_DLL_PATH})
elseif (APPLE)
else ()
find_path(GLEW_INCLUDE_DIR GL/glew.h)
find_library(GLEW_LIBRARY NAMES GLEW glew32 glew glew32s PATH_SUFFIXES lib64)
find_path(GLEW_INCLUDE_DIRS GL/glew.h PATH_SUFFIXES include HINTS ${GLEW_SEARCH_DIRS})
set(GLEW_INCLUDE_DIRS ${GLEW_INCLUDE_DIR})
set(GLEW_LIBRARIES ${GLEW_LIBRARY})
find_library(GLEW_LIBRARY_RELEASE glew32 PATH_SUFFIXES "lib/Release/Win32" "lib" HINTS ${GLEW_SEARCH_DIRS})
find_library(GLEW_LIBRARY_DEBUG glew32d PATH_SUFFIXES "lib/Debug/Win32" "lib" HINTS ${GLEW_SEARCH_DIRS})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GLEW
REQUIRED_VARS GLEW_INCLUDE_DIR GLEW_LIBRARY)
include(SelectLibraryConfigurations)
select_library_configurations(GLEW)
mark_as_advanced(GLEW_INCLUDE_DIR GLEW_LIBRARY)
endif ()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GLEW DEFAULT_MSG GLEW_INCLUDE_DIRS GLEW_LIBRARIES)
message(STATUS "Found GLEW - Assuming that GLEW is static and defining GLEW_STATIC")


@ -1,38 +0,0 @@
#
# FindconnexionClient.cmake
#
# Once done this will define
#
# 3DCONNEXIONCLIENT_INCLUDE_DIRS
#
# Created on 10/06/2015 by Marcel Verhagen
# Copyright 2015 High Fidelity, Inc.
#
# Distributed under the Apache License, Version 2.0.
# See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
#
# setup hints for 3DCONNEXIONCLIENT search
include("${MACRO_DIR}/HifiLibrarySearchHints.cmake")
hifi_library_search_hints("connexionclient")
if (APPLE)
find_library(3DconnexionClient 3DconnexionClient)
if(EXISTS ${3DconnexionClient})
set(CONNEXIONCLIENT_FOUND true)
set(CONNEXIONCLIENT_INCLUDE_DIR ${3DconnexionClient})
set(CONNEXIONCLIENT_LIBRARY ${3DconnexionClient})
set_target_properties(${TARGET_NAME} PROPERTIES LINK_FLAGS "-weak_framework 3DconnexionClient")
message(STATUS "Found 3Dconnexion")
mark_as_advanced(CONNEXIONCLIENT_INCLUDE_DIR CONNEXIONCLIENT_LIBRARY)
endif()
endif()
if (WIN32)
find_path(CONNEXIONCLIENT_INCLUDE_DIRS I3dMouseParams.h PATH_SUFFIXES Inc HINTS ${CONNEXIONCLIENT_SEARCH_DIRS})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(connexionClient DEFAULT_MSG CONNEXIONCLIENT_INCLUDE_DIRS)
mark_as_advanced(CONNEXIONCLIENT_INCLUDE_DIRS CONNEXIONCLIENT_SEARCH_DIRS)
endif()


@ -389,6 +389,10 @@
{
"value": "json",
"label": "Entity server persists data as JSON"
},
{
"value": "json.gz",
"label": "Entity server persists data as gzipped JSON"
}
],
"advanced": true

examples/gridTest.js (new file, 78 lines)

@ -0,0 +1,78 @@
//
// Created by Philip Rosedale on July 28, 2015
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// Creates a rectangular grid of objects, starting at the origin and proceeding along the X/Z plane.
// Useful for testing the rendering, LOD, and octree storage aspects of the system.
//
// Note that when creating things quickly, the entity server will ignore data if we send updates too quickly.
// Like Internet MTU, these rates are set by the domain operator, so in this script there is a RATE_PER_SECOND
// variable letting you set this speed. If entities are missing from the grid after a relog, this number
// being too high may be the reason.
var SIZE = 10.0;
var SEPARATION = 20.0;
var ROWS_X = 30;
var ROWS_Z = 30;
var TYPE = "Sphere"; // Right now this can be "Box" or "Model" or "Sphere"
var MODEL_URL = "https://hifi-public.s3.amazonaws.com/models/props/LowPolyIsland/CypressTreeGroup.fbx";
var MODEL_DIMENSION = { x: 33, y: 16, z: 49 };
var RATE_PER_SECOND = 1000; // The entity server will drop data if we create things too fast.
var SCRIPT_INTERVAL = 100;
var LIFETIME = 600; // By default, these entities will live in the server for 10 minutes
var addRandom = false;
var x = 0;
var z = 0;
var totalCreated = 0;
Script.setInterval(function () {
if (!Entities.serversExist() || !Entities.canRez()) {
return;
}
var numToCreate = RATE_PER_SECOND * (SCRIPT_INTERVAL / 1000.0);
for (var i = 0; i < numToCreate; i++) {
var position = { x: SIZE + (x * SEPARATION), y: SIZE, z: SIZE + (z * SEPARATION) };
if (TYPE == "Model") {
Entities.addEntity({
type: TYPE,
name: "gridTest",
modelURL: MODEL_URL,
position: position,
dimensions: MODEL_DIMENSION,
ignoreCollisions: true,
collisionsWillMove: false,
lifetime: LIFETIME
});
} else {
Entities.addEntity({
type: TYPE,
name: "gridTest",
position: position,
dimensions: { x: SIZE, y: SIZE, z: SIZE },
color: { red: x / ROWS_X * 255, green: 50, blue: z / ROWS_Z * 255 },
ignoreCollisions: true,
collisionsWillMove: false,
lifetime: LIFETIME
});
}
totalCreated++;
x++;
if (x == ROWS_X) {
x = 0;
z++;
print("Created: " + totalCreated);
}
if (z == ROWS_Z) {
Script.stop();
}
}
}, SCRIPT_INTERVAL);


@ -2,7 +2,7 @@ set(TARGET_NAME interface)
project(${TARGET_NAME})
# set a default root dir for each of our optional externals if it was not passed
set(OPTIONAL_EXTERNALS "Faceshift" "Sixense" "LeapMotion" "RtMidi" "SDL2" "RSSDK" "connexionClient")
set(OPTIONAL_EXTERNALS "Faceshift" "Sixense" "LeapMotion" "RtMidi" "SDL2" "RSSDK" "3DConnexionClient")
foreach(EXTERNAL ${OPTIONAL_EXTERNALS})
string(TOUPPER ${EXTERNAL} ${EXTERNAL}_UPPERCASE)
if (NOT ${${EXTERNAL}_UPPERCASE}_ROOT_DIR)


@ -0,0 +1,3 @@
The Mac version does not require any files. The 3D Connexion driver should be installed from http://www.3dconnexion.eu/service/drivers.html
For Windows the provided header file is required: include/I3dMouseParams.h


@ -1,4 +0,0 @@
The mac version does not require any files here. 3D connexion should be installed from
http://www.3dconnexion.eu/service/drivers.html
For windows a header file is required Inc/I3dMouseParams.h

Binary file not shown.


@ -0,0 +1,64 @@
name = defaultAvatar_full
type = body+head
scale = 1
filename = defaultAvatar_full/defaultAvatar_full.fbx
texdir = defaultAvatar_full/textures
joint = jointNeck = Head
joint = jointLeftHand = LeftHand
joint = jointRoot = Hips
joint = jointHead = HeadTop_End
joint = jointRightHand = RightHand
joint = jointLean = Spine
freeJoint = LeftArm
freeJoint = LeftForeArm
freeJoint = RightArm
freeJoint = RightForeArm
jointIndex = LeftHand = 35
jointIndex = Reye = 3
jointIndex = Hips = 10
jointIndex = LeftHandIndex1 = 36
jointIndex = LeftHandIndex2 = 37
jointIndex = LeftHandIndex3 = 38
jointIndex = LeftHandIndex4 = 39
jointIndex = LeftShoulder = 32
jointIndex = RightLeg = 12
jointIndex = Grp_blendshapes = 0
jointIndex = Leye = 4
jointIndex = headphone = 8
jointIndex = RightForeArm = 26
jointIndex = Spine = 21
jointIndex = LeftFoot = 18
jointIndex = RightToeBase = 14
jointIndex = face = 1
jointIndex = LeftToe_End = 20
jointIndex = Spine1 = 22
jointIndex = body = 9
jointIndex = Spine2 = 23
jointIndex = RightUpLeg = 11
jointIndex = top1 = 7
jointIndex = Neck = 40
jointIndex = HeadTop_End = 42
jointIndex = RightShoulder = 24
jointIndex = RightArm = 25
jointIndex = Head = 41
jointIndex = LeftLeg = 17
jointIndex = LeftForeArm = 34
jointIndex = hair = 6
jointIndex = RightHand = 27
jointIndex = LeftToeBase = 19
jointIndex = LeftUpLeg = 16
jointIndex = mouth = 2
jointIndex = RightFoot = 13
jointIndex = LeftArm = 33
jointIndex = shield = 5
jointIndex = RightHandIndex1 = 28
jointIndex = RightHandIndex2 = 29
jointIndex = RightToe_End = 15
jointIndex = RightHandIndex3 = 30
jointIndex = RightHandIndex4 = 31
ry = 0
rz = 0
tx = 0
ty = 0
tz = 0
rx = 0

Binary file not shown.

[Image file changed; new version is 4.6 KiB (preview not shown).]


@ -115,7 +115,7 @@
#include "devices/MIDIManager.h"
#include "devices/OculusManager.h"
#include "devices/TV3DManager.h"
#include "devices/3Dconnexion.h"
#include "devices/3DConnexionClient.h"
#include "scripting/AccountScriptingInterface.h"
#include "scripting/AudioDeviceScriptingInterface.h"
@ -641,7 +641,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
applicationUpdater->checkForUpdate();
// the 3Dconnexion device wants to be initialized after a window is displayed.
ConnexionClient::init();
ConnexionClient::getInstance().init();
auto& packetReceiver = nodeList->getPacketReceiver();
packetReceiver.registerListener(PacketType::DomainConnectionDenied, this, "handleDomainConnectionDeniedPacket");
@ -754,7 +754,7 @@ Application::~Application() {
Leapmotion::destroy();
RealSense::destroy();
ConnexionClient::destroy();
ConnexionClient::getInstance().destroy();
qInstallMessageHandler(NULL); // NOTE: Do this as late as possible so we continue to get our log messages
}
@ -893,14 +893,15 @@ void Application::paintGL() {
{
float ratio = ((float)QApplication::desktop()->windowHandle()->devicePixelRatio() * getRenderResolutionScale());
auto mirrorViewport = glm::ivec4(0, 0, _mirrorViewRect.width() * ratio, _mirrorViewRect.height() * ratio);
auto mirrorViewportDest = mirrorViewport;
// Flip the src and destination rect horizontally to do the mirror
auto mirrorRect = glm::ivec4(0, 0, _mirrorViewRect.width() * ratio, _mirrorViewRect.height() * ratio);
auto mirrorRectDest = glm::ivec4(mirrorRect.z, mirrorRect.y, mirrorRect.x, mirrorRect.w);
auto selfieFbo = DependencyManager::get<FramebufferCache>()->getSelfieFramebuffer();
gpu::Batch batch;
batch.setFramebuffer(selfieFbo);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0f, 0.0f, 0.0f, 0.0f));
batch.blit(primaryFbo, mirrorViewport, selfieFbo, mirrorViewportDest);
batch.blit(primaryFbo, mirrorRect, selfieFbo, mirrorRectDest);
batch.setFramebuffer(nullptr);
renderArgs._context->render(batch);
}
@ -991,8 +992,14 @@ void Application::paintGL() {
auto geometryCache = DependencyManager::get<GeometryCache>();
auto primaryFbo = DependencyManager::get<FramebufferCache>()->getPrimaryFramebufferDepthColor();
gpu::Batch batch;
batch.blit(primaryFbo, glm::ivec4(0, 0, _renderResolution.x, _renderResolution.y),
nullptr, glm::ivec4(0, 0, _glWidget->getDeviceSize().width(), _glWidget->getDeviceSize().height()));
if (renderArgs._renderMode == RenderArgs::MIRROR_RENDER_MODE) {
batch.blit(primaryFbo, glm::ivec4(0, 0, _renderResolution.x, _renderResolution.y),
nullptr, glm::ivec4(_glWidget->getDeviceSize().width(), 0, 0, _glWidget->getDeviceSize().height()));
} else {
batch.blit(primaryFbo, glm::ivec4(0, 0, _renderResolution.x, _renderResolution.y),
nullptr, glm::ivec4(0, 0, _glWidget->getDeviceSize().width(), _glWidget->getDeviceSize().height()));
}
batch.setFramebuffer(nullptr);
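An aside on the two blit hunks above: the mirroring comes entirely from the destination rectangle, whose x extents are swapped relative to the source (for example ivec4(width, 0, 0, height) instead of ivec4(0, 0, width, height)). The snippet below is a standalone sketch with plain arrays, not the gpu::Batch API, showing why reversed x bounds amount to a horizontal flip.

```cpp
// Standalone sketch (plain arrays, not gpu::Batch): reversing the destination's
// x extents during a blit mirrors the image horizontally.
#include <array>
#include <cstdio>

int main() {
    const std::array<char, 4> src{'A', 'B', 'C', 'D'};  // one row of the source framebuffer
    std::array<char, 4> dst{};

    // Destination x runs from width down to 0, so source column x lands at width - 1 - x.
    const int width = static_cast<int>(src.size());
    for (int x = 0; x < width; ++x) {
        dst[width - 1 - x] = src[x];
    }

    std::printf("src: %c%c%c%c  dst: %c%c%c%c\n",
                src[0], src[1], src[2], src[3], dst[0], dst[1], dst[2], dst[3]);
    return 0;
}
```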
@ -2013,6 +2020,7 @@ void Application::setActiveFaceTracker() {
#ifdef HAVE_DDE
bool isUsingDDE = Menu::getInstance()->isOptionChecked(MenuOption::UseCamera);
Menu::getInstance()->getActionForOption(MenuOption::BinaryEyelidControl)->setVisible(isUsingDDE);
Menu::getInstance()->getActionForOption(MenuOption::CoupleEyelids)->setVisible(isUsingDDE);
Menu::getInstance()->getActionForOption(MenuOption::UseAudioForMouth)->setVisible(isUsingDDE);
Menu::getInstance()->getActionForOption(MenuOption::VelocityFilter)->setVisible(isUsingDDE);
Menu::getInstance()->getActionForOption(MenuOption::CalibrateCamera)->setVisible(isUsingDDE);


@ -19,7 +19,22 @@
const int MSECS_PER_FRAME_WHEN_THROTTLED = 66;
GLCanvas::GLCanvas() : QGLWidget(QGL::NoDepthBuffer | QGL::NoStencilBuffer),
static QGLFormat& getDesiredGLFormat() {
// Specify an OpenGL 4.1 format using the Core profile.
// That is, no old-school fixed pipeline functionality
static QGLFormat glFormat;
static std::once_flag once;
std::call_once(once, [] {
glFormat.setVersion(4, 1);
glFormat.setProfile(QGLFormat::CoreProfile); // Requires >=Qt-4.8.0
glFormat.setSampleBuffers(false);
glFormat.setDepth(false);
glFormat.setStencil(false);
});
return glFormat;
}
GLCanvas::GLCanvas() : QGLWidget(getDesiredGLFormat()),
_throttleRendering(false),
_idleRenderInterval(MSECS_PER_FRAME_WHEN_THROTTLED)
{
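The new getDesiredGLFormat() uses a function-local static plus std::call_once so the format is configured exactly once before the first GLCanvas is built. Below is a minimal standalone sketch of that pattern; GLFormatConfig is an illustrative stand-in for QGLFormat, not a real type in the codebase.

```cpp
// Minimal sketch of the same one-time-setup pattern; GLFormatConfig is an
// illustrative stand-in for QGLFormat, not a real type in the codebase.
#include <cstdio>
#include <mutex>

struct GLFormatConfig {
    int major = 0;
    int minor = 0;
    bool depth = true;
    bool stencil = true;
};

static GLFormatConfig& getDesiredFormat() {
    static GLFormatConfig format;
    static std::once_flag once;
    std::call_once(once, [] {
        format.major = 4;      // request a modern core-profile context
        format.minor = 1;
        format.depth = false;  // the app renders into its own framebuffers
        format.stencil = false;
    });
    return format;
}

int main() {
    const GLFormatConfig& f = getDesiredFormat();
    std::printf("requested GL %d.%d, depth=%d, stencil=%d\n",
                f.major, f.minor, static_cast<int>(f.depth), static_cast<int>(f.stencil));
    return 0;
}
```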


@ -29,7 +29,7 @@
#include "devices/Faceshift.h"
#include "devices/RealSense.h"
#include "devices/SixenseManager.h"
#include "devices/3Dconnexion.h"
#include "devices/3DConnexionClient.h"
#include "MainWindow.h"
#include "scripting/MenuScriptingInterface.h"
#if defined(Q_OS_MAC) || defined(Q_OS_WIN)
@ -421,6 +421,8 @@ Menu::Menu() {
faceTrackingMenu->addSeparator();
QAction* binaryEyelidControl = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::BinaryEyelidControl, 0, true);
binaryEyelidControl->setVisible(true); // DDE face tracking is on by default
QAction* coupleEyelids = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::CoupleEyelids, 0, true);
coupleEyelids->setVisible(true); // DDE face tracking is on by default
QAction* useAudioForMouth = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::UseAudioForMouth, 0, true);
useAudioForMouth->setVisible(true); // DDE face tracking is on by default
QAction* ddeFiltering = addCheckableActionToQMenuAndActionHash(faceTrackingMenu, MenuOption::VelocityFilter, 0, true);


@ -165,6 +165,7 @@ namespace MenuOption {
const QString ControlWithSpeech = "Control With Speech";
const QString CopyAddress = "Copy Address to Clipboard";
const QString CopyPath = "Copy Path to Clipboard";
const QString CoupleEyelids = "Couple Eyelids";
const QString DebugAmbientOcclusion = "Debug Ambient Occlusion";
const QString DecreaseAvatarSize = "Decrease Avatar Size";
const QString DeleteBookmark = "Delete Bookmark...";


@ -137,7 +137,7 @@ void AudioScope::render(RenderArgs* renderArgs, int width, int height) {
batch.setProjectionTransform(legacyProjection);
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch._glLineWidth(1.0f); // default
geometryCache->renderQuad(batch, x, y, w, h, backgroundColor, _audioScopeBackground);
geometryCache->renderGrid(batch, x, y, w, h, gridRows, gridCols, gridColor, _audioScopeGrid);
renderLineStrip(batch, _inputID, inputColor, x, y, _samplesPerScope, _scopeInputOffset, _scopeInput);


@ -936,8 +936,14 @@ void Avatar::setFaceModelURL(const QUrl& faceModelURL) {
void Avatar::setSkeletonModelURL(const QUrl& skeletonModelURL) {
AvatarData::setSkeletonModelURL(skeletonModelURL);
const QUrl DEFAULT_FULL_MODEL_URL = QUrl::fromLocalFile(PathUtils::resourcesPath() + "meshes/defaultAvatar_full.fst");
const QUrl DEFAULT_SKELETON_MODEL_URL = QUrl::fromLocalFile(PathUtils::resourcesPath() + "meshes/defaultAvatar_body.fst");
_skeletonModel.setURL(_skeletonModelURL, DEFAULT_SKELETON_MODEL_URL, true, !isMyAvatar());
if (isMyAvatar()) {
_skeletonModel.setURL(_skeletonModelURL,
getUseFullAvatar() ? DEFAULT_FULL_MODEL_URL : DEFAULT_SKELETON_MODEL_URL, true, !isMyAvatar());
} else {
_skeletonModel.setURL(_skeletonModelURL, DEFAULT_SKELETON_MODEL_URL, true, !isMyAvatar());
}
}
void Avatar::setAttachmentData(const QVector<AttachmentData>& attachmentData) {


@ -150,6 +150,7 @@ public:
Q_INVOKABLE glm::vec3 getAngularVelocity() const { return _angularVelocity; }
Q_INVOKABLE glm::vec3 getAngularAcceleration() const { return _angularAcceleration; }
virtual bool getUseFullAvatar() const { return false; }
/// Scales a world space position vector relative to the avatar position and scale
/// \param vector position to be scaled. Will store the result
@ -169,9 +170,6 @@ public:
void setMotionState(AvatarMotionState* motionState) { _motionState = motionState; }
AvatarMotionState* getMotionState() { return _motionState; }
signals:
void collisionWithAvatar(const QUuid& myUUID, const QUuid& theirUUID, const CollisionInfo& collision);
protected:
SkeletonModel _skeletonModel;
glm::vec3 _skeletonOffset;


@ -397,7 +397,7 @@ void Head::renderLookatVectors(RenderArgs* renderArgs, glm::vec3 leftEyePosition
auto& batch = *renderArgs->_batch;
auto transform = Transform{};
batch.setModelTransform(transform);
batch._glLineWidth(2.0f);
// FIXME: The line width of 2.0f is not supported anymore, we'll need a workaround
auto deferredLighting = DependencyManager::get<DeferredLightingEffect>();
deferredLighting->bindSimpleProgram(batch);


@ -107,7 +107,7 @@ JointReferential::JointReferential(Referential* referential, EntityTree* tree, A
EntityItemPointer item = _tree->findEntityByID(_entityID);
const Model* model = getModel(item);
if (!isValid() || model == NULL || _jointIndex >= (uint32_t)(model->getJointStateCount())) {
if (isValid() && model != NULL && _jointIndex < (uint32_t)(model->getJointStateCount())) {
_lastRefDimension = item->getDimensions();
model->getJointRotationInWorldFrame(_jointIndex, _refRotation);
model->getJointPositionInWorldFrame(_jointIndex, _refPosition);


@ -29,7 +29,6 @@
#include <udt/PacketHeaders.h>
#include <PathUtils.h>
#include <PerfStat.h>
#include <ShapeCollider.h>
#include <SharedUtil.h>
#include <TextRenderer3D.h>
#include <UserActivityLogger.h>
@ -48,9 +47,6 @@
#include "Util.h"
#include "InterfaceLogging.h"
#include "gpu/GLBackend.h"
using namespace std;
const glm::vec3 DEFAULT_UP_DIRECTION(0.0f, 1.0f, 0.0f);
@ -104,7 +100,6 @@ MyAvatar::MyAvatar(RigPointer rig) :
_rig(rig),
_prevShouldDrawHead(true)
{
ShapeCollider::initDispatchTable();
for (int i = 0; i < MAX_DRIVE_KEYS; i++) {
_driveKeys[i] = 0.0f;
}
@ -625,6 +620,12 @@ float loadSetting(QSettings& settings, const char* name, float defaultValue) {
return value;
}
void MyAvatar::setEnableRigAnimations(bool isEnabled) {
Settings settings;
settings.setValue("enableRig", isEnabled);
_rig->setEnableRig(isEnabled);
}
void MyAvatar::loadData() {
Settings settings;
settings.beginGroup("Avatar");


@ -72,6 +72,7 @@ public:
Q_INVOKABLE AnimationDetails getAnimationDetailsByRole(const QString& role);
Q_INVOKABLE AnimationDetails getAnimationDetails(const QString& url);
void clearJointAnimationPriorities();
Q_INVOKABLE void setEnableRigAnimations(bool isEnabled);
// get/set avatar data
void saveData();
@ -115,7 +116,7 @@ public:
Q_INVOKABLE void useHeadAndBodyURLs(const QUrl& headURL, const QUrl& bodyURL,
const QString& headName = QString(), const QString& bodyName = QString());
Q_INVOKABLE bool getUseFullAvatar() const { return _useFullAvatar; }
Q_INVOKABLE virtual bool getUseFullAvatar() const { return _useFullAvatar; }
Q_INVOKABLE const QUrl& getFullAvatarURLFromPreferences() const { return _fullAvatarURLFromPreferences; }
Q_INVOKABLE const QUrl& getHeadURLFromPreferences() const { return _headURLFromPreferences; }
Q_INVOKABLE const QUrl& getBodyURLFromPreferences() const { return _skeletonURLFromPreferences; }


@ -42,7 +42,23 @@ SkeletonModel::~SkeletonModel() {
void SkeletonModel::initJointStates(QVector<JointState> states) {
const FBXGeometry& geometry = _geometry->getFBXGeometry();
glm::mat4 parentTransform = glm::scale(_scale) * glm::translate(_offset) * geometry.offset;
_boundingRadius = _rig->initJointStates(states, parentTransform);
int rootJointIndex = geometry.rootJointIndex;
int leftHandJointIndex = geometry.leftHandJointIndex;
int leftElbowJointIndex = leftHandJointIndex >= 0 ? geometry.joints.at(leftHandJointIndex).parentIndex : -1;
int leftShoulderJointIndex = leftElbowJointIndex >= 0 ? geometry.joints.at(leftElbowJointIndex).parentIndex : -1;
int rightHandJointIndex = geometry.rightHandJointIndex;
int rightElbowJointIndex = rightHandJointIndex >= 0 ? geometry.joints.at(rightHandJointIndex).parentIndex : -1;
int rightShoulderJointIndex = rightElbowJointIndex >= 0 ? geometry.joints.at(rightElbowJointIndex).parentIndex : -1;
_boundingRadius = _rig->initJointStates(states, parentTransform,
rootJointIndex,
leftHandJointIndex,
leftElbowJointIndex,
leftShoulderJointIndex,
rightHandJointIndex,
rightElbowJointIndex,
rightShoulderJointIndex);
// Determine the default eye position for avatar scale = 1.0
int headJointIndex = _geometry->getFBXGeometry().headJointIndex;
@ -81,10 +97,11 @@ void SkeletonModel::initJointStates(QVector<JointState> states) {
}
const float PALM_PRIORITY = DEFAULT_PRIORITY;
const float LEAN_PRIORITY = DEFAULT_PRIORITY;
void SkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {
_rig->computeMotionAnimationState(deltaTime, _owningAvatar->getPosition(), _owningAvatar->getVelocity(), _owningAvatar->getOrientation());
if (_owningAvatar->isMyAvatar()) {
_rig->computeMotionAnimationState(deltaTime, _owningAvatar->getPosition(), _owningAvatar->getVelocity(), _owningAvatar->getOrientation());
}
Model::updateRig(deltaTime, parentTransform);
if (_owningAvatar->isMyAvatar()) {
const FBXGeometry& geometry = _geometry->getFBXGeometry();
@ -227,7 +244,7 @@ void SkeletonModel::applyPalmData(int jointIndex, PalmData& palm) {
palmRotation = rotationBetween(palmRotation * glm::vec3(-sign, 0.0f, 0.0f), fingerDirection) * palmRotation;
if (Menu::getInstance()->isOptionChecked(MenuOption::AlternateIK)) {
setHandPosition(jointIndex, palmPosition, palmRotation);
_rig->setHandPosition(jointIndex, palmPosition, palmRotation, extractUniformScale(_scale), PALM_PRIORITY);
} else if (Menu::getInstance()->isOptionChecked(MenuOption::AlignForearmsWithWrists)) {
float forearmLength = geometry.joints.at(jointIndex).distanceToParent * extractUniformScale(_scale);
glm::vec3 forearm = palmRotation * glm::vec3(sign * forearmLength, 0.0f, 0.0f);
@ -248,7 +265,8 @@ void SkeletonModel::renderJointConstraints(gpu::Batch& batch, int jointIndex) {
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const float BASE_DIRECTION_SIZE = 0.3f;
float directionSize = BASE_DIRECTION_SIZE * extractUniformScale(_scale);
batch._glLineWidth(3.0f);
// FIXME: The line width of 3.0f is not supported anymore, we'll need a workaround
do {
const FBXJoint& joint = geometry.joints.at(jointIndex);
const JointState& jointState = _rig->getJointState(jointIndex);
@ -332,69 +350,6 @@ void SkeletonModel::renderOrientationDirections(gpu::Batch& batch, int jointInde
geometryCache->renderLine(batch, position, pFront, blue, jointLineIDs._front);
}
void SkeletonModel::setHandPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation) {
// this algorithm is from sample code from sixense
const FBXGeometry& geometry = _geometry->getFBXGeometry();
int elbowJointIndex = geometry.joints.at(jointIndex).parentIndex;
if (elbowJointIndex == -1) {
return;
}
int shoulderJointIndex = geometry.joints.at(elbowJointIndex).parentIndex;
glm::vec3 shoulderPosition;
if (!getJointPosition(shoulderJointIndex, shoulderPosition)) {
return;
}
// precomputed lengths
float scale = extractUniformScale(_scale);
float upperArmLength = geometry.joints.at(elbowJointIndex).distanceToParent * scale;
float lowerArmLength = geometry.joints.at(jointIndex).distanceToParent * scale;
// first set wrist position
glm::vec3 wristPosition = position;
glm::vec3 shoulderToWrist = wristPosition - shoulderPosition;
float distanceToWrist = glm::length(shoulderToWrist);
// prevent gimbal lock
if (distanceToWrist > upperArmLength + lowerArmLength - EPSILON) {
distanceToWrist = upperArmLength + lowerArmLength - EPSILON;
shoulderToWrist = glm::normalize(shoulderToWrist) * distanceToWrist;
wristPosition = shoulderPosition + shoulderToWrist;
}
// cosine of angle from upper arm to hand vector
float cosA = (upperArmLength * upperArmLength + distanceToWrist * distanceToWrist - lowerArmLength * lowerArmLength) /
(2 * upperArmLength * distanceToWrist);
float mid = upperArmLength * cosA;
float height = sqrt(upperArmLength * upperArmLength + mid * mid - 2 * upperArmLength * mid * cosA);
// direction of the elbow
glm::vec3 handNormal = glm::cross(rotation * glm::vec3(0.0f, 1.0f, 0.0f), shoulderToWrist); // elbow rotating with wrist
glm::vec3 relaxedNormal = glm::cross(glm::vec3(0.0f, 1.0f, 0.0f), shoulderToWrist); // elbow pointing straight down
const float NORMAL_WEIGHT = 0.5f;
glm::vec3 finalNormal = glm::mix(relaxedNormal, handNormal, NORMAL_WEIGHT);
bool rightHand = (jointIndex == geometry.rightHandJointIndex);
if (rightHand ? (finalNormal.y > 0.0f) : (finalNormal.y < 0.0f)) {
finalNormal.y = 0.0f; // dont allow elbows to point inward (y is vertical axis)
}
glm::vec3 tangent = glm::normalize(glm::cross(shoulderToWrist, finalNormal));
// ik solution
glm::vec3 elbowPosition = shoulderPosition + glm::normalize(shoulderToWrist) * mid - tangent * height;
glm::vec3 forwardVector(rightHand ? -1.0f : 1.0f, 0.0f, 0.0f);
glm::quat shoulderRotation = rotationBetween(forwardVector, elbowPosition - shoulderPosition);
_rig->setJointRotationInBindFrame(shoulderJointIndex, shoulderRotation, PALM_PRIORITY);
_rig->setJointRotationInBindFrame(elbowJointIndex,
rotationBetween(shoulderRotation * forwardVector, wristPosition - elbowPosition) *
shoulderRotation, PALM_PRIORITY);
_rig->setJointRotationInBindFrame(jointIndex, rotation, PALM_PRIORITY);
}
bool SkeletonModel::getLeftHandPosition(glm::vec3& position) const {
return getJointPositionInWorldFrame(getLeftHandJointIndex(), position);
}
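The setHandPosition() routine removed here (it reappears on AvatarRig later in this commit) places the elbow using the law of cosines: clamp the wrist into reach, compute the cosine of the shoulder angle from the three side lengths, project the elbow onto the shoulder-to-wrist line, then push it off that line by the remaining height. The following 2D sketch with made-up arm lengths and positions walks through just that geometric step; it is not the avatar code itself.

```cpp
// Standalone 2D sketch of the law-of-cosines elbow placement (made-up lengths
// and positions; not the avatar code).
#include <algorithm>
#include <cmath>
#include <cstdio>

struct Vec2 { float x, y; };

int main() {
    const float upperArm = 0.30f;   // shoulder -> elbow length
    const float lowerArm = 0.25f;   // elbow -> wrist length
    const Vec2 shoulder{0.0f, 0.0f};
    Vec2 wrist{0.40f, 0.20f};

    // Clamp the wrist so the chain can reach it (mirrors the "prevent gimbal lock" clamp).
    float dx = wrist.x - shoulder.x;
    float dy = wrist.y - shoulder.y;
    float d = std::sqrt(dx * dx + dy * dy);
    const float maxReach = upperArm + lowerArm - 1e-4f;
    if (d > maxReach) {
        dx *= maxReach / d;
        dy *= maxReach / d;
        d = maxReach;
        wrist = {shoulder.x + dx, shoulder.y + dy};
    }

    // Law of cosines: cosine of the angle between the upper arm and the shoulder->wrist line.
    float cosA = (upperArm * upperArm + d * d - lowerArm * lowerArm) / (2.0f * upperArm * d);
    cosA = std::min(1.0f, std::max(-1.0f, cosA));
    float mid = upperArm * cosA;                                // elbow projected onto that line
    float height = std::sqrt(upperArm * upperArm - mid * mid);  // perpendicular offset of the elbow

    // Offset the elbow perpendicular to the shoulder->wrist direction (the 2D analogue
    // of the tangent/normal choice the avatar code makes in 3D).
    Vec2 dir{dx / d, dy / d};
    Vec2 perp{-dir.y, dir.x};
    Vec2 elbow{shoulder.x + dir.x * mid - perp.x * height,
               shoulder.y + dir.y * mid - perp.y * height};

    std::printf("elbow at (%.3f, %.3f)\n", elbow.x, elbow.y);
    return 0;
}
```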


@ -131,11 +131,6 @@ private:
QHash<int, OrientationLineIDs> _jointOrientationLines;
int _triangleFanID;
/// \param jointIndex index of joint in model
/// \param position position of joint in model-frame
/// \param rotation rotation of joint in model-frame
void setHandPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation);
bool getEyeModelPositions(glm::vec3& firstEyePosition, glm::vec3& secondEyePosition) const;
Avatar* _owningAvatar;


@ -1,6 +1,6 @@
//
// 3DConnexion.cpp
// hifi
// 3DConnexionClient.cpp
// interface/src/devices
//
// Created by MarcelEdward Verhagen on 09-06-15.
// Copyright 2015 High Fidelity, Inc.
@ -9,7 +9,7 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "3Dconnexion.h"
#include "3DConnexionClient.h"
#include "UserActivityLogger.h"
const float MAX_AXIS = 75.0f; // max forward = 2x speed
@ -28,7 +28,6 @@ ConnexionData::ConnexionData() {
}
void ConnexionData::handleAxisEvent() {
//qCWarning(interfaceapp) << "pos state x = " << cc_position.x << " y = " << cc_position.y << " z = " << cc_position.z << " Rotation x = " << cc_rotation.x << " y = " << cc_rotation.y << " z = " << cc_rotation.z;
_axisStateMap[makeInput(ROTATION_AXIS_Y_POS).getChannel()] = (cc_rotation.y > 0.0f) ? cc_rotation.y / MAX_AXIS : 0.0f;
_axisStateMap[makeInput(ROTATION_AXIS_Y_NEG).getChannel()] = (cc_rotation.y < 0.0f) ? -cc_rotation.y / MAX_AXIS : 0.0f;
_axisStateMap[makeInput(POSITION_AXIS_X_POS).getChannel()] = (cc_position.x > 0.0f) ? cc_position.x / MAX_AXIS : 0.0f;
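handleAxisEvent() above maps each signed device axis onto a pair of non-negative input channels, normalized by MAX_AXIS. A tiny standalone sketch of that mapping, with illustrative values rather than the real input-mapper types:

```cpp
// Standalone sketch (illustrative values, not the real input-mapper types):
// one signed axis split into two normalized, non-negative channels.
#include <cstdio>

int main() {
    const float MAX_AXIS = 75.0f;  // device units that map to full deflection
    float rotationY = -30.0f;      // raw signed axis value from the 3D mouse

    float positiveChannel = (rotationY > 0.0f) ? rotationY / MAX_AXIS : 0.0f;
    float negativeChannel = (rotationY < 0.0f) ? -rotationY / MAX_AXIS : 0.0f;

    std::printf("pos = %.2f, neg = %.2f\n", positiveChannel, negativeChannel);  // pos = 0.00, neg = 0.40
    return 0;
}
```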
@ -148,7 +147,7 @@ UserInputMapper::Input ConnexionData::makeInput(ConnexionData::PositionChannel a
void ConnexionData::update() {
// the update is done in the ConnexionClient class.
// for windows in the nativeEventFilter the inputmapper is connected or registed or removed when an 3Dconnnexion device is attached or deteched
// for windows in the nativeEventFilter the inputmapper is connected, registered, or removed when a 3Dconnexion device is attached or detached
// for osx the api will call DeviceAddedHandler or DeviceRemoveHandler when a 3Dconnexion device is attached or detached
}
@ -157,40 +156,32 @@ ConnexionClient& ConnexionClient::getInstance() {
return sharedInstance;
}
#ifdef HAVE_CONNEXIONCLIENT
#ifdef HAVE_3DCONNEXIONCLIENT
#ifdef _WIN32
#ifdef Q_OS_WIN
static ConnexionClient* gMouseInput = 0;
void ConnexionClient::toggleConnexion(bool shouldEnable)
{
void ConnexionClient::toggleConnexion(bool shouldEnable) {
ConnexionData& connexiondata = ConnexionData::getInstance();
if (shouldEnable && connexiondata.getDeviceID() == 0) {
ConnexionClient::init();
init();
}
if (!shouldEnable && connexiondata.getDeviceID() != 0) {
ConnexionClient::destroy();
destroy();
}
}
void ConnexionClient::init() {
if (Menu::getInstance()->isOptionChecked(MenuOption::Connexion)) {
ConnexionClient& cclient = ConnexionClient::getInstance();
cclient.fLast3dmouseInputTime = 0;
fLast3dmouseInputTime = 0;
cclient.InitializeRawInput(GetActiveWindow());
InitializeRawInput(GetActiveWindow());
gMouseInput = &cclient;
QAbstractEventDispatcher::instance()->installNativeEventFilter(&cclient);
QAbstractEventDispatcher::instance()->installNativeEventFilter(this);
}
}
void ConnexionClient::destroy() {
ConnexionClient& cclient = ConnexionClient::getInstance();
QAbstractEventDispatcher::instance()->removeNativeEventFilter(&cclient);
QAbstractEventDispatcher::instance()->removeNativeEventFilter(this);
ConnexionData& connexiondata = ConnexionData::getInstance();
int deviceid = connexiondata.getDeviceID();
connexiondata.setDeviceID(0);
@ -225,15 +216,15 @@ enum ConnexionPid {
// e3dmouse_virtual_key
enum V3dk {
V3DK_INVALID = 0
, V3DK_MENU = 1, V3DK_FIT
, V3DK_TOP, V3DK_LEFT, V3DK_RIGHT, V3DK_FRONT, V3DK_BOTTOM, V3DK_BACK
, V3DK_CW, V3DK_CCW
, V3DK_ISO1, V3DK_ISO2
, V3DK_1, V3DK_2, V3DK_3, V3DK_4, V3DK_5, V3DK_6, V3DK_7, V3DK_8, V3DK_9, V3DK_10
, V3DK_ESC, V3DK_ALT, V3DK_SHIFT, V3DK_CTRL
, V3DK_ROTATE, V3DK_PANZOOM, V3DK_DOMINANT
, V3DK_PLUS, V3DK_MINUS
V3DK_INVALID = 0,
V3DK_MENU = 1, V3DK_FIT,
V3DK_TOP, V3DK_LEFT, V3DK_RIGHT, V3DK_FRONT, V3DK_BOTTOM, V3DK_BACK,
V3DK_CW, V3DK_CCW,
V3DK_ISO1, V3DK_ISO2,
V3DK_1, V3DK_2, V3DK_3, V3DK_4, V3DK_5, V3DK_6, V3DK_7, V3DK_8, V3DK_9, V3DK_10,
V3DK_ESC, V3DK_ALT, V3DK_SHIFT, V3DK_CTRL,
V3DK_ROTATE, V3DK_PANZOOM, V3DK_DOMINANT,
V3DK_PLUS, V3DK_MINUS
};
struct tag_VirtualKeys {
@ -244,33 +235,33 @@ struct tag_VirtualKeys {
// e3dmouse_virtual_key
static const V3dk SpaceExplorerKeys[] = {
V3DK_INVALID // there is no button 0
, V3DK_1, V3DK_2
, V3DK_TOP, V3DK_LEFT, V3DK_RIGHT, V3DK_FRONT
, V3DK_ESC, V3DK_ALT, V3DK_SHIFT, V3DK_CTRL
, V3DK_FIT, V3DK_MENU
, V3DK_PLUS, V3DK_MINUS
, V3DK_ROTATE
V3DK_INVALID, // there is no button 0
V3DK_1, V3DK_2,
V3DK_TOP, V3DK_LEFT, V3DK_RIGHT, V3DK_FRONT,
V3DK_ESC, V3DK_ALT, V3DK_SHIFT, V3DK_CTRL,
V3DK_FIT, V3DK_MENU,
V3DK_PLUS, V3DK_MINUS,
V3DK_ROTATE
};
//e3dmouse_virtual_key
static const V3dk SpacePilotKeys[] = {
V3DK_INVALID
, V3DK_1, V3DK_2, V3DK_3, V3DK_4, V3DK_5, V3DK_6
, V3DK_TOP, V3DK_LEFT, V3DK_RIGHT, V3DK_FRONT
, V3DK_ESC, V3DK_ALT, V3DK_SHIFT, V3DK_CTRL
, V3DK_FIT, V3DK_MENU
, V3DK_PLUS, V3DK_MINUS
, V3DK_DOMINANT, V3DK_ROTATE
V3DK_INVALID,
V3DK_1, V3DK_2, V3DK_3, V3DK_4, V3DK_5, V3DK_6,
V3DK_TOP, V3DK_LEFT, V3DK_RIGHT, V3DK_FRONT,
V3DK_ESC, V3DK_ALT, V3DK_SHIFT, V3DK_CTRL,
V3DK_FIT, V3DK_MENU,
V3DK_PLUS, V3DK_MINUS,
V3DK_DOMINANT, V3DK_ROTATE,
};
static const struct tag_VirtualKeys _3dmouseVirtualKeys[] = {
CONNEXIONPID_SPACEPILOT
, sizeof(SpacePilotKeys) / sizeof(SpacePilotKeys[0])
, const_cast<V3dk *>(SpacePilotKeys),
CONNEXIONPID_SPACEEXPLORER
, sizeof(SpaceExplorerKeys) / sizeof(SpaceExplorerKeys[0])
, const_cast<V3dk *>(SpaceExplorerKeys)
CONNEXIONPID_SPACEPILOT,
sizeof(SpacePilotKeys) / sizeof(SpacePilotKeys[0]),
const_cast<V3dk *>(SpacePilotKeys),
CONNEXIONPID_SPACEEXPLORER,
sizeof(SpaceExplorerKeys) / sizeof(SpaceExplorerKeys[0]),
const_cast<V3dk *>(SpaceExplorerKeys)
};
// Converts a hid device keycode (button identifier) of a pre-2009 3Dconnexion USB device to the standard 3d mouse virtual key definition.
@ -282,10 +273,8 @@ static const struct tag_VirtualKeys _3dmouseVirtualKeys[] = {
// to the standard 3d mouse virtual key definition.
unsigned short HidToVirtualKey(unsigned long pid, unsigned short hidKeyCode) {
unsigned short virtualkey = hidKeyCode;
for (size_t i = 0; i<sizeof(_3dmouseVirtualKeys) / sizeof(_3dmouseVirtualKeys[0]); ++i)
{
if (pid == _3dmouseVirtualKeys[i].pid)
{
for (size_t i = 0; i<sizeof(_3dmouseVirtualKeys) / sizeof(_3dmouseVirtualKeys[0]); ++i) {
if (pid == _3dmouseVirtualKeys[i].pid) {
if (hidKeyCode < _3dmouseVirtualKeys[i].nKeys) {
virtualkey = _3dmouseVirtualKeys[i].vkeys[hidKeyCode];
} else {
@ -300,17 +289,17 @@ unsigned short HidToVirtualKey(unsigned long pid, unsigned short hidKeyCode) {
bool ConnexionClient::RawInputEventFilter(void* msg, long* result) {
ConnexionData& connexiondata = ConnexionData::getInstance();
if (ConnexionClient::Is3dmouseAttached() && connexiondata.getDeviceID() == 0) {
if (Is3dmouseAttached() && connexiondata.getDeviceID() == 0) {
connexiondata.registerToUserInputMapper(*Application::getUserInputMapper());
connexiondata.assignDefaultInputMapping(*Application::getUserInputMapper());
UserActivityLogger::getInstance().connectedDevice("controller", "3Dconnexion");
} else if (!ConnexionClient::Is3dmouseAttached() && connexiondata.getDeviceID() != 0) {
} else if (!Is3dmouseAttached() && connexiondata.getDeviceID() != 0) {
int deviceid = connexiondata.getDeviceID();
connexiondata.setDeviceID(0);
Application::getUserInputMapper()->removeDevice(deviceid);
}
if (!ConnexionClient::Is3dmouseAttached()) {
if (!Is3dmouseAttached()) {
return false;
}
@ -318,8 +307,8 @@ bool ConnexionClient::RawInputEventFilter(void* msg, long* result) {
if (message->message == WM_INPUT) {
HRAWINPUT hRawInput = reinterpret_cast<HRAWINPUT>(message->lParam);
gMouseInput->OnRawInput(RIM_INPUT, hRawInput);
if (result != 0) {
OnRawInput(RIM_INPUT, hRawInput);
if (result != 0) {
result = 0;
}
return true;
@ -327,15 +316,6 @@ bool ConnexionClient::RawInputEventFilter(void* msg, long* result) {
return false;
}
ConnexionClient::ConnexionClient() {
}
ConnexionClient::~ConnexionClient() {
ConnexionClient& cclient = ConnexionClient::getInstance();
QAbstractEventDispatcher::instance()->removeNativeEventFilter(&cclient);
}
// Access the mouse parameters structure
I3dMouseParam& ConnexionClient::MouseParams() {
return f3dMouseParams;
@ -680,8 +660,6 @@ void ConnexionClient::OnRawInput(UINT nInputCode, HRAWINPUT hRawInput) {
bool ConnexionClient::TranslateRawInputData(UINT nInputCode, PRAWINPUT pRawInput) {
bool bIsForeground = (nInputCode == RIM_INPUT);
//qDebug("Rawinput.header.dwType=0x%x\n", pRawInput->header.dwType);
// We are not interested in keyboard or mouse data received via raw input
if (pRawInput->header.dwType != RIM_TYPEHID) {
return false;
@ -689,9 +667,9 @@ bool ConnexionClient::TranslateRawInputData(UINT nInputCode, PRAWINPUT pRawInput
if (TRACE_RIDI_DEVICENAME == 1) {
UINT dwSize = 0;
if (::GetRawInputDeviceInfo(pRawInput->header.hDevice, RIDI_DEVICENAME, NULL, &dwSize) == 0) {
if (::GetRawInputDeviceInfo(pRawInput->header.hDevice, RIDI_DEVICENAME, NULL, &dwSize) == 0) {
std::vector<wchar_t> szDeviceName(dwSize + 1);
if (::GetRawInputDeviceInfo(pRawInput->header.hDevice, RIDI_DEVICENAME, &szDeviceName[0], &dwSize) >0) {
if (::GetRawInputDeviceInfo(pRawInput->header.hDevice, RIDI_DEVICENAME, &szDeviceName[0], &dwSize) > 0) {
qDebug("Device Name = %s\nDevice handle = 0x%x\n", &szDeviceName[0], pRawInput->header.hDevice);
}
}
@ -703,7 +681,7 @@ bool ConnexionClient::TranslateRawInputData(UINT nInputCode, PRAWINPUT pRawInput
if (::GetRawInputDeviceInfo(pRawInput->header.hDevice, RIDI_DEVICEINFO, &sRidDeviceInfo, &cbSize) == cbSize) {
if (TRACE_RIDI_DEVICEINFO == 1) {
switch (sRidDeviceInfo.dwType) {
switch (sRidDeviceInfo.dwType) {
case RIM_TYPEMOUSE:
qDebug("\tsRidDeviceInfo.dwType=RIM_TYPEMOUSE\n");
break;
@ -806,16 +784,15 @@ bool ConnexionClient::TranslateRawInputData(UINT nInputCode, PRAWINPUT pRawInput
return false;
}
MouseParameters::MouseParameters() : fNavigation(NAVIGATION_OBJECT_MODE)
, fPivot(PIVOT_AUTO)
, fPivotVisibility(PIVOT_SHOW)
, fIsLockHorizon(true)
, fIsPanZoom(true)
, fIsRotate(true)
, fSpeed(SPEED_LOW) {
}
MouseParameters::~MouseParameters() {
MouseParameters::MouseParameters() :
fNavigation(NAVIGATION_OBJECT_MODE),
fPivot(PIVOT_AUTO),
fPivotVisibility(PIVOT_SHOW),
fIsLockHorizon(true),
fIsPanZoom(true),
fIsRotate(true),
fSpeed(SPEED_LOW)
{
}
bool MouseParameters::IsPanZoom() const {
@ -831,15 +808,15 @@ MouseParameters::Speed MouseParameters::GetSpeed() const {
}
void MouseParameters::SetPanZoom(bool isPanZoom) {
fIsPanZoom=isPanZoom;
fIsPanZoom = isPanZoom;
}
void MouseParameters::SetRotate(bool isRotate) {
fIsRotate=isRotate;
fIsRotate = isRotate;
}
void MouseParameters::SetSpeed(Speed speed) {
fSpeed=speed;
fSpeed = speed;
}
MouseParameters::Navigation MouseParameters::GetNavigationMode() const {
@ -878,13 +855,6 @@ void MouseParameters::SetPivotVisibility(PivotVisibility visibility) {
#else
#define WITH_SEPARATE_THREAD false // set to true or false
// Make the linker happy for the framework check (see link below for more info)
// http://developer.apple.com/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/WeakLinking.html
extern int16_t SetConnexionHandlers(ConnexionMessageHandlerProc messageHandler, ConnexionAddedHandlerProc addedHandler, ConnexionRemovedHandlerProc removedHandler, bool useSeparateThread) __attribute__((weak_import));
int fConnexionClientID;
static ConnexionDeviceState lastState;
@ -893,20 +863,18 @@ static void DeviceAddedHandler(unsigned int connection);
static void DeviceRemovedHandler(unsigned int connection);
static void MessageHandler(unsigned int connection, unsigned int messageType, void *messageArgument);
void ConnexionClient::toggleConnexion(bool shouldEnable)
{
if (shouldEnable && !ConnexionClient::Is3dmouseAttached()) {
ConnexionClient::init();
void ConnexionClient::toggleConnexion(bool shouldEnable) {
if (shouldEnable && !Is3dmouseAttached()) {
init();
}
if (!shouldEnable && ConnexionClient::Is3dmouseAttached()) {
ConnexionClient::destroy();
if (!shouldEnable && Is3dmouseAttached()) {
destroy();
}
}
void ConnexionClient::init() {
// Make sure the framework is installed
if (SetConnexionHandlers != NULL && Menu::getInstance()->isOptionChecked(MenuOption::Connexion)) {
if (Menu::getInstance()->isOptionChecked(MenuOption::Connexion)) {
// Install message handler and register our client
InstallConnexionHandlers(MessageHandler, DeviceAddedHandler, DeviceRemovedHandler);
// Either use this to take over in our application only... does not work
@ -923,7 +891,7 @@ void ConnexionClient::init() {
// use default switches
ConnexionClientControl(fConnexionClientID, kConnexionCtlSetSwitches, kConnexionSwitchesDisabled, NULL);
if (ConnexionClient::Is3dmouseAttached() && connexiondata.getDeviceID() == 0) {
if (Is3dmouseAttached() && connexiondata.getDeviceID() == 0) {
connexiondata.registerToUserInputMapper(*Application::getUserInputMapper());
connexiondata.assignDefaultInputMapping(*Application::getUserInputMapper());
UserActivityLogger::getInstance().connectedDevice("controller", "3Dconnexion");
@ -1010,4 +978,4 @@ void MessageHandler(unsigned int connection, unsigned int messageType, void *mes
#endif // __APPLE__
#endif // HAVE_CONNEXIONCLIENT
#endif // HAVE_3DCONNEXIONCLIENT


@ -1,5 +1,5 @@
// 3DConnexion.h
// hifi
// 3DConnexionClient.h
// interface/src/devices
//
// Created by Marcel Verhagen on 09-06-15.
// Copyright 2015 High Fidelity, Inc.
@ -8,8 +8,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_ConnexionClient_h
#define hifi_ConnexionClient_h
#ifndef hifi_3DConnexionClient_h
#define hifi_3DConnexionClient_h
#include <qobject.h>
#include <qlibrary.h>
@ -18,22 +18,22 @@
#include "ui/UserInputMapper.h"
#ifndef HAVE_CONNEXIONCLIENT
#ifndef HAVE_3DCONNEXIONCLIENT
class ConnexionClient : public QObject {
Q_OBJECT
public:
static ConnexionClient& getInstance();
static void init() {};
static void destroy() {};
static bool Is3dmouseAttached() { return false; };
void init() {};
void destroy() {};
bool Is3dmouseAttached() { return false; };
public slots:
void toggleConnexion(bool shouldEnable) {};
};
#endif // NOT_HAVE_CONNEXIONCLIENT
#endif // NOT_HAVE_3DCONNEXIONCLIENT
#ifdef HAVE_CONNEXIONCLIENT
#ifdef HAVE_3DCONNEXIONCLIENT
// the windows connexion rawinput
#ifdef _WIN32
#ifdef Q_OS_WIN
#include "I3dMouseParams.h"
#include <QAbstractNativeEventFilter>
@ -45,7 +45,6 @@ public slots:
class MouseParameters : public I3dMouseParam {
public:
MouseParameters();
~MouseParameters();
// I3dmouseSensor interface
bool IsPanZoom() const;
@ -86,16 +85,14 @@ private:
class ConnexionClient : public QObject, public QAbstractNativeEventFilter {
Q_OBJECT
public:
ConnexionClient();
~ConnexionClient();
ConnexionClient() {};
static ConnexionClient& getInstance();
ConnexionClient* client;
static void init();
static void destroy();
static bool Is3dmouseAttached();
static ConnexionClient& getInstance();
void init();
void destroy();
bool Is3dmouseAttached();
ConnexionClient* client;
I3dMouseParam& MouseParams();
const I3dMouseParam& MouseParams() const;
@ -107,7 +104,7 @@ public:
virtual bool nativeEventFilter(const QByteArray& eventType, void* message, long* result) Q_DECL_OVERRIDE
{
MSG* msg = static_cast< MSG * >(message);
return ConnexionClient::RawInputEventFilter(message, result);
return RawInputEventFilter(message, result);
}
public slots:
@ -121,7 +118,7 @@ signals:
private:
bool InitializeRawInput(HWND hwndTarget);
static bool RawInputEventFilter(void* msg, long* result);
bool RawInputEventFilter(void* msg, long* result);
void OnRawInput(UINT nInputCode, HRAWINPUT hRawInput);
UINT GetRawInputBuffer(PRAWINPUT pData, PUINT pcbSize, UINT cbSizeHeader);
@ -166,16 +163,16 @@ class ConnexionClient : public QObject {
Q_OBJECT
public:
static ConnexionClient& getInstance();
static bool Is3dmouseAttached();
static void init();
static void destroy();
void init();
void destroy();
bool Is3dmouseAttached();
public slots:
void toggleConnexion(bool shouldEnable);
};
#endif // __APPLE__
#endif // HAVE_CONNEXIONCLIENT
#endif // HAVE_3DCONNEXIONCLIENT
// connects to the userinputmapper
@ -241,4 +238,4 @@ protected:
AxisStateMap _axisStateMap;
};
#endif // defined(hifi_ConnexionClient_h)
#endif // defined(hifi_3DConnexionClient_h)


@ -564,6 +564,13 @@ void DdeFaceTracker::decodePacket(const QByteArray& buffer) {
eyeCoefficients[1] = _filteredEyeBlinks[1];
}
// Couple eyelid values if configured - use the most "open" value for both
if (Menu::getInstance()->isOptionChecked(MenuOption::CoupleEyelids)) {
float eyeCoefficient = std::min(eyeCoefficients[0], eyeCoefficients[1]);
eyeCoefficients[0] = eyeCoefficient;
eyeCoefficients[1] = eyeCoefficient;
}
// Use EyeBlink values to control both EyeBlink and EyeOpen
if (eyeCoefficients[0] > 0) {
_coefficients[_leftBlinkIndex] = eyeCoefficients[0];
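The new CoupleEyelids branch takes std::min of the two blink coefficients: a smaller blink value means a more open eye, so both eyelids follow whichever eye is more open. A trivial standalone sketch of that coupling (plain floats, not the DdeFaceTracker state):

```cpp
// Trivial standalone sketch (plain floats, not the DdeFaceTracker state):
// couple both eyelids to the most "open" (smallest) blink coefficient.
#include <algorithm>
#include <cstdio>

int main() {
    float eyeCoefficients[2] = {0.7f, 0.2f};  // blink strength per eye: 0 = open, 1 = closed

    float coupled = std::min(eyeCoefficients[0], eyeCoefficients[1]);
    eyeCoefficients[0] = coupled;
    eyeCoefficients[1] = coupled;

    std::printf("coupled blink = %.2f for both eyes\n", coupled);
    return 0;
}
```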


@ -24,7 +24,8 @@
ControllerScriptingInterface::ControllerScriptingInterface() :
_mouseCaptured(false),
_touchCaptured(false),
_wheelCaptured(false)
_wheelCaptured(false),
_actionsCaptured(false)
{
}


@ -14,7 +14,6 @@
#include <avatar/AvatarManager.h>
#include <DeferredLightingEffect.h>
#include <GLMHelpers.h>
#include <gpu/GLBackend.h>
#include <gpu/GLBackendShared.h>
#include <FramebufferCache.h>
#include <GLMHelpers.h>
@ -32,7 +31,6 @@
#include "ui/AvatarInputs.h"
const vec4 CONNECTION_STATUS_BORDER_COLOR{ 1.0f, 0.0f, 0.0f, 0.8f };
const float CONNECTION_STATUS_BORDER_LINE_WIDTH = 4.0f;
static const float ORTHO_NEAR_CLIP = -1000.0f;
static const float ORTHO_FAR_CLIP = 1000.0f;
@ -137,8 +135,7 @@ void ApplicationOverlay::renderAudioScope(RenderArgs* renderArgs) {
batch.setProjectionTransform(legacyProjection);
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch._glLineWidth(1.0f); // default
// Render the audio scope
DependencyManager::get<AudioScope>()->render(renderArgs, width, height);
}
@ -157,8 +154,7 @@ void ApplicationOverlay::renderOverlays(RenderArgs* renderArgs) {
batch.setProjectionTransform(legacyProjection);
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch._glLineWidth(1.0f); // default
// Render all of the Script based "HUD" aka 2D overlays.
// note: we call them HUD, as opposed to 2D, only because there are some cases of 3D HUD overlays, like the
// camera controls for the edit.js
@ -195,6 +191,7 @@ void ApplicationOverlay::renderRearView(RenderArgs* renderArgs) {
glm::vec2 texCoordMinCorner(0.0f, 0.0f);
glm::vec2 texCoordMaxCorner(viewport.width() * renderRatio / float(selfieTexture->getWidth()), viewport.height() * renderRatio / float(selfieTexture->getHeight()));
geometryCache->useSimpleDrawPipeline(batch, true);
batch.setResourceTexture(0, selfieTexture);
geometryCache->renderQuad(batch, bottomLeft, topRight, texCoordMinCorner, texCoordMaxCorner, glm::vec4(1.0f, 1.0f, 1.0f, 1.0f));
@ -247,7 +244,7 @@ void ApplicationOverlay::renderDomainConnectionStatusBorder(RenderArgs* renderAr
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch.setResourceTexture(0, DependencyManager::get<TextureCache>()->getWhiteTexture());
batch._glLineWidth(CONNECTION_STATUS_BORDER_LINE_WIDTH);
// FIXME: The line width of CONNECTION_STATUS_BORDER_LINE_WIDTH is not supported anymore, we'll need a workaround
// TODO animate the disconnect border for some excitement while not connected?
//double usecs = usecTimestampNow();


@ -101,8 +101,9 @@ void Circle3DOverlay::render(RenderArgs* args) {
Q_ASSERT(args->_batch);
auto& batch = *args->_batch;
batch._glLineWidth(_lineWidth);
// FIXME: The line width of _lineWidth is not supported anymore, we'll need a workaround
auto transform = _transform;
transform.postScale(glm::vec3(getDimensions(), 1.0f));
batch.setModelTransform(transform);


@ -60,7 +60,6 @@ void Grid3DOverlay::render(RenderArgs* args) {
// Minor grid
{
batch->_glLineWidth(1.0f);
auto position = glm::vec3(_minorGridWidth * (floorf(rotated.x / spacing) - MINOR_GRID_DIVISIONS / 2),
spacing * (floorf(rotated.y / spacing) - MINOR_GRID_DIVISIONS / 2),
getPosition().z);
@ -76,7 +75,6 @@ void Grid3DOverlay::render(RenderArgs* args) {
// Major grid
{
batch->_glLineWidth(4.0f);
spacing *= _majorGridEvery;
auto position = glm::vec3(spacing * (floorf(rotated.x / spacing) - MAJOR_GRID_DIVISIONS / 2),
spacing * (floorf(rotated.y / spacing) - MAJOR_GRID_DIVISIONS / 2),
@ -86,6 +84,8 @@ void Grid3DOverlay::render(RenderArgs* args) {
transform.setTranslation(position);
transform.setScale(scale);
// FIXME: The line width of 4.0f is not supported anymore, we'll need a workaround
batch->setModelTransform(transform);
DependencyManager::get<GeometryCache>()->renderGrid(*batch, MAJOR_GRID_DIVISIONS, MAJOR_GRID_DIVISIONS, gridColor);


@ -94,8 +94,8 @@ void Image3DOverlay::render(RenderArgs* args) {
batch->setModelTransform(transform);
batch->setResourceTexture(0, _texture->getGPUTexture());
DependencyManager::get<DeferredLightingEffect>()->bindSimpleProgram(*batch, true, true, false, true);
DependencyManager::get<DeferredLightingEffect>()->bindSimpleProgram(*batch, true, false, false, true);
DependencyManager::get<GeometryCache>()->renderQuad(
*batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight,
glm::vec4(color.red / MAX_COLOR, color.green / MAX_COLOR, color.blue / MAX_COLOR, alpha)


@ -15,7 +15,6 @@
#include <limits>
#include <render/Scene.h>
#include <gpu/GLBackend.h>
#include <RegisteredMetaTypes.h>
#include "Application.h"
@ -119,7 +118,6 @@ void Overlays::renderHUD(RenderArgs* renderArgs) {
batch.setProjectionTransform(legacyProjection);
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch._glLineWidth(1.0f); // default
thisOverlay->render(renderArgs);
}


@ -15,7 +15,6 @@
#include <DependencyManager.h>
#include <GeometryCache.h>
#include <GLMHelpers.h>
#include <gpu/GLBackend.h>
#include <OffscreenUi.h>
#include <RegisteredMetaTypes.h>
#include <SharedUtil.h>


@ -49,7 +49,7 @@ void AnimationHandle::setMaskedJoints(const QStringList& maskedJoints) {
_jointMappings.clear();
}
void AnimationHandle::setRunning(bool running) {
void AnimationHandle::setRunning(bool running, bool doRestoreJoints) {
if (running && isRunning()) {
// if we're already running, this is the same as a restart
setFrameIndex(getFirstFrame());
@ -62,7 +62,9 @@ void AnimationHandle::setRunning(bool running) {
}
} else {
_rig->removeRunningAnimation(getAnimationHandlePointer());
restoreJoints();
if (doRestoreJoints) {
restoreJoints();
}
replaceMatchingPriorities(0.0f);
}
emit runningChanged(isRunning());
@ -71,7 +73,9 @@ void AnimationHandle::setRunning(bool running) {
AnimationHandle::AnimationHandle(RigPointer rig) :
QObject(rig.get()),
_rig(rig),
_priority(1.0f)
_priority(1.0f),
_fade(0.0f),
_fadePerSecond(0.0f)
{
}


@ -64,6 +64,10 @@ public:
void setPriority(float priority);
float getPriority() const { return _priority; }
void setMix(float mix) { _mix = mix; }
void setFade(float fade) { _fade = fade; }
float getFade() const { return _fade; }
void setFadePerSecond(float fadePerSecond) { _fadePerSecond = fadePerSecond; }
float getFadePerSecond() const { return _fadePerSecond; }
void setMaskedJoints(const QStringList& maskedJoints);
const QStringList& getMaskedJoints() const { return _maskedJoints; }
@ -87,7 +91,7 @@ public:
void setLastFrame(float lastFrame) { _animationLoop.setLastFrame(lastFrame); }
float getLastFrame() const { return _animationLoop.getLastFrame(); }
void setRunning(bool running);
void setRunning(bool running, bool restoreJoints = true);
bool isRunning() const { return _animationLoop.isRunning(); }
void setFrameIndex(float frameIndex) { _animationLoop.setFrameIndex(frameIndex); }
@ -111,7 +115,7 @@ signals:
public slots:
void start() { setRunning(true); }
void stop() { setRunning(false); }
void stop() { setRunning(false); _fadePerSecond = _fade = 0.0f; }
private:
@ -120,7 +124,9 @@ private:
QString _role;
QUrl _url;
float _priority;
float _mix;
float _mix; // How much of this animation to blend against what is already there. 1.0 sets to just this animation.
float _fade; // How far are we into full strength. 0.0 uses none of this animation, 1.0 (the max) is as much as possible.
float _fadePerSecond; // How fast should _fade change? +1.0 means _fade is increasing to 1.0 in 1 second. Negative is fading out.
QStringList _maskedJoints;
QVector<int> _jointMappings;

View file

@ -20,15 +20,81 @@ void AvatarRig::updateJointState(int index, glm::mat4 parentTransform) {
const FBXJoint& joint = state.getFBXJoint();
// compute model transforms
int parentIndex = joint.parentIndex;
if (parentIndex == -1) {
if (index == _rootJointIndex) {
// we always zero out the translation part of an avatar's root joint transform.
state.computeTransform(parentTransform);
clearJointTransformTranslation(index);
} else {
// guard against out-of-bounds access to _jointStates
if (joint.parentIndex >= 0 && joint.parentIndex < _jointStates.size()) {
int parentIndex = joint.parentIndex;
if (parentIndex >= 0 && parentIndex < _jointStates.size()) {
const JointState& parentState = _jointStates.at(parentIndex);
state.computeTransform(parentState.getTransform(), parentState.getTransformChanged());
}
}
}
void AvatarRig::setHandPosition(int jointIndex,
const glm::vec3& position, const glm::quat& rotation,
float scale, float priority) {
bool rightHand = (jointIndex == _rightHandJointIndex);
int elbowJointIndex = rightHand ? _rightElbowJointIndex : _leftElbowJointIndex;
int shoulderJointIndex = rightHand ? _rightShoulderJointIndex : _leftShoulderJointIndex;
// this algorithm is adapted from Sixense sample code
if (elbowJointIndex == -1 || shoulderJointIndex == -1) {
return;
}
glm::vec3 shoulderPosition;
if (!getJointPosition(shoulderJointIndex, shoulderPosition)) {
return;
}
// precomputed lengths
float upperArmLength = _jointStates[elbowJointIndex].getFBXJoint().distanceToParent * scale;
float lowerArmLength = _jointStates[jointIndex].getFBXJoint().distanceToParent * scale;
// first set wrist position
glm::vec3 wristPosition = position;
glm::vec3 shoulderToWrist = wristPosition - shoulderPosition;
float distanceToWrist = glm::length(shoulderToWrist);
// keep the wrist within reach so the triangle solve below stays well defined
if (distanceToWrist > upperArmLength + lowerArmLength - EPSILON) {
distanceToWrist = upperArmLength + lowerArmLength - EPSILON;
shoulderToWrist = glm::normalize(shoulderToWrist) * distanceToWrist;
wristPosition = shoulderPosition + shoulderToWrist;
}
// cosine of angle from upper arm to hand vector
float cosA = (upperArmLength * upperArmLength + distanceToWrist * distanceToWrist - lowerArmLength * lowerArmLength) /
(2 * upperArmLength * distanceToWrist);
float mid = upperArmLength * cosA;
float height = sqrt(upperArmLength * upperArmLength + mid * mid - 2 * upperArmLength * mid * cosA);
// direction of the elbow
glm::vec3 handNormal = glm::cross(rotation * glm::vec3(0.0f, 1.0f, 0.0f), shoulderToWrist); // elbow rotating with wrist
glm::vec3 relaxedNormal = glm::cross(glm::vec3(0.0f, 1.0f, 0.0f), shoulderToWrist); // elbow pointing straight down
const float NORMAL_WEIGHT = 0.5f;
glm::vec3 finalNormal = glm::mix(relaxedNormal, handNormal, NORMAL_WEIGHT);
if (rightHand ? (finalNormal.y > 0.0f) : (finalNormal.y < 0.0f)) {
finalNormal.y = 0.0f; // don't allow elbows to point inward (y is the vertical axis)
}
glm::vec3 tangent = glm::normalize(glm::cross(shoulderToWrist, finalNormal));
// ik solution
glm::vec3 elbowPosition = shoulderPosition + glm::normalize(shoulderToWrist) * mid - tangent * height;
glm::vec3 forwardVector(rightHand ? -1.0f : 1.0f, 0.0f, 0.0f);
glm::quat shoulderRotation = rotationBetween(forwardVector, elbowPosition - shoulderPosition);
setJointRotationInBindFrame(shoulderJointIndex, shoulderRotation, priority);
setJointRotationInBindFrame(elbowJointIndex,
rotationBetween(shoulderRotation * forwardVector, wristPosition - elbowPosition) *
shoulderRotation, priority);
setJointRotationInBindFrame(jointIndex, rotation, priority);
}
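For reference, a minimal standalone sketch of the two-bone triangle the hand IK above solves; the helper name and its free-function form are illustrative only, not part of this change.

#include <algorithm>
#include <cmath>

// Given upper-arm length u, lower-arm length l and (clamped) shoulder-to-wrist
// distance d, the law of cosines gives the shoulder angle A; the elbow then sits
// mid = u*cosA along the shoulder->wrist axis and height = u*sinA along the
// blended normal, exactly as computed in setHandPosition() above.
static void elbowOffsets(float u, float l, float d, float& mid, float& height) {
    float cosA = (u * u + d * d - l * l) / (2.0f * u * d);
    mid = u * cosA;
    // equivalent to the sqrt(u*u + mid*mid - 2*u*mid*cosA) form used above,
    // which reduces to u*u*(1 - cosA*cosA)
    height = u * std::sqrt(std::max(0.0f, 1.0f - cosA * cosA));
}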

View file

@ -22,6 +22,8 @@ class AvatarRig : public Rig {
public:
~AvatarRig() {}
virtual void updateJointState(int index, glm::mat4 parentTransform);
virtual void setHandPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation,
float scale, float priority);
};
#endif // hifi_AvatarRig_h

View file

@ -22,6 +22,8 @@ class EntityRig : public Rig {
public:
~EntityRig() {}
virtual void updateJointState(int index, glm::mat4 parentTransform);
virtual void setHandPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation,
float scale, float priority) {}
};
#endif // hifi_EntityRig_h

View file

@ -58,15 +58,18 @@ void Rig::removeAnimationHandle(const AnimationHandlePointer& handle) {
void Rig::startAnimation(const QString& url, float fps, float priority,
bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) {
//qCDebug(animation) << "startAnimation" << url << fps << priority << loop << hold << firstFrame << lastFrame << maskedJoints;
// This is different than startAnimationByRole, in which we use the existing values if the animation already exists.
// Here we reuse the animation handle if possible, but in any case, we set the values to those given (or defaulted).
AnimationHandlePointer handle = nullptr;
foreach (const AnimationHandlePointer& candidate, _animationHandles) {
if (candidate->getURL() == url) {
candidate->start();
return;
handle = candidate;
}
}
AnimationHandlePointer handle = createAnimationHandle();
handle->setURL(url);
if (!handle) {
handle = createAnimationHandle();
handle->setURL(url);
}
handle->setFPS(fps);
handle->setPriority(priority);
handle->setLoop(loop);
@ -77,8 +80,8 @@ void Rig::startAnimation(const QString& url, float fps, float priority,
handle->start();
}
void Rig::addAnimationByRole(const QString& role, const QString& url, float fps, float priority,
bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints, bool startAutomatically) {
AnimationHandlePointer Rig::addAnimationByRole(const QString& role, const QString& url, float fps, float priority,
bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints, bool startAutomatically) {
// check for a configured animation for the role
//qCDebug(animation) << "addAnimationByRole" << role << url << fps << priority << loop << hold << firstFrame << lastFrame << maskedJoints << startAutomatically;
foreach (const AnimationHandlePointer& candidate, _animationHandles) {
@ -86,12 +89,38 @@ void Rig::addAnimationByRole(const QString& role, const QString& url, float fps,
if (startAutomatically) {
candidate->start();
}
return;
return candidate;
}
}
AnimationHandlePointer handle = createAnimationHandle();
QString standard = "";
if (url.isEmpty()) { // Default animations for fight club
const QString& base = "https://hifi-public.s3.amazonaws.com/ozan/";
if (role == "walk") {
standard = base + "support/FightClubBotTest1/Animations/standard_walk.fbx";
lastFrame = 60;
} else if (role == "leftTurn") {
standard = base + "support/FightClubBotTest1/Animations/left_turn_noHipRotation.fbx";
lastFrame = 29;
} else if (role == "rightTurn") {
standard = base + "support/FightClubBotTest1/Animations/right_turn_noHipRotation.fbx";
lastFrame = 31;
} else if (role == "leftStrafe") {
standard = base + "animations/fightclub_bot_anims/side_step_left_inPlace.fbx";
lastFrame = 31;
} else if (role == "rightStrafe") {
standard = base + "animations/fightclub_bot_anims/side_step_right_inPlace.fbx";
lastFrame = 31;
} else if (role == "idle") {
standard = base + "support/FightClubBotTest1/Animations/standard_idle.fbx";
fps = 25.0f;
}
if (!standard.isEmpty()) {
loop = true;
}
}
handle->setRole(role);
handle->setURL(url);
handle->setURL(url.isEmpty() ? standard : url);
handle->setFPS(fps);
handle->setPriority(priority);
handle->setLoop(loop);
@ -102,16 +131,18 @@ void Rig::addAnimationByRole(const QString& role, const QString& url, float fps,
if (startAutomatically) {
handle->start();
}
return handle;
}
void Rig::startAnimationByRole(const QString& role, const QString& url, float fps, float priority,
bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) {
addAnimationByRole(role, url, fps, priority, loop, hold, firstFrame, lastFrame, maskedJoints, true);
AnimationHandlePointer handle = addAnimationByRole(role, url, fps, priority, loop, hold, firstFrame, lastFrame, maskedJoints, true);
handle->setFadePerSecond(1.0f); // For now. Could be individualized later.
}
void Rig::stopAnimationByRole(const QString& role) {
foreach (const AnimationHandlePointer& handle, getRunningAnimations()) {
if (handle->getRole() == role) {
handle->stop();
handle->setFadePerSecond(-1.0f); // For now. Could be individualized later.
}
}
}
@ -135,6 +166,14 @@ void Rig::addRunningAnimation(AnimationHandlePointer animationHandle) {
bool Rig::isRunningAnimation(AnimationHandlePointer animationHandle) {
return _runningAnimations.contains(animationHandle);
}
bool Rig::isRunningRole(const QString& role) { //obviously, there are more efficient ways to do this
for (auto animation : _runningAnimations) {
if ((animation->getRole() == role) && (animation->getFadePerSecond() >= 0.0f)) { // Don't count those being faded out
return true;
}
}
return false;
}
void Rig::deleteAnimations() {
for (auto animation : _animationHandles) {
@ -143,8 +182,24 @@ void Rig::deleteAnimations() {
_animationHandles.clear();
}
float Rig::initJointStates(QVector<JointState> states, glm::mat4 parentTransform) {
float Rig::initJointStates(QVector<JointState> states, glm::mat4 parentTransform,
int rootJointIndex,
int leftHandJointIndex,
int leftElbowJointIndex,
int leftShoulderJointIndex,
int rightHandJointIndex,
int rightElbowJointIndex,
int rightShoulderJointIndex) {
_jointStates = states;
_rootJointIndex = rootJointIndex;
_leftHandJointIndex = leftHandJointIndex;
_leftElbowJointIndex = leftElbowJointIndex;
_leftShoulderJointIndex = leftShoulderJointIndex;
_rightHandJointIndex = rightHandJointIndex;
_rightElbowJointIndex = rightElbowJointIndex;
_rightShoulderJointIndex = rightShoulderJointIndex;
initJointTransforms(parentTransform);
int numStates = _jointStates.size();
@ -356,47 +411,83 @@ glm::mat4 Rig::getJointVisibleTransform(int jointIndex) const {
}
void Rig::computeMotionAnimationState(float deltaTime, const glm::vec3& worldPosition, const glm::vec3& worldVelocity, const glm::quat& worldRotation) {
if (_enableRig) {
glm::vec3 front = worldRotation * IDENTITY_FRONT;
float forwardSpeed = glm::dot(worldVelocity, front);
float rotationalSpeed = glm::angle(front, _lastFront) / deltaTime;
bool isWalking = std::abs(forwardSpeed) > 0.01f;
bool isTurning = std::abs(rotationalSpeed) > 0.5f;
// Crude, until we have blending:
isTurning = isTurning && !isWalking; // Only one of walk/turn, walk wins.
isTurning = false; // FIXME
bool isIdle = !isWalking && !isTurning;
auto singleRole = [](bool walking, bool turning, bool idling) {
return walking ? "walk" : (turning ? "turn" : (idling ? "idle" : ""));
};
QString toStop = singleRole(_isWalking && !isWalking, _isTurning && !isTurning, _isIdle && !isIdle);
if (!toStop.isEmpty()) {
//qCDebug(animation) << "isTurning" << isTurning << "fronts" << front << _lastFront << glm::angle(front, _lastFront) << rotationalSpeed;
stopAnimationByRole(toStop);
}
QString newRole = singleRole(isWalking && !_isWalking, isTurning && !_isTurning, isIdle && !_isIdle);
if (!newRole.isEmpty()) {
startAnimationByRole(newRole);
qCDebug(animation) << deltaTime << ":" << worldVelocity << "." << front << "=> " << forwardSpeed << newRole;
}
_lastPosition = worldPosition;
_lastFront = front;
_isWalking = isWalking;
_isTurning = isTurning;
_isIdle = isIdle;
if (!_enableRig) {
return;
}
bool isMoving = false;
glm::vec3 front = worldRotation * IDENTITY_FRONT;
float forwardSpeed = glm::dot(worldVelocity, front);
float rightLateralSpeed = glm::dot(worldVelocity, worldRotation * IDENTITY_RIGHT);
float rightTurningSpeed = glm::orientedAngle(front, _lastFront, IDENTITY_UP) / deltaTime;
auto updateRole = [&](const QString& role, bool isOn) {
isMoving = isMoving || isOn;
if (isOn) {
if (!isRunningRole(role)) {
qCDebug(animation) << "Rig STARTING" << role;
startAnimationByRole(role);
}
} else {
if (isRunningRole(role)) {
qCDebug(animation) << "Rig stopping" << role;
stopAnimationByRole(role);
}
}
};
updateRole("walk", std::abs(forwardSpeed) > 0.01f);
bool isTurning = std::abs(rightTurningSpeed) > 0.5f;
updateRole("rightTurn", isTurning && (rightTurningSpeed > 0));
updateRole("leftTurn", isTurning && (rightTurningSpeed < 0));
bool isStrafing = std::abs(rightLateralSpeed) > 0.01f;
updateRole("rightStrafe", isStrafing && (rightLateralSpeed > 0.0f));
updateRole("leftStrafe", isStrafing && (rightLateralSpeed < 0.0f));
updateRole("idle", !isMoving); // Must be last, as it makes isMoving bogus.
_lastFront = front;
_lastPosition = worldPosition;
}
void Rig::updateAnimations(float deltaTime, glm::mat4 parentTransform) {
int nAnimationsSoFar = 0;
// First normalize the fades so that they sum to 1.0.
// update the fade data in each animation (not normalized, since fade is an independent property of each animation)
foreach (const AnimationHandlePointer& handle, _runningAnimations) {
handle->setMix(1.0f / ++nAnimationsSoFar);
handle->setPriority(1.0);
float fadePerSecond = handle->getFadePerSecond();
float fade = handle->getFade();
if (fadePerSecond != 0.0f) {
fade += fadePerSecond * deltaTime;
if ((0.0f >= fade) || (fade >= 1.0f)) {
fade = glm::clamp(fade, 0.0f, 1.0f);
handle->setFadePerSecond(0.0f);
}
handle->setFade(fade);
if (fade <= 0.0f) { // stop any finished animations now
handle->setRunning(false, false); // but do not restore joints as it causes a flicker
}
}
}
// sum the remaining fade data
float fadeTotal = 0.0f;
foreach (const AnimationHandlePointer& handle, _runningAnimations) {
fadeTotal += handle->getFade();
}
float fadeSumSoFar = 0.0f;
foreach (const AnimationHandlePointer& handle, _runningAnimations) {
handle->setPriority(1.0f);
float normalizedFade = handle->getFade() / fadeTotal;
// simulate() will blend each animation result into the result so far, based on the pairwise mix at each step.
// i.e., slerp the 'mix' distance from the result so far towards this iteration's animation result.
// The formula here for mix is based on the idea that, at each step:
// fadeSumSoFar is to normalizedFade as (1 - mix) is to mix,
// i.e., fadeSumSoFar / normalizedFade = (1 - mix) / mix.
// Then we solve for mix.
// Sanity check: for the first animation, fadeSumSoFar = 0, and the mix will always be 1.
// Sanity check: for equal blending, the formula is equivalent to mix = 1 / nAnimationsSoFar++.
float mix = 1.0f / ((fadeSumSoFar / normalizedFade) + 1.0f);
assert((0.0f <= mix) && (mix <= 1.0f));
fadeSumSoFar += normalizedFade;
handle->setMix(mix);
handle->simulate(deltaTime);
}
for (int i = 0; i < _jointStates.size(); i++) {
updateJointState(i, parentTransform);
}
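A small worked example of the mix formula above, with hypothetical fade values (0.25 and 0.75 after normalization): the first handle gets mix 1.0 and the second gets mix 0.75, so the accumulated blend ends up as 0.25 of the first animation plus 0.75 of the second, matching the normalized fades.

#include <initializer_list>

static void mixWalkthrough() {
    float fadeSumSoFar = 0.0f;
    for (float normalizedFade : { 0.25f, 0.75f }) {
        float mix = 1.0f / ((fadeSumSoFar / normalizedFade) + 1.0f);
        // first iteration: mix == 1.0f; second iteration: mix == 0.75f
        fadeSumSoFar += normalizedFade;
        (void) mix; // the real loop calls handle->setMix(mix) and handle->simulate(deltaTime) here
    }
}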

View file

@ -42,7 +42,6 @@
class AnimationHandle;
typedef std::shared_ptr<AnimationHandle> AnimationHandlePointer;
// typedef QWeakPointer<AnimationHandle> WeakAnimationHandlePointer;
class Rig;
typedef std::shared_ptr<Rig> RigPointer;
@ -76,6 +75,7 @@ public:
bool removeRunningAnimation(AnimationHandlePointer animationHandle);
void addRunningAnimation(AnimationHandlePointer animationHandle);
bool isRunningAnimation(AnimationHandlePointer animationHandle);
bool isRunningRole(const QString& role); // There can be multiple animations per role, so this is more general than isRunningAnimation.
const QList<AnimationHandlePointer>& getRunningAnimations() const { return _runningAnimations; }
void deleteAnimations();
const QList<AnimationHandlePointer>& getAnimationHandles() const { return _animationHandles; }
@ -86,11 +86,18 @@ public:
float priority = 1.0f, bool loop = false, bool hold = false, float firstFrame = 0.0f,
float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList());
void stopAnimationByRole(const QString& role);
void addAnimationByRole(const QString& role, const QString& url = QString(), float fps = 30.0f,
float priority = 1.0f, bool loop = false, bool hold = false, float firstFrame = 0.0f,
float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList(), bool startAutomatically = false);
AnimationHandlePointer addAnimationByRole(const QString& role, const QString& url = QString(), float fps = 30.0f,
float priority = 1.0f, bool loop = false, bool hold = false, float firstFrame = 0.0f,
float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList(), bool startAutomatically = false);
float initJointStates(QVector<JointState> states, glm::mat4 parentTransform);
float initJointStates(QVector<JointState> states, glm::mat4 parentTransform,
int rootJointIndex,
int leftHandJointIndex,
int leftElbowJointIndex,
int leftShoulderJointIndex,
int rightHandJointIndex,
int rightElbowJointIndex,
int rightShoulderJointIndex);
bool jointStatesEmpty() { return _jointStates.isEmpty(); };
int getJointStateCount() const { return _jointStates.size(); }
int indexOfJoint(const QString& jointName) ;
@ -149,6 +156,9 @@ public:
void updateFromHeadParameters(const HeadParameters& params);
virtual void setHandPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation,
float scale, float priority) = 0;
protected:
void updateLeanJoint(int index, float leanSideways, float leanForward, float torsoTwist);
@ -156,16 +166,22 @@ public:
void updateEyeJoint(int index, const glm::quat& worldHeadOrientation, const glm::vec3& lookAt, const glm::vec3& saccade);
QVector<JointState> _jointStates;
int _rootJointIndex = -1;
int _leftHandJointIndex = -1;
int _leftElbowJointIndex = -1;
int _leftShoulderJointIndex = -1;
int _rightHandJointIndex = -1;
int _rightElbowJointIndex = -1;
int _rightShoulderJointIndex = -1;
QList<AnimationHandlePointer> _animationHandles;
QList<AnimationHandlePointer> _runningAnimations;
bool _enableRig;
bool _isWalking;
bool _isTurning;
bool _isIdle;
glm::vec3 _lastFront;
glm::vec3 _lastPosition;
};
};
#endif /* defined(__hifi__Rig__) */

View file

@ -46,7 +46,6 @@ typedef unsigned long long quint64;
#include <QtScript/QScriptable>
#include <QReadWriteLock>
#include <CollisionInfo.h>
#include <NLPacket.h>
#include <Node.h>
#include <RegisteredMetaTypes.h>
@ -257,10 +256,6 @@ public:
const HeadData* getHeadData() const { return _headData; }
const HandData* getHandData() const { return _handData; }
virtual bool findSphereCollisions(const glm::vec3& particleCenter, float particleRadius, CollisionList& collisions) {
return false;
}
bool hasIdentityChangedAfterParsing(NLPacket& packet);
QByteArray identityByteArray();

View file

@ -3,7 +3,7 @@ set(TARGET_NAME entities-renderer)
AUTOSCRIBE_SHADER_LIB(gpu model render)
# use setup_hifi_library macro to setup our project and link appropriate Qt modules
setup_hifi_library(Widgets OpenGL Network Script)
setup_hifi_library(Widgets Network Script)
add_dependency_external_projects(glm)
find_package(GLM REQUIRED)

View file

@ -46,13 +46,11 @@ void RenderableLineEntityItem::render(RenderArgs* args) {
transform.setTranslation(getPosition());
transform.setRotation(getRotation());
batch.setModelTransform(transform);
batch._glLineWidth(getLineWidth());
if (getLinePoints().size() > 1) {
DependencyManager::get<DeferredLightingEffect>()->bindSimpleProgram(batch);
DependencyManager::get<GeometryCache>()->renderVertices(batch, gpu::LINE_STRIP, _lineVerticesID);
}
batch._glLineWidth(1.0f);
RenderableDebugableEntityItem::render(this, args);
};

View file

@ -11,10 +11,12 @@
uniform sampler2D colorMap;
varying vec4 varColor;
varying vec2 varTexCoord;
in vec4 _color;
in vec2 _texCoord0;
out vec4 outFragColor;
void main(void) {
vec4 color = texture2D(colorMap, varTexCoord);
gl_FragColor = color * varColor;
vec4 color = texture(colorMap, _texCoord0);
outFragColor = color * _color;
}

View file

@ -10,19 +10,21 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
varying vec4 varColor;
varying vec2 varTexCoord;
out vec4 _color;
out vec2 _texCoord0;
void main(void) {
// pass along the color & uvs to fragment shader
varColor = gl_Color;
varTexCoord = gl_MultiTexCoord0.xy;
_color = inColor;
_texCoord0 = inTexCoord0.xy;
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, gl_Vertex, gl_Position)$>
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
}

View file

@ -9,8 +9,10 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
varying vec4 varColor;
in vec4 _color;
out vec4 outFragColor;
void main(void) {
gl_FragColor = varColor;
outFragColor = _color;
}

View file

@ -8,17 +8,19 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
varying vec4 varColor;
out vec4 _color;
void main(void) {
// pass along the diffuse color
varColor = gl_Color;
_color = inColor;
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, gl_Vertex, gl_Position)$>
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
}

View file

@ -18,7 +18,6 @@
#include <glm/glm.hpp>
#include <AnimationCache.h> // for Animation, AnimationCache, and AnimationPointer classes
#include <CollisionInfo.h>
#include <Octree.h> // for EncodeBitstreamParams class
#include <OctreeElement.h> // for OctreeElement::AppendState
#include <OctreePacketData.h>

View file

@ -71,7 +71,8 @@ CONSTRUCT_PROPERTY(exponent, 0.0f),
CONSTRUCT_PROPERTY(cutoff, ENTITY_ITEM_DEFAULT_CUTOFF),
CONSTRUCT_PROPERTY(locked, ENTITY_ITEM_DEFAULT_LOCKED),
CONSTRUCT_PROPERTY(textures, ""),
CONSTRUCT_PROPERTY(animationSettings, ""),
CONSTRUCT_PROPERTY(animationSettings, "{\"firstFrame\":0,\"fps\":30,\"frameIndex\":0,\"hold\":false,"
"\"lastFrame\":100000,\"loop\":false,\"running\":false,\"startAutomatically\":false}"),
CONSTRUCT_PROPERTY(userData, ENTITY_ITEM_DEFAULT_USER_DATA),
CONSTRUCT_PROPERTY(simulationOwner, SimulationOwner()),
CONSTRUCT_PROPERTY(text, TextEntityItem::DEFAULT_TEXT),

View file

@ -16,7 +16,6 @@
#include <QtCore/QObject>
#include <CollisionInfo.h>
#include <DependencyManager.h>
#include <Octree.h>
#include <OctreeScriptingInterface.h>

View file

@ -15,7 +15,7 @@
#include <QDebug>
#include <ByteCountCoding.h>
#include <PlaneShape.h>
#include <GeometryUtil.h>
#include "EntityTree.h"
#include "EntityTreeElement.h"
@ -128,46 +128,13 @@ void TextEntityItem::appendSubclassData(OctreePacketData* packetData, EncodeBits
}
bool TextEntityItem::findDetailedRayIntersection(const glm::vec3& origin, const glm::vec3& direction,
bool& keepSearching, OctreeElement*& element, float& distance, BoxFace& face,
void** intersectedObject, bool precisionPicking) const {
RayIntersectionInfo rayInfo;
rayInfo._rayStart = origin;
rayInfo._rayDirection = direction;
rayInfo._rayLength = std::numeric_limits<float>::max();
PlaneShape plane;
const glm::vec3 UNROTATED_NORMAL(0.0f, 0.0f, -1.0f);
glm::vec3 normal = getRotation() * UNROTATED_NORMAL;
plane.setNormal(normal);
plane.setPoint(getPosition()); // the position is definitely a point on our plane
bool intersects = plane.findRayIntersection(rayInfo);
if (intersects) {
glm::vec3 hitAt = origin + (direction * rayInfo._hitDistance);
// now we know the point the ray hit our plane
glm::mat4 rotation = glm::mat4_cast(getRotation());
glm::mat4 translation = glm::translate(getPosition());
glm::mat4 entityToWorldMatrix = translation * rotation;
glm::mat4 worldToEntityMatrix = glm::inverse(entityToWorldMatrix);
glm::vec3 dimensions = getDimensions();
glm::vec3 registrationPoint = getRegistrationPoint();
glm::vec3 corner = -(dimensions * registrationPoint);
AABox entityFrameBox(corner, dimensions);
glm::vec3 entityFrameHitAt = glm::vec3(worldToEntityMatrix * glm::vec4(hitAt, 1.0f));
intersects = entityFrameBox.contains(entityFrameHitAt);
}
if (intersects) {
distance = rayInfo._hitDistance;
}
return intersects;
glm::vec3 dimensions = getDimensions();
glm::vec2 xyDimensions(dimensions.x, dimensions.y);
glm::quat rotation = getRotation();
glm::vec3 position = getPosition() + rotation *
(dimensions * (getRegistrationPoint() - ENTITY_ITEM_DEFAULT_REGISTRATION_POINT));
return findRayRectangleIntersection(origin, direction, rotation, position, xyDimensions, distance);
}
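Both this change and the WebEntityItem change below delegate to GeometryUtil's findRayRectangleIntersection. As a hedged sketch only (a plausible implementation, not the actual library code), the helper presumably intersects the ray with the rectangle's plane in the rectangle's local frame and then tests the hit against the half-dimensions:

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

static bool rayRectangleSketch(const glm::vec3& origin, const glm::vec3& direction,
                               const glm::quat& rotation, const glm::vec3& position,
                               const glm::vec2& dimensions, float& distance) {
    glm::quat inverseRotation = glm::inverse(rotation);
    glm::vec3 localOrigin = inverseRotation * (origin - position);
    glm::vec3 localDirection = inverseRotation * direction;
    if (glm::abs(localDirection.z) < 1.0e-6f) {
        return false; // ray is parallel to the rectangle's plane
    }
    float t = -localOrigin.z / localDirection.z;
    if (t < 0.0f) {
        return false; // rectangle is behind the ray origin
    }
    glm::vec3 hit = localOrigin + localDirection * t;
    if (glm::abs(hit.x) > 0.5f * dimensions.x || glm::abs(hit.y) > 0.5f * dimensions.y) {
        return false; // hit the plane outside the rectangle
    }
    distance = t; // assumes a normalized direction, so t is a world-space distance
    return true;
}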

View file

@ -13,7 +13,7 @@
#include <QDebug>
#include <ByteCountCoding.h>
#include <PlaneShape.h>
#include <GeometryUtil.h>
#include "EntityTree.h"
#include "EntityTreeElement.h"
@ -98,50 +98,17 @@ void WebEntityItem::appendSubclassData(OctreePacketData* packetData, EncodeBitst
APPEND_ENTITY_PROPERTY(PROP_SOURCE_URL, _sourceUrl);
}
bool WebEntityItem::findDetailedRayIntersection(const glm::vec3& origin, const glm::vec3& direction,
bool& keepSearching, OctreeElement*& element, float& distance, BoxFace& face,
void** intersectedObject, bool precisionPicking) const {
RayIntersectionInfo rayInfo;
rayInfo._rayStart = origin;
rayInfo._rayDirection = direction;
rayInfo._rayLength = std::numeric_limits<float>::max();
PlaneShape plane;
const glm::vec3 UNROTATED_NORMAL(0.0f, 0.0f, -1.0f);
glm::vec3 normal = getRotation() * UNROTATED_NORMAL;
plane.setNormal(normal);
plane.setPoint(getPosition()); // the position is definitely a point on our plane
bool intersects = plane.findRayIntersection(rayInfo);
if (intersects) {
glm::vec3 hitAt = origin + (direction * rayInfo._hitDistance);
// now we know the point the ray hit our plane
glm::mat4 rotation = glm::mat4_cast(getRotation());
glm::mat4 translation = glm::translate(getPosition());
glm::mat4 entityToWorldMatrix = translation * rotation;
glm::mat4 worldToEntityMatrix = glm::inverse(entityToWorldMatrix);
glm::vec3 dimensions = getDimensions();
glm::vec3 registrationPoint = getRegistrationPoint();
glm::vec3 corner = -(dimensions * registrationPoint);
AABox entityFrameBox(corner, dimensions);
glm::vec3 entityFrameHitAt = glm::vec3(worldToEntityMatrix * glm::vec4(hitAt, 1.0f));
intersects = entityFrameBox.contains(entityFrameHitAt);
}
if (intersects) {
distance = rayInfo._hitDistance;
}
return intersects;
glm::vec3 dimensions = getDimensions();
glm::vec2 xyDimensions(dimensions.x, dimensions.y);
glm::quat rotation = getRotation();
glm::vec3 position = getPosition() + rotation *
(dimensions * (getRegistrationPoint() - ENTITY_ITEM_DEFAULT_REGISTRATION_POINT));
return findRayRectangleIntersection(origin, direction, rotation, position, xyDimensions, distance);
}
void WebEntityItem::setSourceUrl(const QString& value) {
if (_sourceUrl != value) {
_sourceUrl = value;

View file

@ -28,7 +28,6 @@
#include <GLMHelpers.h>
#include <NumericalConstants.h>
#include <OctalCode.h>
#include <Shape.h>
#include <gpu/Format.h>
#include <LogHandler.h>

View file

@ -23,7 +23,6 @@
#include <NetworkAccessManager.h>
#include "FBXReader.h"
#include "OBJReader.h"
#include "Shape.h"
#include "ModelFormatLogging.h"

View file

@ -7,17 +7,18 @@ setup_hifi_library()
link_hifi_libraries(shared)
add_dependency_external_projects(glew)
find_package(GLEW REQUIRED)
add_definitions(-DGLEW_STATIC)
target_include_directories(${TARGET_NAME} PUBLIC ${GLEW_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${GLEW_LIBRARY})
if (APPLE)
# link in required OS X frameworks and include the right GL headers
find_library(OpenGL OpenGL)
target_link_libraries(${TARGET_NAME} ${OpenGL})
elseif (WIN32)
add_dependency_external_projects(glew)
find_package(GLEW REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLEW_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${GLEW_LIBRARIES} opengl32.lib)
target_link_libraries(${TARGET_NAME} ${GLEW_LIBRARY} opengl32.lib)
if (USE_NSIGHT)
# try to find the Nsight package and add it to the build if we find it
@ -32,8 +33,6 @@ elseif (WIN32)
elseif (ANDROID)
target_link_libraries(${TARGET_NAME} "-lGLESv3" "-lEGL")
else ()
find_package(GLEW REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLEW_INCLUDE_DIRS})
find_package(OpenGL REQUIRED)
@ -41,6 +40,5 @@ else ()
include_directories(SYSTEM "${OPENGL_INCLUDE_DIR}")
endif ()
target_link_libraries(${TARGET_NAME} "${GLEW_LIBRARIES}" "${OPENGL_LIBRARY}")
target_link_libraries(${TARGET_NAME} "${OPENGL_LIBRARY}")
endif (APPLE)

View file

@ -106,7 +106,10 @@ public:
void clearStencilFramebuffer(int stencil, bool enableScissor = false); // not a command, just a shortcut for clearFramebuffer, it touches only stencil target
void clearDepthStencilFramebuffer(float depth, int stencil, bool enableScissor = false); // not a command, just a shortcut for clearFramebuffer, it touches depth and stencil target
void blit(const FramebufferPointer& src, const Vec4i& srcViewport, const FramebufferPointer& dst, const Vec4i& dstViewport);
// Blit the src framebuffer into the destination.
// srcRect and dstRect are the rect regions in the source and destination framebuffers, expressed in pixel space,
// with xy and zw holding the bounding corners of each rect.
void blit(const FramebufferPointer& src, const Vec4i& srcRect, const FramebufferPointer& dst, const Vec4i& dstRect);
// Query Section
void beginQuery(const QueryPointer& query);
@ -134,7 +137,6 @@ public:
void _glUniformMatrix4fv(int location, int count, unsigned char transpose, const float* value);
void _glColor4f(float red, float green, float blue, float alpha);
void _glLineWidth(float width);
enum Command {
COMMAND_draw = 0,
@ -183,7 +185,6 @@ public:
COMMAND_glUniformMatrix4fv,
COMMAND_glColor4f,
COMMAND_glLineWidth,
NUM_COMMANDS,
};
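A hypothetical usage sketch of the documented blit() overload; the include path and the getWidth()/getHeight() accessors are assumptions, not shown in this diff. Each Vec4i packs a rect's two bounding corners as (x0, y0, x1, y1) in pixels.

#include <gpu/Batch.h>

// Copy the full source framebuffer onto the full destination framebuffer.
void blitWhole(gpu::Batch& batch, const gpu::FramebufferPointer& src, const gpu::FramebufferPointer& dst) {
    gpu::Vec4i srcRect(0, 0, src->getWidth(), src->getHeight());
    gpu::Vec4i dstRect(0, 0, dst->getWidth(), dst->getHeight());
    batch.blit(src, srcRect, dst, dstRect);
}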

View file

@ -13,17 +13,11 @@
<@if GLPROFILE == PC_GL @>
<@def GPU_FEATURE_PROFILE GPU_CORE@>
<@def GPU_TRANSFORM_PROFILE GPU_CORE@>
<@def VERSION_HEADER #version 430 compatibility@>
<@def VERSION_HEADER #version 410 core@>
<@elif GLPROFILE == MAC_GL @>
<@def GPU_FEATURE_PROFILE GPU_LEGACY@>
<@def GPU_TRANSFORM_PROFILE GPU_LEGACY@>
<@def VERSION_HEADER #version 120
#extension GL_EXT_gpu_shader4 : enable@>
<@def GPU_FEATURE_PROFILE GPU_CORE@>
<@def VERSION_HEADER #version 410 core@>
<@else@>
<@def GPU_FEATURE_PROFILE GPU_CORE@>
<@def GPU_TRANSFORM_PROFILE GPU_CORE@>
<@def VERSION_HEADER #version 430 compatibility@>
<@endif@>
<@def VERSION_HEADER #version 410 core@>
<@endif@>

View file

@ -11,12 +11,13 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
uniform sampler2D colorMap;
uniform vec4 color;
varying vec2 varTexcoord;
in vec2 varTexCoord0;
out vec4 outFragColor;
void main(void) {
gl_FragColor = texture2D(colorMap, varTexcoord) * color;
outFragColor = texture(colorMap, varTexCoord0) * color;
}

View file

@ -13,27 +13,27 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Transform.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
uniform vec4 texcoordRect;
varying vec2 varTexcoord;
out vec2 varTexCoord0;
void main(void) {
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, 0.0, 1.0),
vec4(1.0, -1.0, 0.0, 1.0),
vec4(-1.0, 1.0, 0.0, 1.0),
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, 0.0, 1.0),
vec4(1.0, -1.0, 0.0, 1.0),
vec4(-1.0, 1.0, 0.0, 1.0),
vec4(1.0, 1.0, 0.0, 1.0)
);
vec4 pos = UNIT_QUAD[gl_VertexID];
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, pos, gl_Position)$>
varTexcoord = ((pos.xy + 1) * 0.5) * texcoordRect.zw + texcoordRect.xy;
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, pos, gl_Position)$>
varTexCoord0 = ((pos.xy + 1) * 0.5) * texcoordRect.zw + texcoordRect.xy;
}

View file

@ -11,11 +11,12 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
uniform sampler2D colorMap;
varying vec2 varTexcoord;
in vec2 varTexCoord0;
out vec4 outFragColor;
void main(void) {
gl_FragColor = texture2D(colorMap, varTexcoord);
outFragColor = texture(colorMap, varTexCoord0);
}

View file

@ -12,11 +12,12 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
uniform sampler2D colorMap;
varying vec2 varTexcoord;
in vec2 varTexCoord0;
out vec4 outFragColor;
void main(void) {
gl_FragColor = vec4(texture2D(colorMap, varTexcoord).xyz, 1.0);
outFragColor = vec4(texture(colorMap, varTexCoord0).xyz, 1.0);
}

View file

@ -12,25 +12,25 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Transform.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
varying vec2 varTexcoord;
out vec2 varTexCoord0;
void main(void) {
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, 0.0, 1.0),
vec4(1.0, -1.0, 0.0, 1.0),
vec4(-1.0, 1.0, 0.0, 1.0),
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, 0.0, 1.0),
vec4(1.0, -1.0, 0.0, 1.0),
vec4(-1.0, 1.0, 0.0, 1.0),
vec4(1.0, 1.0, 0.0, 1.0)
);
vec4 pos = UNIT_QUAD[gl_VertexID];
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, pos, gl_Position)$>
varTexcoord = (pos.xy + 1) * 0.5;
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, pos, gl_Position)$>
varTexCoord0 = (pos.xy + 1) * 0.5;
}

View file

@ -16,7 +16,7 @@
<$declareStandardTransform()$>
varying vec2 varTexcoord;
out vec2 varTexCoord0;
void main(void) {
const vec4 UNIT_QUAD[4] = vec4[4](
@ -34,5 +34,5 @@ void main(void) {
<$transformModelToWorldPos(obj, tc, tc)$>
gl_Position = pos;
varTexcoord = tc.xy;
varTexCoord0 = tc.xy;
}

View file

@ -88,7 +88,26 @@ static const int TYPE_SIZE[NUM_TYPES] = {
1,
1
};
// Table answering, for each type, whether it is an integer type
static const bool TYPE_IS_INTEGER[NUM_TYPES] = {
false,
true,
true,
false,
true,
true,
true,
true,
false,
true,
true,
false,
true,
true,
true,
true
};
// Dimension of an Element
enum Dimension {
@ -168,6 +187,7 @@ public:
Type getType() const { return (Type)_type; }
bool isNormalized() const { return (getType() >= NFLOAT); }
bool isInteger() const { return TYPE_IS_INTEGER[getType()]; }
uint32 getSize() const { return DIMENSION_COUNT[_dimension] * TYPE_SIZE[_type]; }

View file

@ -60,7 +60,6 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
(&::gpu::GLBackend::do_glUniformMatrix4fv),
(&::gpu::GLBackend::do_glColor4f),
(&::gpu::GLBackend::do_glLineWidth),
};
void GLBackend::init() {
@ -74,14 +73,16 @@ void GLBackend::init() {
qCDebug(gpulogging) << "GL Renderer: " << QString((const char*) glGetString(GL_RENDERER));
#ifdef WIN32
glewExperimental = true;
GLenum err = glewInit();
glGetError();
if (GLEW_OK != err) {
/* Problem: glewInit failed, something is seriously wrong. */
qCDebug(gpulogging, "Error: %s\n", glewGetErrorString(err));
}
qCDebug(gpulogging, "Status: Using GLEW %s\n", glewGetString(GLEW_VERSION));
#if defined(Q_OS_WIN)
if (wglewGetExtension("WGL_EXT_swap_control")) {
int swapInterval = wglGetSwapIntervalEXT();
qCDebug(gpulogging, "V-Sync is %s\n", (swapInterval > 0 ? "ON" : "OFF"));
@ -89,13 +90,6 @@ void GLBackend::init() {
#endif
#if defined(Q_OS_LINUX)
GLenum err = glewInit();
if (GLEW_OK != err) {
/* Problem: glewInit failed, something is seriously wrong. */
qCDebug(gpulogging, "Error: %s\n", glewGetErrorString(err));
}
qCDebug(gpulogging, "Status: Using GLEW %s\n", glewGetString(GLEW_VERSION));
// TODO: Write the correct code for Linux...
/* if (wglewGetExtension("WGL_EXT_swap_control")) {
int swapInterval = wglGetSwapIntervalEXT();
@ -200,7 +194,6 @@ void GLBackend::do_draw(Batch& batch, uint32 paramOffset) {
GLenum mode = _primitiveToGLmode[primitiveType];
uint32 numVertices = batch._params[paramOffset + 1]._uint;
uint32 startVertex = batch._params[paramOffset + 0]._uint;
glDrawArrays(mode, startVertex, numVertices);
(void) CHECK_GL_ERROR();
}
@ -494,22 +487,11 @@ void Batch::_glColor4f(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha)
DO_IT_NOW(_glColor4f, 4);
}
void GLBackend::do_glColor4f(Batch& batch, uint32 paramOffset) {
glColor4f(
// TODO Replace this with a proper sticky Input attribute buffer with frequency 0
glVertexAttrib4f( gpu::Stream::COLOR,
batch._params[paramOffset + 3]._float,
batch._params[paramOffset + 2]._float,
batch._params[paramOffset + 1]._float,
batch._params[paramOffset + 0]._float);
(void) CHECK_GL_ERROR();
}
void Batch::_glLineWidth(GLfloat width) {
ADD_COMMAND_GL(glLineWidth);
_params.push_back(width);
DO_IT_NOW(_glLineWidth, 1);
}
void GLBackend::do_glLineWidth(Batch& batch, uint32 paramOffset) {
glLineWidth(batch._params[paramOffset]._float);
(void) CHECK_GL_ERROR();
}

View file

@ -93,13 +93,6 @@ public:
GLint _transformCameraSlot = -1;
GLint _transformObjectSlot = -1;
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
GLint _transformObject_model = -1;
GLint _transformCamera_viewInverse = -1;
GLint _transformCamera_viewport = -1;
#endif
GLShader();
~GLShader();
};
@ -327,8 +320,6 @@ protected:
bool _invalidProj;
bool _invalidViewport;
GLenum _lastMode;
TransformStageState() :
_transformObjectBuffer(0),
_transformCameraBuffer(0),
@ -339,14 +330,13 @@ protected:
_invalidModel(true),
_invalidView(true),
_invalidProj(false),
_invalidViewport(false),
_lastMode(GL_TEXTURE) {}
_invalidViewport(false) {}
} _transform;
// Uniform Stage
void do_setUniformBuffer(Batch& batch, uint32 paramOffset);
void releaseUniformBuffer(int slot);
void releaseUniformBuffer(uint32_t slot);
void resetUniformStage();
struct UniformStageState {
Buffers _buffers;
@ -359,7 +349,7 @@ protected:
// Resource Stage
void do_setResourceTexture(Batch& batch, uint32 paramOffset);
void releaseResourceTexture(int slot);
void releaseResourceTexture(uint32_t slot);
void resetResourceStage();
struct ResourceStageState {
Textures _textures;
@ -392,13 +382,6 @@ protected:
GLuint _program;
bool _invalidProgram;
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
GLint _program_transformObject_model = -1;
GLint _program_transformCamera_viewInverse = -1;
GLint _program_transformCamera_viewport = -1;
#endif
State::Data _stateCache;
State::Signature _stateSignatureCache;
@ -462,7 +445,6 @@ protected:
void do_glUniformMatrix4fv(Batch& batch, uint32 paramOffset);
void do_glColor4f(Batch& batch, uint32 paramOffset);
void do_glLineWidth(Batch& batch, uint32 paramOffset);
typedef void (GLBackend::*CommandCall)(Batch&, uint32);
static CommandCall _commandCalls[Batch::NUM_COMMANDS];

View file

@ -57,66 +57,41 @@ void GLBackend::do_setInputBuffer(Batch& batch, uint32 paramOffset) {
}
}
#define NOT_SUPPORT_VAO
#if defined(SUPPORT_VAO)
#if (GPU_INPUT_PROFILE == GPU_CORE_41)
#define NO_SUPPORT_VERTEX_ATTRIB_FORMAT
#else
#define SUPPORT_VERTEX_ATTRIB_FORMAT
#endif
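For context, a minimal sketch of the two GL binding paths this switch selects between; the function and variable names are placeholders, not code from this change. The GL 4.1 fallback couples the vertex layout to the currently bound buffer via glVertexAttribPointer, while the separate-attrib-format path (GL 4.3 / ARB_vertex_attrib_binding) declares the layout once and then only rebinds vertex buffers.

#include <GL/glew.h>

// GL 4.1 path (NO_SUPPORT_VERTEX_ATTRIB_FORMAT): layout and buffer are set together.
static void bindAttribLegacy(GLuint slot, GLuint vbo, GLint count, GLenum type,
                             GLboolean normalized, GLsizei stride, GLintptr offset) {
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glVertexAttribPointer(slot, count, type, normalized, stride, reinterpret_cast<GLvoid*>(offset));
    glEnableVertexAttribArray(slot);
}

// GL 4.3 path (SUPPORT_VERTEX_ATTRIB_FORMAT): declare the layout once, then only
// rebind the vertex buffer on the binding channel when it changes.
static void bindAttribSeparateFormat(GLuint slot, GLuint channel, GLuint vbo, GLint count, GLenum type,
                                     GLboolean normalized, GLuint relativeOffset,
                                     GLintptr baseOffset, GLsizei stride) {
    glVertexAttribFormat(slot, count, type, normalized, relativeOffset);
    glVertexAttribBinding(slot, channel);
    glEnableVertexAttribArray(slot);
    glBindVertexBuffer(channel, vbo, baseOffset, stride);
}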
#define SUPPORT_LEGACY_OPENGL
#if defined(SUPPORT_LEGACY_OPENGL)
static const int NUM_CLASSIC_ATTRIBS = Stream::TANGENT;
static const GLenum attributeSlotToClassicAttribName[NUM_CLASSIC_ATTRIBS] = {
GL_VERTEX_ARRAY,
GL_NORMAL_ARRAY,
GL_COLOR_ARRAY,
GL_TEXTURE_COORD_ARRAY
};
#endif
#endif
void GLBackend::initInput() {
#if defined(SUPPORT_VAO)
if(!_input._defaultVAO) {
glGenVertexArrays(1, &_input._defaultVAO);
}
glBindVertexArray(_input._defaultVAO);
(void) CHECK_GL_ERROR();
#endif
}
void GLBackend::killInput() {
#if defined(SUPPORT_VAO)
glBindVertexArray(0);
if(_input._defaultVAO) {
glDeleteVertexArrays(1, &_input._defaultVAO);
}
(void) CHECK_GL_ERROR();
#endif
}
void GLBackend::syncInputStateCache() {
#if defined(SUPPORT_VAO)
for (int i = 0; i < NUM_CLASSIC_ATTRIBS; i++) {
_input._attributeActivation[i] = glIsEnabled(attributeSlotToClassicAttribName[i]);
}
//_input._defaultVAO
glBindVertexArray(_input._defaultVAO);
#else
size_t i = 0;
#if defined(SUPPORT_LEGACY_OPENGL)
for (; i < NUM_CLASSIC_ATTRIBS; i++) {
_input._attributeActivation[i] = glIsEnabled(attributeSlotToClassicAttribName[i]);
}
#endif
for (; i < _input._attributeActivation.size(); i++) {
for (uint32_t i = 0; i < _input._attributeActivation.size(); i++) {
GLint active = 0;
glGetVertexAttribiv(i, GL_VERTEX_ATTRIB_ARRAY_ENABLED, &active);
_input._attributeActivation[i] = active;
}
#endif
//_input._defaultVAO
glBindVertexArray(_input._defaultVAO);
}
void GLBackend::updateInput() {
#if defined(SUPPORT_VAO)
#if defined(SUPPORT_VERTEX_ATTRIB_FORMAT)
if (_input._invalidFormat) {
InputStageState::ActivationCache newActivation;
@ -193,21 +168,11 @@ void GLBackend::updateInput() {
for (unsigned int i = 0; i < newActivation.size(); i++) {
bool newState = newActivation[i];
if (newState != _input._attributeActivation[i]) {
#if defined(SUPPORT_LEGACY_OPENGL)
if (i < NUM_CLASSIC_ATTRIBS) {
if (newState) {
glEnableClientState(attributeSlotToClassicAttribName[i]);
} else {
glDisableClientState(attributeSlotToClassicAttribName[i]);
}
} else
#endif
{
if (newState) {
glEnableVertexAttribArray(i);
} else {
glDisableVertexAttribArray(i);
}
if (newState) {
glEnableVertexAttribArray(i);
} else {
glDisableVertexAttribArray(i);
}
(void) CHECK_GL_ERROR();
@ -249,30 +214,13 @@ void GLBackend::updateInput() {
GLenum type = _elementTypeToGLType[attrib._element.getType()];
GLuint stride = strides[bufferNum];
GLuint pointer = attrib._offset + offsets[bufferNum];
#if defined(SUPPORT_LEGACY_OPENGL)
const bool useClientState = slot < NUM_CLASSIC_ATTRIBS;
if (useClientState) {
switch (slot) {
case Stream::POSITION:
glVertexPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::NORMAL:
glNormalPointer(type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::COLOR:
glColorPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::TEXCOORD:
glTexCoordPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
};
} else
#endif
{
GLboolean isNormalized = attrib._element.isNormalized();
glVertexAttribPointer(slot, count, type, isNormalized, stride,
GLboolean isNormalized = attrib._element.isNormalized();
glVertexAttribPointer(slot, count, type, isNormalized, stride,
reinterpret_cast<GLvoid*>(pointer));
}
// TODO: Properly support the IAttrib version
(void) CHECK_GL_ERROR();
}
}
@ -293,35 +241,20 @@ void GLBackend::resetInputStage() {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
(void) CHECK_GL_ERROR();
#if defined(SUPPORT_VAO)
// TODO
#else
glBindBuffer(GL_ARRAY_BUFFER, 0);
size_t i = 0;
#if defined(SUPPORT_LEGACY_OPENGL)
for (; i < NUM_CLASSIC_ATTRIBS; i++) {
glDisableClientState(attributeSlotToClassicAttribName[i]);
}
glVertexPointer(4, GL_FLOAT, 0, 0);
glNormalPointer(GL_FLOAT, 0, 0);
glColorPointer(4, GL_FLOAT, 0, 0);
glTexCoordPointer(4, GL_FLOAT, 0, 0);
#endif
for (; i < _input._attributeActivation.size(); i++) {
for (uint32_t i = 0; i < _input._attributeActivation.size(); i++) {
glDisableVertexAttribArray(i);
glVertexAttribPointer(i, 4, GL_FLOAT, GL_FALSE, 0, 0);
}
#endif
// Reset vertex buffer and format
_input._format.reset();
_input._invalidFormat = false;
_input._attributeActivation.reset();
for (int i = 0; i < _input._buffers.size(); i++) {
for (uint32_t i = 0; i < _input._buffers.size(); i++) {
_input._buffers[i].reset();
_input._bufferOffsets[i] = 0;
_input._bufferStrides[i] = 0;

View file

@ -71,13 +71,6 @@ void GLBackend::do_setPipeline(Batch& batch, uint32 paramOffset) {
_pipeline._program = 0;
_pipeline._invalidProgram = true;
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
_pipeline._program_transformObject_model = -1;
_pipeline._program_transformCamera_viewInverse = -1;
_pipeline._program_transformCamera_viewport = -1;
#endif
_pipeline._state = nullptr;
_pipeline._invalidState = true;
} else {
@ -90,13 +83,6 @@ void GLBackend::do_setPipeline(Batch& batch, uint32 paramOffset) {
if (_pipeline._program != pipelineObject->_program->_program) {
_pipeline._program = pipelineObject->_program->_program;
_pipeline._invalidProgram = true;
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
_pipeline._program_transformObject_model = pipelineObject->_program->_transformObject_model;
_pipeline._program_transformCamera_viewInverse = pipelineObject->_program->_transformCamera_viewInverse;
_pipeline._program_transformCamera_viewport = pipelineObject->_program->_transformCamera_viewport;
#endif
}
// Now for the state
@ -144,24 +130,6 @@ void GLBackend::updatePipeline() {
}
_pipeline._invalidState = false;
}
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
// If shader program needs the model we need to provide it
if (_pipeline._program_transformObject_model >= 0) {
glUniformMatrix4fv(_pipeline._program_transformObject_model, 1, false, (const GLfloat*) &_transform._transformObject._model);
}
// If shader program needs the inverseView we need to provide it
if (_pipeline._program_transformCamera_viewInverse >= 0) {
glUniformMatrix4fv(_pipeline._program_transformCamera_viewInverse, 1, false, (const GLfloat*) &_transform._transformCamera._viewInverse);
}
// If shader program needs the viewport we need to provide it
if (_pipeline._program_transformCamera_viewport >= 0) {
glUniform4fv(_pipeline._program_transformCamera_viewport, 1, (const GLfloat*) &_transform._transformCamera._viewport);
}
#endif
}
void GLBackend::resetPipelineStage() {
@ -179,13 +147,12 @@ void GLBackend::resetPipelineStage() {
}
void GLBackend::releaseUniformBuffer(int slot) {
void GLBackend::releaseUniformBuffer(uint32_t slot) {
#if (GPU_FEATURE_PROFILE == GPU_CORE)
auto& buf = _uniform._buffers[slot];
if (buf) {
auto* object = Backend::getGPUObject<GLBackend::GLBuffer>(*buf);
if (object) {
GLuint bo = object->_buffer;
glBindBufferBase(GL_UNIFORM_BUFFER, slot, 0); // RELEASE
(void) CHECK_GL_ERROR();
@ -196,7 +163,7 @@ void GLBackend::releaseUniformBuffer(int slot) {
}
void GLBackend::resetUniformStage() {
for (int i = 0; i < _uniform._buffers.size(); i++) {
for (uint32_t i = 0; i < _uniform._buffers.size(); i++) {
releaseUniformBuffer(i);
}
}
@ -249,12 +216,11 @@ void GLBackend::do_setUniformBuffer(Batch& batch, uint32 paramOffset) {
#endif
}
void GLBackend::releaseResourceTexture(int slot) {
void GLBackend::releaseResourceTexture(uint32_t slot) {
auto& tex = _resource._textures[slot];
if (tex) {
auto* object = Backend::getGPUObject<GLBackend::GLTexture>(*tex);
if (object) {
GLuint to = object->_texture;
GLuint target = object->_target;
glActiveTexture(GL_TEXTURE0 + slot);
glBindTexture(target, 0); // RELEASE
@ -266,7 +232,7 @@ void GLBackend::releaseResourceTexture(int slot) {
}
void GLBackend::resetResourceStage() {
for (int i = 0; i < _resource._textures.size(); i++) {
for (uint32_t i = 0; i < _resource._textures.size(); i++) {
releaseResourceTexture(i);
}
}

View file

@ -35,71 +35,44 @@ void makeBindings(GLBackend::GLShader* shader) {
GLint loc = -1;
//Check for gpu specific attribute slotBindings
loc = glGetAttribLocation(glprogram, "position");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::POSITION, "position");
loc = glGetAttribLocation(glprogram, "inPosition");
if (loc >= 0 && loc != gpu::Stream::POSITION) {
glBindAttribLocation(glprogram, gpu::Stream::POSITION, "inPosition");
}
loc = glGetAttribLocation(glprogram, "attribPosition");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::POSITION, "attribPosition");
loc = glGetAttribLocation(glprogram, "inNormal");
if (loc >= 0 && loc != gpu::Stream::NORMAL) {
glBindAttribLocation(glprogram, gpu::Stream::NORMAL, "inNormal");
}
//Check for gpu specific attribute slotBindings
loc = glGetAttribLocation(glprogram, "gl_Vertex");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::POSITION, "gl_Vertex");
loc = glGetAttribLocation(glprogram, "inColor");
if (loc >= 0 && loc != gpu::Stream::COLOR) {
glBindAttribLocation(glprogram, gpu::Stream::COLOR, "inColor");
}
loc = glGetAttribLocation(glprogram, "normal");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::NORMAL, "normal");
}
loc = glGetAttribLocation(glprogram, "attribNormal");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::NORMAL, "attribNormal");
loc = glGetAttribLocation(glprogram, "inTexCoord0");
if (loc >= 0 && loc != gpu::Stream::TEXCOORD) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD, "inTexCoord0");
}
loc = glGetAttribLocation(glprogram, "color");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::COLOR, "color");
}
loc = glGetAttribLocation(glprogram, "attribColor");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::COLOR, "attribColor");
loc = glGetAttribLocation(glprogram, "inTangent");
if (loc >= 0 && loc != gpu::Stream::TANGENT) {
glBindAttribLocation(glprogram, gpu::Stream::TANGENT, "inTangent");
}
loc = glGetAttribLocation(glprogram, "texcoord");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD, "texcoord");
}
loc = glGetAttribLocation(glprogram, "attribTexcoord");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD, "attribTexcoord");
}
loc = glGetAttribLocation(glprogram, "tangent");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::TANGENT, "tangent");
loc = glGetAttribLocation(glprogram, "inTexCoord1");
if (loc >= 0 && loc != gpu::Stream::TEXCOORD1) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD1, "inTexCoord1");
}
loc = glGetAttribLocation(glprogram, "texcoord1");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD1, "texcoord1");
}
loc = glGetAttribLocation(glprogram, "attribTexcoord1");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD1, "texcoord1");
loc = glGetAttribLocation(glprogram, "inSkinClusterIndex");
if (loc >= 0 && loc != gpu::Stream::SKIN_CLUSTER_INDEX) {
glBindAttribLocation(glprogram, gpu::Stream::SKIN_CLUSTER_INDEX, "inSkinClusterIndex");
}
loc = glGetAttribLocation(glprogram, "clusterIndices");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::SKIN_CLUSTER_INDEX, "clusterIndices");
}
loc = glGetAttribLocation(glprogram, "clusterWeights");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::SKIN_CLUSTER_WEIGHT, "clusterWeights");
loc = glGetAttribLocation(glprogram, "inSkinClusterWeight");
if (loc >= 0 && loc != gpu::Stream::SKIN_CLUSTER_WEIGHT) {
glBindAttribLocation(glprogram, gpu::Stream::SKIN_CLUSTER_WEIGHT, "inSkinClusterWeight");
}
// Link again to take into account the assigned attrib location
@ -114,7 +87,6 @@ void makeBindings(GLBackend::GLShader* shader) {
// now assign the UBO bindings, then DON'T relink!
//Check for gpu specific uniform slotBindings
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
loc = glGetUniformBlockIndex(glprogram, "transformObjectBuffer");
if (loc >= 0) {
glUniformBlockBinding(glprogram, loc, gpu::TRANSFORM_OBJECT_SLOT);
@ -126,22 +98,6 @@ void makeBindings(GLBackend::GLShader* shader) {
glUniformBlockBinding(glprogram, loc, gpu::TRANSFORM_CAMERA_SLOT);
shader->_transformCameraSlot = gpu::TRANSFORM_CAMERA_SLOT;
}
#else
loc = glGetUniformLocation(glprogram, "transformObject_model");
if (loc >= 0) {
shader->_transformObject_model = loc;
}
loc = glGetUniformLocation(glprogram, "transformCamera_viewInverse");
if (loc >= 0) {
shader->_transformCamera_viewInverse = loc;
}
loc = glGetUniformLocation(glprogram, "transformCamera_viewport");
if (loc >= 0) {
shader->_transformCamera_viewport = loc;
}
#endif
}
GLBackend::GLShader* compileShader(const Shader& shader) {
@ -191,8 +147,8 @@ GLBackend::GLShader* compileShader(const Shader& shader) {
char* temp = new char[infoLength] ;
glGetShaderInfoLog(glshader, infoLength, NULL, temp);
qCDebug(gpulogging) << "GLShader::compileShader - failed to compile the gl shader object:";
qCDebug(gpulogging) << temp;
qCWarning(gpulogging) << "GLShader::compileShader - failed to compile the gl shader object:";
qCWarning(gpulogging) << temp;
/*
filestream.open("debugshader.glsl.info.txt");
@ -635,8 +591,6 @@ bool isUnusedSlot(GLint binding) {
int makeUniformBlockSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& buffers) {
GLint buffersCount = 0;
#if (GPU_FEATURE_PROFILE == GPU_CORE)
glGetProgramiv(glprogram, GL_ACTIVE_UNIFORM_BLOCKS, &buffersCount);
// fast exit
@ -689,7 +643,6 @@ int makeUniformBlockSlots(GLuint glprogram, const Shader::BindingSet& slotBindin
Element element(SCALAR, gpu::UINT32, gpu::UNIFORM_BUFFER);
buffers.insert(Shader::Slot(name, binding, element, Resource::BUFFER));
}
#endif
return buffersCount;
}
@ -750,11 +703,7 @@ bool GLBackend::makeProgram(Shader& shader, const Shader::BindingSet& slotBindin
Shader::SlotSet uniforms;
Shader::SlotSet textures;
Shader::SlotSet samplers;
#if (GPU_FEATURE_PROFILE == GPU_CORE)
makeUniformSlots(object->_program, slotBindings, uniforms, textures, samplers);
#else
makeUniformSlots(object->_program, slotBindings, uniforms, textures, samplers, buffers);
#endif
Shader::SlotSet inputs;
makeInputSlots(object->_program, slotBindings, inputs);

View file

@ -484,10 +484,14 @@ void GLBackend::syncPipelineStateCache() {
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
// Point size is always on
glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
// FIXME CORE
//glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
glEnable(GL_PROGRAM_POINT_SIZE_EXT);
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
// Default line width across the board
glLineWidth(1.0f);
getCurrentGLState(state);
State::Signature signature = State::evalSignature(state);
@ -583,10 +587,8 @@ void GLBackend::do_setStateMultisampleEnable(bool enable) {
void GLBackend::do_setStateAntialiasedLineEnable(bool enable) {
if (_pipeline._stateCache.antialisedLineEnable != enable) {
if (enable) {
glEnable(GL_POINT_SMOOTH);
glEnable(GL_LINE_SMOOTH);
} else {
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
}
(void) CHECK_GL_ERROR();

View file

@ -41,7 +41,6 @@ void GLBackend::do_setViewportTransform(Batch& batch, uint32 paramOffset) {
}
void GLBackend::initTransform() {
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
glGenBuffers(1, &_transform._transformObjectBuffer);
glGenBuffers(1, &_transform._transformCameraBuffer);
@ -51,18 +50,12 @@ void GLBackend::initTransform() {
glBindBuffer(GL_UNIFORM_BUFFER, _transform._transformCameraBuffer);
glBufferData(GL_UNIFORM_BUFFER, sizeof(_transform._transformCamera), (const void*) &_transform._transformCamera, GL_DYNAMIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
#else
#endif
}
void GLBackend::killTransform() {
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
glDeleteBuffers(1, &_transform._transformObjectBuffer);
glDeleteBuffers(1, &_transform._transformCameraBuffer);
#else
#endif
}
void GLBackend::syncTransformStateCache() {
@ -73,14 +66,7 @@ void GLBackend::syncTransformStateCache() {
glGetIntegerv(GL_VIEWPORT, (GLint*) &_transform._viewport);
GLint currentMode;
glGetIntegerv(GL_MATRIX_MODE, &currentMode);
_transform._lastMode = currentMode;
glGetFloatv(GL_PROJECTION_MATRIX, (float*) &_transform._projection);
Mat4 modelView;
glGetFloatv(GL_MODELVIEW_MATRIX, (float*) &modelView);
auto modelViewInv = glm::inverse(modelView);
_transform._view.evalFromRawMatrix(modelViewInv);
_transform._model.setIdentity();
@ -113,7 +99,8 @@ void GLBackend::updateTransform() {
_transform._transformCamera._projectionViewUntranslated = _transform._transformCamera._projection * viewUntranslated;
}
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
// TODO: We need a ring buffer to do efficient dynamic updates here
// For now let's just do the bind and update sequence
if (_transform._invalidView || _transform._invalidProj || _transform._invalidViewport) {
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_CAMERA_SLOT, 0);
glBindBuffer(GL_ARRAY_BUFFER, _transform._transformCameraBuffer);
@ -133,52 +120,6 @@ void GLBackend::updateTransform() {
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_OBJECT_SLOT, _transform._transformObjectBuffer);
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_CAMERA_SLOT, _transform._transformCameraBuffer);
CHECK_GL_ERROR();
#endif
#if (GPU_TRANSFORM_PROFILE == GPU_LEGACY)
// Do it again for fixed pipeline until we can get rid of it
GLint originalMatrixMode;
glGetIntegerv(GL_MATRIX_MODE, &originalMatrixMode);
if (_transform._invalidProj) {
if (_transform._lastMode != GL_PROJECTION) {
glMatrixMode(GL_PROJECTION);
_transform._lastMode = GL_PROJECTION;
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&_transform._projection));
(void) CHECK_GL_ERROR();
}
if (_transform._invalidModel || _transform._invalidView) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
if (!_transform._model.isIdentity()) {
Transform::Mat4 modelView;
if (!_transform._view.isIdentity()) {
Transform mvx;
Transform::inverseMult(mvx, _transform._view, _transform._model);
mvx.getMatrix(modelView);
} else {
_transform._model.getMatrix(modelView);
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else if (!_transform._view.isIdentity()) {
Transform::Mat4 modelView;
_transform._view.getInverseMatrix(modelView);
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
glLoadIdentity();
}
(void) CHECK_GL_ERROR();
}
glMatrixMode(originalMatrixMode);
#endif
// Flags are clean
_transform._invalidView = _transform._invalidProj = _transform._invalidModel = _transform._invalidViewport = false;
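The TODO in `updateTransform` above asks for a ring buffer so the camera/object uniform blocks can be updated without touching a buffer that in-flight draws may still be reading. A minimal sketch of that idea on top of plain OpenGL, assuming a fixed slot count and a known camera-block size; the type and function names here are illustrative, not part of the backend:

```cpp
#include <GL/glew.h>

// Hypothetical ring of uniform-buffer slots for per-frame camera updates.
struct TransformCameraRing {
    GLuint buffer = 0;
    GLsizeiptr slotSize = 0;   // size of one slot, padded to the UBO offset alignment
    int slotCount = 0;
    int current = 0;

    void init(GLsizeiptr cameraSize, int slots) {
        GLint align = 1;
        glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align);
        slotSize = ((cameraSize + align - 1) / align) * align;
        slotCount = slots;
        glGenBuffers(1, &buffer);
        glBindBuffer(GL_UNIFORM_BUFFER, buffer);
        glBufferData(GL_UNIFORM_BUFFER, slotSize * slotCount, nullptr, GL_DYNAMIC_DRAW);
        glBindBuffer(GL_UNIFORM_BUFFER, 0);
    }

    // Write the new camera data into the next slot and bind only that range,
    // so draws already issued keep reading the slot they were bound to.
    void updateAndBind(GLuint bindingSlot, const void* cameraData, GLsizeiptr cameraSize) {
        current = (current + 1) % slotCount;
        GLintptr offset = current * slotSize;
        glBindBuffer(GL_UNIFORM_BUFFER, buffer);
        glBufferSubData(GL_UNIFORM_BUFFER, offset, cameraSize, cameraData);
        glBindBufferRange(GL_UNIFORM_BUFFER, bindingSlot, buffer, offset, cameraSize);
    }
};
```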

View file

@ -18,18 +18,18 @@
#define GPU_LEGACY 0
#if defined(__APPLE__)
#include <OpenGL/gl.h>
#include <OpenGL/glext.h>
#define GPU_FEATURE_PROFILE GPU_LEGACY
#define GPU_TRANSFORM_PROFILE GPU_LEGACY
#include <GL/glew.h>
#define GPU_FEATURE_PROFILE GPU_CORE
#define GPU_INPUT_PROFILE GPU_CORE_41
#elif defined(WIN32)
#include <GL/glew.h>
#include <GL/wglew.h>
#define GPU_FEATURE_PROFILE GPU_CORE
#define GPU_TRANSFORM_PROFILE GPU_CORE
#define GPU_INPUT_PROFILE GPU_CORE_41
#elif defined(ANDROID)
@ -38,7 +38,7 @@
#include <GL/glew.h>
#define GPU_FEATURE_PROFILE GPU_CORE
#define GPU_TRANSFORM_PROFILE GPU_CORE
#define GPU_INPUT_PROFILE GPU_CORE_41
#endif

View file

@ -0,0 +1,21 @@
<!
// Input.slh
// interface/src
//
// Created by Bradley Austin Davis on 2015/06/19.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
!>
<@if not GPU_INPUTS_SLH@>
<@def GPU_INPUTS_SLH@>
in vec4 inPosition;
in vec4 inNormal;
in vec4 inColor;
in vec4 inTexCoord0;
in vec4 inTangent;
in vec4 inSkinClusterIndex;
in vec4 inSkinClusterWeight;
in vec4 inTexCoord1;
<@endif@>

View file

@ -14,7 +14,7 @@
using namespace gpu;
const Element Element::COLOR_RGBA_32 = Element(VEC4, UINT8, RGBA);
const Element Element::COLOR_RGBA_32 = Element(VEC4, NUINT8, RGBA);
const Element Element::VEC3F_XYZ = Element(VEC3, FLOAT, XYZ);
const Element Element::INDEX_UINT16 = Element(SCALAR, UINT16, INDEX);
const Element Element::PART_DRAWCALL = Element(VEC4, UINT32, PART);
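Switching `COLOR_RGBA_32` from `UINT8` to `NUINT8` changes how the bytes are interpreted on the GPU side: a normalized 8-bit channel reaches the shader as a value in [0, 1] instead of the raw 0..255 integer (the usual meaning of a normalized 8-bit format). A one-line sketch of the difference for a single channel; the helper names are illustrative, not part of the gpu library:

```cpp
#include <cstdint>

float channelAsUint8(uint8_t raw)  { return static_cast<float>(raw); }          // UINT8:  0..255 as-is
float channelAsNuint8(uint8_t raw) { return static_cast<float>(raw) / 255.0f; } // NUINT8: normalized to 0..1
```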

View file

@ -260,7 +260,7 @@ public:
depthClampEnable(false),
scissorEnable(false),
multisampleEnable(false),
antialisedLineEnable(false),
antialisedLineEnable(true),
alphaToCoverageEnable(false)
{}
};

View file

@ -27,13 +27,17 @@ public:
// Possible input slots identifiers
enum InputSlot {
POSITION = 0,
NORMAL,
COLOR,
TEXCOORD,
TANGENT,
SKIN_CLUSTER_INDEX,
SKIN_CLUSTER_WEIGHT,
TEXCOORD1,
NORMAL = 1,
COLOR = 2,
TEXCOORD0 = 3,
TEXCOORD = TEXCOORD0,
TANGENT = 4,
SKIN_CLUSTER_INDEX = 5,
SKIN_CLUSTER_WEIGHT = 6,
TEXCOORD1 = 7,
INSTANCE_XFM = 8,
INSTANCE_SCALE = 9,
INSTANCE_TRANSLATE = 10,
NUM_INPUT_SLOTS,
};
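With the slots pinned to explicit numbers, the attribute names declared in gpu/Inputs.slh can be tied to those indices before a program is linked. A sketch of one way a backend could do that with plain OpenGL; the function name is hypothetical, and the real backend may rely on a different mechanism (for example layout qualifiers or its own reflection pass):

```cpp
#include <GL/glew.h>

// Bind the standard attribute names to the fixed InputSlot numbers.
// Bindings take effect at the next glLinkProgram(program) call.
void bindStandardInputSlots(GLuint program) {
    glBindAttribLocation(program, 0, "inPosition");          // POSITION
    glBindAttribLocation(program, 1, "inNormal");            // NORMAL
    glBindAttribLocation(program, 2, "inColor");             // COLOR
    glBindAttribLocation(program, 3, "inTexCoord0");         // TEXCOORD0 / TEXCOORD
    glBindAttribLocation(program, 4, "inTangent");           // TANGENT
    glBindAttribLocation(program, 5, "inSkinClusterIndex");  // SKIN_CLUSTER_INDEX
    glBindAttribLocation(program, 6, "inSkinClusterWeight"); // SKIN_CLUSTER_WEIGHT
    glBindAttribLocation(program, 7, "inTexCoord1");         // TEXCOORD1
    // Slots 8-10 (INSTANCE_XFM / INSTANCE_SCALE / INSTANCE_TRANSLATE) would be
    // bound the same way once corresponding attribute names are declared.
}
```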

View file

@ -25,7 +25,6 @@ struct TransformCamera {
vec4 _viewport;
};
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
uniform transformObjectBuffer {
TransformObject _object;
};
@ -39,83 +38,22 @@ uniform transformCameraBuffer {
TransformCamera getTransformCamera() {
return _camera;
}
<@else@>
//uniform vec4 transformObjectBuffer[8];
TransformObject getTransformObject() {
TransformObject object;
/* object._model[0] = transformObjectBuffer[0];
object._model[1] = transformObjectBuffer[1];
object._model[2] = transformObjectBuffer[2];
object._model[3] = transformObjectBuffer[3];
object._modelInverse[0] = transformObjectBuffer[4];
object._modelInverse[1] = transformObjectBuffer[5];
object._modelInverse[2] = transformObjectBuffer[6];
object._modelInverse[3] = transformObjectBuffer[7];
*/
return object;
}
//uniform vec4 transformCameraBuffer[17];
TransformCamera getTransformCamera() {
TransformCamera camera;
/* camera._view[0] = transformCameraBuffer[0];
camera._view[1] = transformCameraBuffer[1];
camera._view[2] = transformCameraBuffer[2];
camera._view[3] = transformCameraBuffer[3];
camera._viewInverse[0] = transformCameraBuffer[4];
camera._viewInverse[1] = transformCameraBuffer[5];
camera._viewInverse[2] = transformCameraBuffer[6];
camera._viewInverse[3] = transformCameraBuffer[7];
camera._projectionViewUntranslated[0] = transformCameraBuffer[8];
camera._projectionViewUntranslated[1] = transformCameraBuffer[9];
camera._projectionViewUntranslated[2] = transformCameraBuffer[10];
camera._projectionViewUntranslated[3] = transformCameraBuffer[11];
camera._projection[0] = transformCameraBuffer[12];
camera._projection[1] = transformCameraBuffer[13];
camera._projection[2] = transformCameraBuffer[14];
camera._projection[3] = transformCameraBuffer[15];
camera._viewport = transformCameraBuffer[16];
*/
return camera;
}
uniform mat4 transformObject_model;
uniform mat4 transformCamera_viewInverse;
uniform vec4 transformCamera_viewport;
<@endif@>
<@endfunc@>
<@func transformCameraViewport(cameraTransform, viewport)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
<$viewport$> = <$cameraTransform$>._viewport;
<@else@>
<$viewport$> = transformCamera_viewport;
<@endif@>
<@endfunc@>
<@func transformModelToClipPos(cameraTransform, objectTransform, modelPos, clipPos)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
<!// Equivalent to the following but hopefully a tad more accurate
//return camera._projection * camera._view * object._model * pos; !>
{ // transformModelToClipPos
vec4 _eyepos = (<$objectTransform$>._model * <$modelPos$>) + vec4(-<$modelPos$>.w * <$cameraTransform$>._viewInverse[3].xyz, 0.0);
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * _eyepos;
}
<@else@>
<$clipPos$> = gl_ModelViewProjectionMatrix * <$modelPos$>;
<@endif@>
<@endfunc@>
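The `transformModelToClipPos` body above is an algebraic rearrangement of the naive product in its comment. Writing the view matrix as a rotation times a translation by the camera position c (the `_viewInverse[3].xyz` column), the camera translation is subtracted from the world-space position first, so the large world coordinates never pass through the view rotation; that is where the extra accuracy comes from. Assuming, as the earlier `updateTransform` hunk suggests, that `_projectionViewUntranslated` is the projection times the view with its translation zeroed:

\[
P\,V\,M\,p \;=\; \bigl(P\,R_v\bigr)\,\bigl(M\,p \;-\; p_w\,(c_x, c_y, c_z, 0)^{\mathsf T}\bigr),
\qquad V = R_v\,T(-c),
\]

where the left factor is `_projectionViewUntranslated` and the right factor is the camera-relative `_eyepos` computed in the macro.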
<@func $transformModelToEyeAndClipPos(cameraTransform, objectTransform, modelPos, eyePos, clipPos)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
<!// Equivalent to the following but hopefully a tad more accurate
//return camera._projection * camera._view * object._model * pos; !>
{ // transformModelToEyeAndClipPos
@ -125,24 +63,15 @@ uniform vec4 transformCamera_viewport;
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * _eyepos;
// <$eyePos$> = (<$cameraTransform$>._projectionInverse * <$clipPos$>);
}
<@else@>
<$eyePos$> = gl_ModelViewMatrix * <$modelPos$>;
<$clipPos$> = gl_ModelViewProjectionMatrix * <$modelPos$>;
<@endif@>
<@endfunc@>
<@func transformModelToWorldPos(objectTransform, modelPos, worldPos)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
{ // transformModelToWorldPos
<$worldPos$> = (<$objectTransform$>._model * <$modelPos$>);
}
<@else@>
<$worldPos$> = (transformObject_model * <$modelPos$>);
<@endif@>
<@endfunc@>
<@func transformModelToEyeDir(cameraTransform, objectTransform, modelDir, eyeDir)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
{ // transformModelToEyeDir
vec3 mr0 = vec3(<$objectTransform$>._modelInverse[0].x, <$objectTransform$>._modelInverse[1].x, <$objectTransform$>._modelInverse[2].x);
vec3 mr1 = vec3(<$objectTransform$>._modelInverse[0].y, <$objectTransform$>._modelInverse[1].y, <$objectTransform$>._modelInverse[2].y);
@ -154,29 +83,18 @@ uniform vec4 transformCamera_viewport;
<$eyeDir$> = vec3(dot(mvc0, <$modelDir$>), dot(mvc1, <$modelDir$>), dot(mvc2, <$modelDir$>));
}
<@else@>
<$eyeDir$> = gl_NormalMatrix * <$modelDir$>;
<@endif@>
<@endfunc@>
<@func transformEyeToWorldDir(cameraTransform, eyeDir, worldDir)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
{ // transformEyeToWorldDir
<$worldDir$> = vec3(<$cameraTransform$>._viewInverse * vec4(<$eyeDir$>.xyz, 0.0));
}
<@else@>
<$worldDir$> = vec3(transformCamera_viewInverse * vec4(<$eyeDir$>.xyz, 0.0));
<@endif@>
<@endfunc@>
<@func transformClipToEyeDir(cameraTransform, clipPos, eyeDir)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
{ // transformClipToEyeDir
<$eyeDir$> = vec3(<$cameraTransform$>._projectionInverse * vec4(<$clipPos$>.xyz, 1.0));
}
<@else@>
<$eyeDir$> = vec3(gl_ProjectionMatrixInverse * vec4(<$clipPos$>.xyz, 1.0));
<@endif@>
<@endfunc@>
<@endif@>

View file

@ -1,6 +1,6 @@
set(TARGET_NAME model)
AUTOSCRIBE_SHADER_LIB(gpu)
AUTOSCRIBE_SHADER_LIB(gpu model)
# use setup_hifi_library macro to setup our project and link appropriate Qt modules
setup_hifi_library()

View file

@ -6,53 +6,53 @@
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
!>
!>
<@if not MODEL_ATMOSPHERE_SLH@>
<@def MODEL_ATMOSPHERE_SLH@>
<!
// Code is a modified version of:
// http://http.developer.nvidia.com/GPUGems/gpugems_app01.html
// Atmospheric scattering fragment shader
//
// Author: Sean O'Neil
//
// Copyright (c) 2004 Sean O'Neil
//
// For licensing information, see http://http.developer.nvidia.com/GPUGems/gpugems_app01.html:
//
// NVIDIA Statement on the Software
//
// The source code provided is freely distributable, so long as the NVIDIA header remains unaltered and user modifications are
// detailed.
//
// No Warranty
//
// THE SOFTWARE AND ANY OTHER MATERIALS PROVIDED BY NVIDIA ON THE ENCLOSED CD-ROM ARE PROVIDED "AS IS." NVIDIA DISCLAIMS ALL
// WARRANTIES, EXPRESS, IMPLIED OR STATUTORY, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
//
// Limitation of Liability
//
// NVIDIA SHALL NOT BE LIABLE TO ANY USER, DEVELOPER, DEVELOPER'S CUSTOMERS, OR ANY OTHER PERSON OR ENTITY CLAIMING THROUGH OR
// UNDER DEVELOPER FOR ANY LOSS OF PROFITS, INCOME, SAVINGS, OR ANY OTHER CONSEQUENTIAL, INCIDENTAL, SPECIAL, PUNITIVE, DIRECT
// OR INDIRECT DAMAGES (WHETHER IN AN ACTION IN CONTRACT, TORT OR BASED ON A WARRANTY), EVEN IF NVIDIA HAS BEEN ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS SHALL APPLY NOTWITHSTANDING ANY FAILURE OF THE ESSENTIAL PURPOSE OF ANY
// LIMITED REMEDY. IN NO EVENT SHALL NVIDIA'S AGGREGATE LIABILITY TO DEVELOPER OR ANY OTHER PERSON OR ENTITY CLAIMING THROUGH
// OR UNDER DEVELOPER EXCEED THE AMOUNT OF MONEY ACTUALLY PAID BY DEVELOPER TO NVIDIA FOR THE SOFTWARE OR ANY OTHER MATERIALS.
//
!>
<@def MODEL_ATMOSPHERE_SLH@>
<!
// Code is a modified version of:
// http://http.developer.nvidia.com/GPUGems/gpugems_app01.html
// Atmospheric scattering fragment shader
//
// Author: Sean O'Neil
//
// Copyright (c) 2004 Sean O'Neil
//
// For licensing information, see http://http.developer.nvidia.com/GPUGems/gpugems_app01.html:
//
// NVIDIA Statement on the Software
//
// The source code provided is freely distributable, so long as the NVIDIA header remains unaltered and user modifications are
// detailed.
//
// No Warranty
//
// THE SOFTWARE AND ANY OTHER MATERIALS PROVIDED BY NVIDIA ON THE ENCLOSED CD-ROM ARE PROVIDED "AS IS." NVIDIA DISCLAIMS ALL
// WARRANTIES, EXPRESS, IMPLIED OR STATUTORY, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
//
// Limitation of Liability
//
// NVIDIA SHALL NOT BE LIABLE TO ANY USER, DEVELOPER, DEVELOPER'S CUSTOMERS, OR ANY OTHER PERSON OR ENTITY CLAIMING THROUGH OR
// UNDER DEVELOPER FOR ANY LOSS OF PROFITS, INCOME, SAVINGS, OR ANY OTHER CONSEQUENTIAL, INCIDENTAL, SPECIAL, PUNITIVE, DIRECT
// OR INDIRECT DAMAGES (WHETHER IN AN ACTION IN CONTRACT, TORT OR BASED ON A WARRANTY), EVEN IF NVIDIA HAS BEEN ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS SHALL APPLY NOTWITHSTANDING ANY FAILURE OF THE ESSENTIAL PURPOSE OF ANY
// LIMITED REMEDY. IN NO EVENT SHALL NVIDIA'S AGGREGATE LIABILITY TO DEVELOPER OR ANY OTHER PERSON OR ENTITY CLAIMING THROUGH
// OR UNDER DEVELOPER EXCEED THE AMOUNT OF MONEY ACTUALLY PAID BY DEVELOPER TO NVIDIA FOR THE SOFTWARE OR ANY OTHER MATERIALS.
//
!>
struct Atmosphere {
vec4 _invWaveLength;
vec4 _radiuses;
vec4 _scales;
vec4 _scatterings;
vec4 _control;
};
const int numSamples = 2;
};
const int numSamples = 2;
vec3 getAtmosphereInvWaveLength(Atmosphere a) { return a._invWaveLength.xyz; } // 1 / pow(wavelength, 4) for the red, green, and blue channels
@ -68,88 +68,88 @@ float getAtmosphereKrESun(Atmosphere a) { return a._scatterings.x; } // Kr * ESu
float getAtmosphereKmESun(Atmosphere a) { return a._scatterings.y; } // Km * ESun
float getAtmosphereKr4PI(Atmosphere a) { return a._scatterings.z; } // Kr * 4 * PI
float getAtmosphereKm4PI(Atmosphere a) { return a._scatterings.w; } // Km * 4 * PI
float getAtmosphereNumSamples(Atmosphere a) { return a._control.x; } // numSamples
vec2 getAtmosphereGAndG2(Atmosphere a) { return a._control.yz; } // g and g2
float atmosphereScale(float scaleDepth, float fCos)
{
float x = 1.0 - fCos;
return scaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
vec4 evalAtmosphereContribution(Atmosphere atmospheric, vec3 position, vec3 cameraPos, vec3 lightPos) {
float fInnerRadius = getAtmosphereInnerRadius(atmospheric);
float fSamples = getAtmosphereNumSamples(atmospheric);
vec3 v3InvWavelength = getAtmosphereInvWaveLength(atmospheric);
vec4 scatteringCoefs = getAtmosphereScattering(atmospheric);
float fKrESun = scatteringCoefs.x;
float fKmESun = scatteringCoefs.y;
float fKr4PI = scatteringCoefs.z;
float fKm4PI = scatteringCoefs.w;
vec2 gAndg2 = getAtmosphereGAndG2(atmospheric);
float g = gAndg2.x;
float g2 = gAndg2.y;
float fScale = getAtmosphereScale(atmospheric);
float fScaleDepth = getAtmosphereScaleDepth(atmospheric);
float fScaleOverScaleDepth = getAtmosphereScaleOverScaleDepth(atmospheric);
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = position;
vec3 v3Ray = v3Pos - cameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = cameraPos;
float fHeight = length(v3Start);
float fDepthStart = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fStartAngle = dot(v3Ray, v3Start) / fHeight;
float fStartOffset = fDepthStart * atmosphereScale(fScaleDepth, fStartAngle);
// Initialize the scattering loop variables
//gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
// int nSamples = numSamples;
int nSamples = int(fSamples);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(lightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot((v3Ray), v3SamplePoint) / fHeight * 0.99;
float fScatter = (fStartOffset + fDepth * (atmosphereScale(fScaleDepth, fLightAngle) - atmosphereScale(fScaleDepth, fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
vec3 secondaryFrontColor = v3FrontColor * fKmESun;
vec3 frontColor = v3FrontColor * (v3InvWavelength * fKrESun);
vec3 v3Direction = cameraPos - v3Pos;
float fCos = dot(lightPos, v3Direction) / length(v3Direction);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
vec4 finalColor;
finalColor.rgb = frontColor.rgb + fMiePhase * secondaryFrontColor.rgb;
finalColor.a = finalColor.b;
finalColor.rgb = pow(finalColor.rgb, vec3(1.0/2.2));
return finalColor;
}
float atmosphereScale(float scaleDepth, float fCos)
{
float x = 1.0 - fCos;
return scaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
vec4 evalAtmosphereContribution(Atmosphere atmospheric, vec3 position, vec3 cameraPos, vec3 lightPos) {
float fInnerRadius = getAtmosphereInnerRadius(atmospheric);
float fSamples = getAtmosphereNumSamples(atmospheric);
vec3 v3InvWavelength = getAtmosphereInvWaveLength(atmospheric);
vec4 scatteringCoefs = getAtmosphereScattering(atmospheric);
float fKrESun = scatteringCoefs.x;
float fKmESun = scatteringCoefs.y;
float fKr4PI = scatteringCoefs.z;
float fKm4PI = scatteringCoefs.w;
vec2 gAndg2 = getAtmosphereGAndG2(atmospheric);
float g = gAndg2.x;
float g2 = gAndg2.y;
float fScale = getAtmosphereScale(atmospheric);
float fScaleDepth = getAtmosphereScaleDepth(atmospheric);
float fScaleOverScaleDepth = getAtmosphereScaleOverScaleDepth(atmospheric);
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = position;
vec3 v3Ray = v3Pos - cameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = cameraPos;
float fHeight = length(v3Start);
float fDepthStart = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fStartAngle = dot(v3Ray, v3Start) / fHeight;
float fStartOffset = fDepthStart * atmosphereScale(fScaleDepth, fStartAngle);
// Initialize the scattering loop variables
//gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
// int nSamples = numSamples;
int nSamples = int(fSamples);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(lightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot((v3Ray), v3SamplePoint) / fHeight * 0.99;
float fScatter = (fStartOffset + fDepth * (atmosphereScale(fScaleDepth, fLightAngle) - atmosphereScale(fScaleDepth, fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
vec3 secondaryFrontColor = v3FrontColor * fKmESun;
vec3 frontColor = v3FrontColor * (v3InvWavelength * fKrESun);
vec3 v3Direction = cameraPos - v3Pos;
float fCos = dot(lightPos, v3Direction) / length(v3Direction);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
vec4 finalColor;
finalColor.rgb = frontColor.rgb + fMiePhase * secondaryFrontColor.rgb;
finalColor.a = finalColor.b;
finalColor.rgb = pow(finalColor.rgb, vec3(1.0/2.2));
return finalColor;
}
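For reference, the `fMiePhase` term in `evalAtmosphereContribution` is the Cornette–Shanks-style phase function used in the original GPU Gems sample, with g the asymmetry factor and θ the angle between the light direction and the direction from the sample point toward the camera:

\[
F(\theta, g) \;=\; \frac{3\,(1 - g^2)}{2\,(2 + g^2)} \cdot \frac{1 + \cos^2\theta}{\bigl(1 + g^2 - 2g\cos\theta\bigr)^{3/2}},
\]

which matches `1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5)` once g2 = g² and fCos = cos θ are substituted.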
<@if GLPROFILE == PC_GL@>
uniform atmosphereBuffer {
@ -171,75 +171,75 @@ Atmosphere getAtmosphere() {
return atmosphere;
}
<@endif@>
<!
/*
// uniform vec3 v3CameraPos; // The camera's current position
const int nSamples = 2;
const float fSamples = 2.0;
uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
varying vec3 position;
float scale(float fCos)
{
float x = 1.0 - fCos;
return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main (void)
{
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = position;
vec3 v3Ray = v3Pos - v3CameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = v3CameraPos;
float fHeight = length(v3Start);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fStartAngle = dot(v3Ray, v3Start) / fHeight;
float fStartOffset = fDepth * scale(fStartAngle);
// Initialize the scattering loop variables
//gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot((v3Ray), v3SamplePoint) / fHeight * 0.99;
float fScatter = (fStartOffset + fDepth * (scale(fLightAngle) - scale(fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
vec3 secondaryFrontColor = v3FrontColor * fKmESun;
vec3 frontColor = v3FrontColor * (v3InvWavelength * fKrESun);
vec3 v3Direction = v3CameraPos - v3Pos;
float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
gl_FragColor.rgb = frontColor.rgb + fMiePhase * secondaryFrontColor.rgb;
gl_FragColor.a = gl_FragColor.b;
gl_FragColor.rgb = pow(gl_FragColor.rgb, vec3(1.0/2.2));
}
*/
!>
<!
/*
// uniform vec3 v3CameraPos; // The camera's current position
const int nSamples = 2;
const float fSamples = 2.0;
uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
varying vec3 position;
float scale(float fCos)
{
float x = 1.0 - fCos;
return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main (void)
{
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = position;
vec3 v3Ray = v3Pos - v3CameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = v3CameraPos;
float fHeight = length(v3Start);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fStartAngle = dot(v3Ray, v3Start) / fHeight;
float fStartOffset = fDepth * scale(fStartAngle);
// Initialize the scattering loop variables
//gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot((v3Ray), v3SamplePoint) / fHeight * 0.99;
float fScatter = (fStartOffset + fDepth * (scale(fLightAngle) - scale(fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
vec3 secondaryFrontColor = v3FrontColor * fKmESun;
vec3 frontColor = v3FrontColor * (v3InvWavelength * fKrESun);
vec3 v3Direction = v3CameraPos - v3Pos;
float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
outFragColor.rgb = frontColor.rgb + fMiePhase * secondaryFrontColor.rgb;
outFragColor.a = outFragColor.b;
outFragColor.rgb = pow(outFragColor.rgb, vec3(1.0/2.2));
}
*/
!>
<@endif@>

View file

@ -18,32 +18,17 @@ struct Material {
vec4 _spare;
};
uniform materialBuffer {
Material _mat;
};
Material getMaterial() {
return _mat;
}
float getMaterialOpacity(Material m) { return m._diffuse.a; }
vec3 getMaterialDiffuse(Material m) { return m._diffuse.rgb; }
vec3 getMaterialSpecular(Material m) { return m._specular.rgb; }
float getMaterialShininess(Material m) { return m._specular.a; }
<@if GPU_FEATURE_PROFILE == GPU_CORE@>
uniform materialBuffer {
Material _mat;
};
Material getMaterial() {
return _mat;
}
<@else@>
uniform vec4 materialBuffer[4];
Material getMaterial() {
Material mat;
mat._diffuse = materialBuffer[0];
mat._specular = materialBuffer[1];
mat._emissive = materialBuffer[2];
mat._spare = materialBuffer[3];
return mat;
}
<@endif@>
<@endif@>

View file

@ -2,6 +2,7 @@
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
// skybox.frag
// fragment shader
//
// Created by Sam Gateau on 5/5/2015.
// Copyright 2015 High Fidelity, Inc.
@ -12,14 +13,22 @@
uniform samplerCube cubeMap;
varying vec3 normal;
varying vec2 texcoord;
varying vec3 color;
struct Skybox {
vec4 _color;
};
uniform skyboxBuffer {
Skybox _skybox;
};
in vec3 _normal;
out vec4 _fragColor;
void main(void) {
vec3 coord = normalize(normal);
vec4 texel = textureCube(cubeMap, coord);
vec3 pixel = pow(texel.xyz * color, vec3(1.0/2.2)); // manual Gamma correction
gl_FragData[0] = vec4(pixel, 0.0);
vec3 coord = normalize(_normal);
vec3 texel = texture(cubeMap, coord).rgb;
vec3 color = texel * _skybox._color.rgb;
vec3 pixel = pow(color, vec3(1.0/2.2)); // manual Gamma correction
_fragColor = vec4(pixel, 0.0);
}

View file

@ -11,48 +11,24 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
struct Skybox {
vec4 _color;
};
<@if GPU_FEATURE_PROFILE == GPU_CORE @>
uniform skyboxBuffer {
Skybox _skybox;
};
Skybox getSkybox() {
return _skybox;
}
<@else@>
uniform vec4 skyboxBuffer[1];
Skybox getSkybox() {
Skybox _skybox;
_skybox._color = skyboxBuffer[0];
return _skybox;
}
<@endif@>
varying vec3 normal;
varying vec2 texcoord;
varying vec3 color;
void main(void) {
texcoord = gl_Vertex.xy;
Skybox skybox = getSkybox();
color = skybox._color.xyz;
out vec3 _normal;
void main(void) {
// standard transform
TransformCamera cam = getTransformCamera();
vec3 clipDir = vec3(texcoord.xy, 0.0);
vec3 clipDir = vec3(inPosition.xy, 0.0);
vec3 eyeDir;
<$transformClipToEyeDir(cam, clipDir, eyeDir)$>;
<$transformEyeToWorldDir(cam, eyeDir, normal)$>;
// Position is supposed to cmoe in clip space
gl_Position = vec4(texcoord.xy, 0.0, 1.0);
}
<$transformClipToEyeDir(cam, clipDir, eyeDir)$>
<$transformEyeToWorldDir(cam, eyeDir, _normal)$>
// Position is supposed to come in clip space
gl_Position = vec4(inPosition.xy, 0.0, 1.0);
}

View file

@ -38,8 +38,8 @@
#include <OctalCode.h>
#include <udt/PacketHeaders.h>
#include <SharedUtil.h>
#include <Shape.h>
#include <PathUtils.h>
#include <Gzip.h>
#include "CoverageMap.h"
#include "OctreeConstants.h"
@ -49,7 +49,7 @@
#include "OctreeLogging.h"
QVector<QString> PERSIST_EXTENSIONS = {"svo", "json"};
QVector<QString> PERSIST_EXTENSIONS = {"svo", "json", "json.gz"};
float boundaryDistanceForRenderLevel(unsigned int renderLevel, float voxelSizeScale) {
return voxelSizeScale / powf(2, renderLevel);
@ -791,13 +791,6 @@ public:
bool found;
};
class ShapeArgs {
public:
const Shape* shape;
CollisionList& collisions;
bool found;
};
class ContentArgs {
public:
AACube cube;
@ -1809,29 +1802,52 @@ int Octree::encodeTreeBitstreamRecursion(OctreeElement* element,
}
bool Octree::readFromFile(const char* fileName) {
bool fileOk = false;
QString qFileName = findMostRecentFileExtension(fileName, PERSIST_EXTENSIONS);
QFile file(qFileName);
fileOk = file.open(QIODevice::ReadOnly);
if(fileOk) {
QDataStream fileInputStream(&file);
QFileInfo fileInfo(qFileName);
unsigned long fileLength = fileInfo.size();
emit importSize(1.0f, 1.0f, 1.0f);
emit importProgress(0);
qCDebug(octree) << "Loading file" << qFileName << "...";
fileOk = readFromStream(fileLength, fileInputStream);
emit importProgress(100);
file.close();
if (qFileName.endsWith(".json.gz")) {
return readJSONFromGzippedFile(qFileName);
}
return fileOk;
QFile file(qFileName);
if (!file.open(QIODevice::ReadOnly)) {
qCritical() << "unable to open for reading: " << fileName;
return false;
}
QDataStream fileInputStream(&file);
QFileInfo fileInfo(qFileName);
unsigned long fileLength = fileInfo.size();
emit importSize(1.0f, 1.0f, 1.0f);
emit importProgress(0);
qCDebug(octree) << "Loading file" << qFileName << "...";
bool success = readFromStream(fileLength, fileInputStream);
emit importProgress(100);
file.close();
return success;
}
bool Octree::readJSONFromGzippedFile(QString qFileName) {
QFile file(qFileName);
if (!file.open(QIODevice::ReadOnly)) {
qCritical() << "Cannot open gzipped json file for reading: " << qFileName;
return false;
}
QByteArray compressedJsonData = file.readAll();
QByteArray jsonData;
if (!gunzip(compressedJsonData, jsonData)) {
qCritical() << "json File not in gzip format: " << qFileName;
return false;
}
QDataStream jsonStream(jsonData);
return readJSONFromStream(-1, jsonStream);
}
bool Octree::readFromURL(const QString& urlString) {
@ -1867,18 +1883,17 @@ bool Octree::readFromURL(const QString& urlString) {
bool Octree::readFromStream(unsigned long streamLength, QDataStream& inputStream) {
// decide if this is SVO or JSON
// decide if this is binary SVO or JSON-formatted SVO
QIODevice *device = inputStream.device();
char firstChar;
device->getChar(&firstChar);
device->ungetChar(firstChar);
if (firstChar == (char) PacketType::EntityData) {
qCDebug(octree) << "Reading from SVO Stream length:" << streamLength;
qCDebug(octree) << "Reading from binary SVO Stream length:" << streamLength;
return readSVOFromStream(streamLength, inputStream);
} else {
qCDebug(octree) << "Reading from JSON Stream length:" << streamLength;
qCDebug(octree) << "Reading from JSON SVO Stream length:" << streamLength;
return readJSONFromStream(streamLength, inputStream);
}
}
@ -2013,12 +2028,28 @@ bool Octree::readSVOFromStream(unsigned long streamLength, QDataStream& inputStr
return fileOk;
}
bool Octree::readJSONFromStream(unsigned long streamLength, QDataStream& inputStream) {
char* rawData = new char[streamLength + 1]; // allocate enough room to null terminate
inputStream.readRawData(rawData, streamLength);
rawData[streamLength] = 0; // make sure we null terminate this string
const int READ_JSON_BUFFER_SIZE = 2048;
QJsonDocument asDocument = QJsonDocument::fromJson(rawData);
bool Octree::readJSONFromStream(unsigned long streamLength, QDataStream& inputStream) {
// If the data is gzipped we may not have a useful bytesAvailable() result, so just keep reading until
// we get EOF. Leave the streamLength parameter for consistency.
QByteArray jsonBuffer;
char* rawData = new char[READ_JSON_BUFFER_SIZE];
while (true) {
int got = inputStream.readRawData(rawData, READ_JSON_BUFFER_SIZE - 1);
if (got < 0) {
qCritical() << "error while reading from json stream";
delete[] rawData;
return false;
}
if (got == 0) {
break;
}
jsonBuffer += QByteArray(rawData, got);
}
QJsonDocument asDocument = QJsonDocument::fromJson(jsonBuffer);
QVariant asVariant = asDocument.toVariant();
QVariantMap asMap = asVariant.toMap();
readFromMap(asMap);
@ -2036,13 +2067,14 @@ void Octree::writeToFile(const char* fileName, OctreeElement* element, QString p
writeToSVOFile(fileName, element);
} else if (persistAsFileType == "json") {
writeToJSONFile(cFileName, element);
} else if (persistAsFileType == "json.gz") {
writeToJSONFile(cFileName, element, true);
} else {
qCDebug(octree) << "unable to write octree to file of type" << persistAsFileType;
}
}
void Octree::writeToJSONFile(const char* fileName, OctreeElement* element) {
QFile persistFile(fileName);
void Octree::writeToJSONFile(const char* fileName, OctreeElement* element, bool doGzip) {
QVariantMap entityDescription;
qCDebug(octree, "Saving JSON SVO to file %s...", fileName);
@ -2061,10 +2093,27 @@ void Octree::writeToJSONFile(const char* fileName, OctreeElement* element) {
// store the entity data
bool entityDescriptionSuccess = writeToMap(entityDescription, top, true);
if (!entityDescriptionSuccess) {
qCritical("Failed to convert Entities to QVariantMap while saving to json.");
return;
}
// convert the QVariantMap to JSON
if (entityDescriptionSuccess && persistFile.open(QIODevice::WriteOnly)) {
persistFile.write(QJsonDocument::fromVariant(entityDescription).toJson());
QByteArray jsonData = QJsonDocument::fromVariant(entityDescription).toJson();
QByteArray jsonDataForFile;
if (doGzip) {
if (!gzip(jsonData, jsonDataForFile, -1)) {
qCritical("unable to gzip data while saving to json.");
return;
}
} else {
jsonDataForFile = jsonData;
}
QFile persistFile(fileName);
if (persistFile.open(QIODevice::WriteOnly)) {
persistFile.write(jsonDataForFile);
} else {
qCritical("Could not write to JSON description of entities.");
}
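Both the new read and write paths go through the `gzip()`/`gunzip()` helpers pulled in via `Gzip.h`; their shapes are visible in the calls above (`bool gunzip(in, out)`, `bool gzip(in, out, level)`). A minimal sketch of what helpers with that shape could look like on top of zlib, for orientation only; the names, buffer size, and error handling here are assumptions, not the project's actual implementation:

```cpp
#include <cstring>
#include <QByteArray>
#include <zlib.h>

// Hypothetical gzip-format compression helper (level -1 = zlib default).
bool gzipSketch(const QByteArray& source, QByteArray& destination, int level = -1) {
    destination.clear();
    z_stream strm;
    memset(&strm, 0, sizeof(strm));
    // windowBits 15 + 16 tells zlib to emit a gzip header and trailer.
    if (deflateInit2(&strm, level, Z_DEFLATED, 15 + 16, 8, Z_DEFAULT_STRATEGY) != Z_OK) {
        return false;
    }
    strm.next_in = reinterpret_cast<Bytef*>(const_cast<char*>(source.constData()));
    strm.avail_in = static_cast<uInt>(source.size());
    char out[16384];
    int ret;
    do {
        strm.next_out = reinterpret_cast<Bytef*>(out);
        strm.avail_out = sizeof(out);
        ret = deflate(&strm, Z_FINISH);
        if (ret == Z_STREAM_ERROR) { deflateEnd(&strm); return false; }
        destination.append(out, sizeof(out) - strm.avail_out);
    } while (ret != Z_STREAM_END);
    deflateEnd(&strm);
    return true;
}

// Hypothetical gzip-format decompression helper.
bool gunzipSketch(const QByteArray& source, QByteArray& destination) {
    destination.clear();
    z_stream strm;
    memset(&strm, 0, sizeof(strm));
    // windowBits 15 + 32 auto-detects gzip or zlib wrapping.
    if (inflateInit2(&strm, 15 + 32) != Z_OK) {
        return false;
    }
    strm.next_in = reinterpret_cast<Bytef*>(const_cast<char*>(source.constData()));
    strm.avail_in = static_cast<uInt>(source.size());
    char out[16384];
    int ret;
    do {
        strm.next_out = reinterpret_cast<Bytef*>(out);
        strm.avail_out = sizeof(out);
        ret = inflate(&strm, Z_NO_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) { inflateEnd(&strm); return false; }
        destination.append(out, sizeof(out) - strm.avail_out);
    } while (ret != Z_STREAM_END && (strm.avail_in > 0 || strm.avail_out == 0));
    inflateEnd(&strm);
    return ret == Z_STREAM_END;
}
```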

View file

@ -330,7 +330,7 @@ public:
// Octree exporters
void writeToFile(const char* filename, OctreeElement* element = NULL, QString persistAsFileType = "svo");
void writeToJSONFile(const char* filename, OctreeElement* element = NULL);
void writeToJSONFile(const char* filename, OctreeElement* element = NULL, bool doGzip = false);
void writeToSVOFile(const char* filename, OctreeElement* element = NULL);
virtual bool writeToMap(QVariantMap& entityDescription, OctreeElement* element, bool skipDefaultValues) = 0;
@ -340,6 +340,7 @@ public:
bool readFromStream(unsigned long streamLength, QDataStream& inputStream);
bool readSVOFromStream(unsigned long streamLength, QDataStream& inputStream);
bool readJSONFromStream(unsigned long streamLength, QDataStream& inputStream);
bool readJSONFromGzippedFile(QString qFileName);
virtual bool readFromMap(QVariantMap& entityDescription) = 0;
unsigned long getOctreeElementsCount();

View file

@ -19,7 +19,6 @@
#include <LogHandler.h>
#include <NodeList.h>
#include <PerfStat.h>
#include <AACubeShape.h>
#include "AACube.h"
#include "OctalCode.h"
@ -637,53 +636,8 @@ OctreeElement* OctreeElement::getOrCreateChildElementAt(float x, float y, float
if (s > halfOurScale) {
return this;
}
// otherwise, we need to find which of our children we should recurse
glm::vec3 ourCenter = _cube.calcCenter();
int childIndex = CHILD_UNKNOWN;
// left half
if (x > ourCenter.x) {
if (y > ourCenter.y) {
// top left
if (z > ourCenter.z) {
// top left far
childIndex = CHILD_TOP_LEFT_FAR;
} else {
// top left near
childIndex = CHILD_TOP_LEFT_NEAR;
}
} else {
// bottom left
if (z > ourCenter.z) {
// bottom left far
childIndex = CHILD_BOTTOM_LEFT_FAR;
} else {
// bottom left near
childIndex = CHILD_BOTTOM_LEFT_NEAR;
}
}
} else {
// right half
if (y > ourCenter.y) {
// top right
if (z > ourCenter.z) {
// top right far
childIndex = CHILD_TOP_RIGHT_FAR;
} else {
// top right near
childIndex = CHILD_TOP_RIGHT_NEAR;
}
} else {
// bottom right
if (z > ourCenter.z) {
// bottom right far
childIndex = CHILD_BOTTOM_RIGHT_FAR;
} else {
// bottom right near
childIndex = CHILD_BOTTOM_RIGHT_NEAR;
}
}
}
int childIndex = getMyChildContainingPoint(glm::vec3(x, y, z));
// Now, check if we have a child at that location
child = getChildAtIndex(childIndex);

View file

@ -24,7 +24,6 @@
#include "ViewFrustum.h"
#include "OctreeConstants.h"
class CollisionList;
class EncodeBitstreamParams;
class Octree;
class OctreeElement;

View file

@ -8,8 +8,6 @@ qt5_add_resources(QT_RESOURCES_FILE "${CMAKE_CURRENT_SOURCE_DIR}/res/fonts/fonts
# use setup_hifi_library macro to setup our project and link appropriate Qt modules
setup_hifi_library(Widgets OpenGL Network Qml Quick Script)
setup_hifi_opengl()
add_dependency_external_projects(glm)
find_package(GLM REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLM_INCLUDE_DIRS})

View file

@ -50,10 +50,10 @@ struct DeferredFragment {
DeferredFragment unpackDeferredFragment(vec2 texcoord) {
DeferredFragment frag;
frag.depthVal = texture2D(depthMap, texcoord).r;
frag.normalVal = texture2D(normalMap, texcoord);
frag.diffuseVal = texture2D(diffuseMap, texcoord);
frag.specularVal = texture2D(specularMap, texcoord);
frag.depthVal = texture(depthMap, texcoord).r;
frag.normalVal = texture(normalMap, texcoord);
frag.diffuseVal = texture(diffuseMap, texcoord);
frag.specularVal = texture(specularMap, texcoord);
// compute the view space position using the depth
float z = near / (frag.depthVal * depthScale - 1.0);

View file

@ -11,12 +11,33 @@
<@if not DEFERRED_BUFFER_WRITE_SLH@>
<@def DEFERRED_BUFFER_WRITE_SLH@>
layout(location = 0) out vec4 _fragColor0;
layout(location = 1) out vec4 _fragColor1;
layout(location = 2) out vec4 _fragColor2;
// the glow intensity
uniform float glowIntensity;
// the alpha threshold
uniform float alphaThreshold;
uniform sampler2D normalFittingMap;
vec3 bestFitNormal(vec3 normal) {
vec3 absNorm = abs(normal);
float maxNAbs = max(absNorm.z, max(absNorm.x, absNorm.y));
vec2 texcoord = (absNorm.z < maxNAbs ?
(absNorm.y < maxNAbs ? absNorm.yz : absNorm.xz) :
absNorm.xy);
texcoord = (texcoord.x < texcoord.y ? texcoord.yx : texcoord.xy);
texcoord.y /= texcoord.x;
vec3 cN = normal / maxNAbs;
float fittingScale = texture(normalFittingMap, texcoord).a;
cN *= fittingScale;
return (cN * 0.5 + 0.5);
}
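The `bestFitNormal` helper rescales the normal before it is quantized into the 8-bit G-buffer channel. With n' = n / max_i |n_i| the cube-projected normal and s the scale fetched from `normalFittingMap` (indexed by a coordinate built from the non-maximal components of |n|), the stored value is:

\[
\hat{n} \;=\; \tfrac{1}{2}\,s\,n' \;+\; \tfrac{1}{2},
\]

with s chosen per direction so that the rounded 8-bit vector points as close as possible to the original normal; this is the standard best-fit-normals trick for reducing banding, assuming the fitting texture has been generated for that purpose.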
float evalOpaqueFinalAlpha(float alpha, float mapAlpha) {
return mix(alpha * glowIntensity, 1.0 - alpha * glowIntensity, step(mapAlpha, alphaThreshold));
}
@ -28,9 +49,9 @@ void packDeferredFragment(vec3 normal, float alpha, vec3 diffuse, vec3 specular,
if (alpha != glowIntensity) {
discard;
}
gl_FragData[0] = vec4(diffuse.rgb, alpha);
gl_FragData[1] = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
gl_FragData[2] = vec4(specular, shininess / 128.0);
_fragColor0 = vec4(diffuse.rgb, alpha);
_fragColor1 = vec4(bestFitNormal(normal), 1.0);
_fragColor2 = vec4(specular, shininess / 128.0);
}
void packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 diffuse, vec3 specular, float shininess, vec3 emissive) {
@ -38,10 +59,10 @@ void packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 diffuse, vec3 s
discard;
}
gl_FragData[0] = vec4(diffuse.rgb, alpha);
//gl_FragData[1] = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
gl_FragData[1] = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 0.5);
gl_FragData[2] = vec4(emissive, shininess / 128.0);
_fragColor0 = vec4(diffuse.rgb, alpha);
//_fragColor1 = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
_fragColor1 = vec4(bestFitNormal(normal), 0.5);
_fragColor2 = vec4(emissive, shininess / 128.0);
}
void packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 diffuse, vec3 specular, float shininess) {
@ -49,9 +70,9 @@ void packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 diffuse, vec
discard;
}
gl_FragData[0] = vec4(diffuse.rgb, alpha);
// gl_FragData[1] = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
// gl_FragData[2] = vec4(specular, shininess / 128.0);
_fragColor0 = vec4(diffuse.rgb, alpha);
// _fragColor1 = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
// _fragColor2 = vec4(specular, shininess / 128.0);
}
<@endif@>

View file

@ -16,12 +16,9 @@
uniform samplerCube skyboxMap;
vec4 evalSkyboxLight(vec3 direction, float lod) {
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
vec4 skytexel = textureCubeLod(skyboxMap, direction, lod * textureQueryLevels(skyboxMap));
<@else@>
vec4 skytexel = textureCube(skyboxMap, direction);
<@endif@>
// FIXME
//vec4 skytexel = textureLod(skyboxMap, direction, lod * textureQueryLevels(skyboxMap));
vec4 skytexel = texture(skyboxMap, direction);
return skytexel;
}

Some files were not shown because too many files have changed in this diff.