Merge branch 'master' of https://github.com/worklist/hifi into 20638

Commit 9af7205672 by Thijs Wenker, 2015-08-19 16:49:39 +02:00
32 changed files with 818 additions and 764 deletions

View file

@ -187,6 +187,7 @@ option(GET_OPENVR "Get OpenVR library automatically as external project" 1)
option(GET_BOOSTCONFIG "Get Boost-config library automatically as external project" 1)
option(GET_OGLPLUS "Get OGLplus library automatically as external project" 1)
option(GET_GLEW "Get GLEW library automatically as external project" 1)
option(GET_SIXENSE "Get Sixense library automatically as external project" 1)
option(USE_NSIGHT "Attempt to find the nSight libraries" 1)

View file

@ -971,12 +971,7 @@ void OctreeServer::readConfiguration() {
strcpy(_persistFilename, qPrintable(persistFilename));
qDebug("persistFilename=%s", _persistFilename);
QString persistAsFileType;
if (!readOptionString(QString("persistAsFileType"), settingsSectionObject, persistAsFileType)) {
persistAsFileType = "svo";
}
_persistAsFileType = persistAsFileType;
qDebug() << "persistAsFileType=" << _persistAsFileType;
_persistAsFileType = "json.gz";
_persistInterval = OctreePersistThread::DEFAULT_PERSIST_INTERVAL;
readOptionInt(QString("persistInterval"), settingsSectionObject, _persistInterval);

cmake/externals/Sixense/CMakeLists.txt (vendored, new file, 62 lines)
View file

@ -0,0 +1,62 @@
include(ExternalProject)
include(SelectLibraryConfigurations)
set(EXTERNAL_NAME Sixense)
ExternalProject_Add(
${EXTERNAL_NAME}
URL http://hifi-public.s3.amazonaws.com/dependencies/SixenseSDK_071615.zip
URL_MD5 752a3901f334124e9cffc2ba4136ef7d
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
LOG_DOWNLOAD 1
)
set_target_properties(${EXTERNAL_NAME} PROPERTIES FOLDER "hidden/externals")
ExternalProject_Get_Property(${EXTERNAL_NAME} SOURCE_DIR)
string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER)
set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${SOURCE_DIR}/include CACHE TYPE INTERNAL)
if (WIN32)
if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
set(ARCH_DIR "x64/VS2013")
set(ARCH_SUFFIX "_x64")
else()
set(ARCH_DIR "Win32/VS2013")
set(ARCH_SUFFIX "")
endif()
set(LIB_DIR "${SOURCE_DIR}/lib/${ARCH_DIR}")
set(BIN_DIR "${SOURCE_DIR}/bin/${ARCH_DIR}")
set(SIXENSE_LIBRARY_RELEASE "${LIB_DIR}/release_dll/sixense${ARCH_SUFFIX}.lib" CACHE FILEPATH "Sixense release lib")
set(SIXENSE_LIBRARY_DEBUG "${LIB_DIR}/debug_dll/sixensed${ARCH_SUFFIX}.lib" CACHE FILEPATH "Sixense debug lib")
set(SIXENSE_RELEASE_DLL_PATH "${BIN_DIR}/release_dll" CACHE FILEPATH "Sixense release DLL path")
set(SIXENSE_DEBUG_DLL_PATH "${BIN_DIR}/debug_dll" CACHE FILEPATH "Sixense debug DLL path")
# FIXME need to account for different architectures
add_paths_to_fixup_libs(${SIXENSE_DEBUG_DLL_PATH})
add_paths_to_fixup_libs(${SIXENSE_RELEASE_DLL_PATH})
elseif(APPLE)
set(ARCH_DIR "osx_x64")
set(ARCH_SUFFIX "_x64")
set(LIB_DIR "${SOURCE_DIR}/lib/${ARCH_DIR}")
set(SIXENSE_LIBRARY_RELEASE "${LIB_DIR}/release_dll/libsixense${ARCH_SUFFIX}.dylib" CACHE FILEPATH "Sixense release lib")
set(SIXENSE_LIBRARY_DEBUG "${LIB_DIR}/debug_dll/libsixensed${ARCH_SUFFIX}.dylib" CACHE FILEPATH "Sixense debug lib")
elseif(NOT ANDROID)
set(ARCH_DIR "linux_x64")
set(ARCH_SUFFIX "_x64")
set(LIB_DIR "${SOURCE_DIR}/lib/${ARCH_DIR}")
set(SIXENSE_LIBRARY_RELEASE "${LIB_DIR}/release/libsixense${ARCH_SUFFIX}.so" CACHE FILEPATH "Sixense release lib")
set(SIXENSE_LIBRARY_DEBUG "${LIB_DIR}/debug/libsixensed${ARCH_SUFFIX}.so" CACHE FILEPATH "Sixense debug lib")
endif()

View file

@ -1,60 +0,0 @@
include(ExternalProject)
include(SelectLibraryConfigurations)
set(EXTERNAL_NAME Sixense)
string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER)
ExternalProject_Add(
${EXTERNAL_NAME}
URL ./SixenseSDK_062612.zip
URL_MD5 10cc8dc470d2ac1244a88cf04bc549cc
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
LOG_DOWNLOAD 1
)
if (APPLE)
find_library(SIXENSE_LIBRARY_RELEASE lib/osx_x64/release_dll/libsixense_x64.dylib HINTS ${SIXENSE_SEARCH_DIRS})
find_library(SIXENSE_LIBRARY_DEBUG lib/osx_x64/debug_dll/libsixensed_x64.dylib HINTS ${SIXENSE_SEARCH_DIRS})
elseif (UNIX)
find_library(SIXENSE_LIBRARY_RELEASE lib/linux_x64/release/libsixense_x64.so HINTS ${SIXENSE_SEARCH_DIRS})
# find_library(SIXENSE_LIBRARY_DEBUG lib/linux_x64/debug/libsixensed_x64.so HINTS ${SIXENSE_SEARCH_DIRS})
elseif (WIN32)
endif ()
ExternalProject_Get_Property(${EXTERNAL_NAME} SOURCE_DIR)
set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${SOURCE_DIR}/include CACHE TYPE INTERNAL)
if (WIN32)
if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
set(ARCH_DIR "x64")
set(ARCH_SUFFIX "_x64")
else()
set(ARCH_DIR "Win32")
set(ARCH_SUFFIX "")
endif()
# FIXME need to account for different architectures
set(${EXTERNAL_NAME_UPPER}_LIBRARIES "${SOURCE_DIR}/lib/${ARCH_DIR}/release_dll/sixense${ARCH_SUFFIX}.lib" CACHE TYPE INTERNAL)
add_paths_to_fixup_libs(${SOURCE_DIR}/bin/win32)
elseif(APPLE)
# FIXME need to account for different architectures
set(${EXTERNAL_NAME_UPPER}_LIBRARIES ${SOURCE_DIR}/lib/osx32/libopenvr_api.dylib CACHE TYPE INTERNAL)
add_paths_to_fixup_libs(${SOURCE_DIR}/bin/osx32)
elseif(NOT ANDROID)
# FIXME need to account for different architectures
set(${EXTERNAL_NAME_UPPER}_LIBRARIES ${SOURCE_DIR}/lib/linux32/libopenvr_api.so CACHE TYPE INTERNAL)
add_paths_to_fixup_libs(${SOURCE_DIR}/bin/linux32)
endif()

View file

@ -51,7 +51,7 @@ select_library_configurations(SIXENSE)
set(SIXENSE_REQUIREMENTS SIXENSE_INCLUDE_DIRS SIXENSE_LIBRARIES)
if (WIN32)
list(APPEND SIXENSE_REQUIREMENTS SIXENSE_DEBUG_DLL_PATH SIXENSE_RELEASE_DLL_PATH SIXENSE_DEVICE_DLL_PATH)
list(APPEND SIXENSE_REQUIREMENTS SIXENSE_DEBUG_DLL_PATH SIXENSE_RELEASE_DLL_PATH)
endif ()
set(SIXENSE_LIBRARIES "${SIXENSE_LIBRARY}")

View file

@ -371,30 +371,8 @@
"name": "persistFilename",
"label": "Entities Filename",
"help": "the path to the file entities are stored in. Make sure the path exists.",
"placeholder": "resources/models.svo",
"default": "resources/models.svo",
"advanced": true
},
{
"name": "persistAsFileType",
"label": "File format for entity server's persistent data",
"help": "This defines how the entity server will save entities to disk.",
"default": "svo",
"type": "select",
"options": [
{
"value": "svo",
"label": "Entity server persists data as SVO"
},
{
"value": "json",
"label": "Entity server persists data as JSON"
},
{
"value": "json.gz",
"label": "Entity server persists data as gzipped JSON"
}
],
"placeholder": "resources/models.json.gz",
"default": "resources/models.json.gz",
"advanced": true
},
{

View file

@ -362,8 +362,11 @@ ToolBar = function(x, y, direction, optionalPersistenceKey, optionalInitialPosit
this.fractionKey = optionalPersistenceKey + '.fraction';
this.save = function () {
var screenSize = Controller.getViewportDimensions();
var fraction = {x: that.x / screenSize.x, y: that.y / screenSize.y};
Settings.setValue(this.fractionKey, JSON.stringify(fraction));
if (screenSize.x > 0 && screenSize.y > 0) {
// Guard against invalid screen size that can occur at shut-down.
var fraction = {x: that.x / screenSize.x, y: that.y / screenSize.y};
Settings.setValue(this.fractionKey, JSON.stringify(fraction));
}
}
} else {
this.save = function () { }; // Called on move. Can be overridden or extended by clients.

View file

@ -47,6 +47,11 @@ Item {
font.pixelSize: root.fontSize
text: "Framerate: " + root.framerate
}
Text {
color: root.fontColor;
font.pixelSize: root.fontSize
text: "Simrate: " + root.simrate
}
Text {
color: root.fontColor;
font.pixelSize: root.fontSize

View file

@ -112,9 +112,7 @@
#include "InterfaceActionFactory.h"
#include "avatar/AvatarManager.h"
#include "audio/AudioScope.h"
#include "devices/DdeFaceTracker.h"
#include "devices/EyeTracker.h"
#include "devices/Faceshift.h"
@ -148,6 +146,8 @@
#include "ui/AddressBarDialog.h"
#include "ui/UpdateDialog.h"
#include "ui/overlays/Cube3DOverlay.h"
// On Windows PCs with NVIDIA Optimus laptop GPUs, we want to enable the NVIDIA GPU
// FIXME seems to be broken.
#if defined(Q_OS_WIN)
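// (Illustration, not part of this diff: the export the comment above refers to follows NVIDIA's
//  published Optimus convention; exporting this symbol from the executable asks the driver to
//  run the process on the discrete GPU.)
//
//     extern "C" {
//         __declspec(dllexport) DWORD NvOptimusEnablement = 0x00000001;
//     }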
@ -172,7 +172,6 @@ public:
void call() { _fun(); }
};
using namespace std;
// Starfield information
@ -299,6 +298,13 @@ bool setupEssentials(int& argc, char** argv) {
return true;
}
// FIXME move to header, or better yet, design some kind of UI manager
// to take care of highlighting keyboard focused items, rather than
// continuing to overburden Application.cpp
Cube3DOverlay* _keyboardFocusHighlight{ nullptr };
int _keyboardFocusHighlightID{ -1 };
Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
QApplication(argc, argv),
_dependencyManagerIsSetup(setupEssentials(argc, argv)),
@ -554,7 +560,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
_toolWindow = new ToolWindow();
_toolWindow->setWindowFlags(_toolWindow->windowFlags() | Qt::WindowStaysOnTopHint);
_toolWindow->setWindowFlags((_toolWindow->windowFlags() | Qt::WindowStaysOnTopHint) & ~Qt::WindowMinimizeButtonHint);
_toolWindow->setWindowTitle("Tools");
_offscreenContext->makeCurrent();
@ -673,8 +679,10 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
auto& packetReceiver = nodeList->getPacketReceiver();
packetReceiver.registerListener(PacketType::DomainConnectionDenied, this, "handleDomainConnectionDeniedPacket");
// If the user clicks on an entity, we will check that it's an unlocked web entity, and if so, set the focus to it
auto entityScriptingInterface = DependencyManager::get<EntityScriptingInterface>();
connect(entityScriptingInterface.data(), &EntityScriptingInterface::hoverEnterEntity, [this, entityScriptingInterface](const EntityItemID& entityItemID, const MouseEvent& event) {
connect(entityScriptingInterface.data(), &EntityScriptingInterface::clickDownOnEntity,
[this, entityScriptingInterface](const EntityItemID& entityItemID, const MouseEvent& event) {
if (_keyboardFocusedItem != entityItemID) {
_keyboardFocusedItem = UNKNOWN_ENTITY_ID;
auto properties = entityScriptingInterface->getEntityProperties(entityItemID);
@ -684,14 +692,49 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
if (webEntity) {
webEntity->setProxyWindow(_window->windowHandle());
_keyboardFocusedItem = entityItemID;
_lastAcceptedKeyPress = usecTimestampNow();
if (_keyboardFocusHighlightID < 0 || !getOverlays().isAddedOverlay(_keyboardFocusHighlightID)) {
_keyboardFocusHighlight = new Cube3DOverlay();
_keyboardFocusHighlight->setAlpha(1.0f);
_keyboardFocusHighlight->setBorderSize(1.0f);
_keyboardFocusHighlight->setColor({ 0xFF, 0xEF, 0x00 });
_keyboardFocusHighlight->setIsSolid(false);
_keyboardFocusHighlight->setPulseMin(0.5);
_keyboardFocusHighlight->setPulseMax(1.0);
_keyboardFocusHighlight->setColorPulse(1.0);
_keyboardFocusHighlight->setIgnoreRayIntersection(true);
_keyboardFocusHighlight->setDrawInFront(true);
}
_keyboardFocusHighlight->setRotation(webEntity->getRotation());
_keyboardFocusHighlight->setPosition(webEntity->getPosition());
_keyboardFocusHighlight->setDimensions(webEntity->getDimensions() * 1.05f);
_keyboardFocusHighlight->setVisible(true);
_keyboardFocusHighlightID = getOverlays().addOverlay(_keyboardFocusHighlight);
}
}
if (_keyboardFocusedItem == UNKNOWN_ENTITY_ID && _keyboardFocusHighlight) {
_keyboardFocusHighlight->setVisible(false);
}
}
});
connect(entityScriptingInterface.data(), &EntityScriptingInterface::deletingEntity,
[=](const EntityItemID& entityItemID) {
if (entityItemID == _keyboardFocusedItem) {
_keyboardFocusedItem = UNKNOWN_ENTITY_ID;
if (_keyboardFocusHighlight) {
_keyboardFocusHighlight->setVisible(false);
}
}
});
connect(entityScriptingInterface.data(), &EntityScriptingInterface::hoverLeaveEntity, [=](const EntityItemID& entityItemID, const MouseEvent& event) {
if (_keyboardFocusedItem == entityItemID) {
_keyboardFocusedItem = UNKNOWN_ENTITY_ID;
// If the user clicks somewhere where there is NO entity at all, we will release focus
connect(getEntities(), &EntityTreeRenderer::mousePressOffEntity,
[=](const RayToEntityIntersectionResult& entityItemID, const QMouseEvent* event, unsigned int deviceId) {
_keyboardFocusedItem = UNKNOWN_ENTITY_ID;
if (_keyboardFocusHighlight) {
_keyboardFocusHighlight->setVisible(false);
}
});
}
@ -704,6 +747,11 @@ void Application::aboutToQuit() {
}
void Application::cleanupBeforeQuit() {
if (_keyboardFocusHighlightID > 0) {
getOverlays().deleteOverlay(_keyboardFocusHighlightID);
_keyboardFocusHighlightID = -1;
}
_keyboardFocusHighlight = nullptr;
_entities.clear(); // this will allow entity scripts to properly shutdown
@ -1270,6 +1318,7 @@ bool Application::event(QEvent* event) {
event->setAccepted(false);
QCoreApplication::sendEvent(webEntity->getEventHandler(), event);
if (event->isAccepted()) {
_lastAcceptedKeyPress = usecTimestampNow();
return true;
}
}
@ -1965,10 +2014,19 @@ void Application::checkFPS() {
void Application::idle() {
PROFILE_RANGE(__FUNCTION__);
static SimpleAverage<float> interIdleDurations;
static uint64_t lastIdleStart{ 0 };
static uint64_t lastIdleEnd{ 0 };
uint64_t now = usecTimestampNow();
uint64_t idleStartToStartDuration = now - lastIdleStart;
if (lastIdleStart > 0 && idleStartToStartDuration > 0) {
_simsPerSecond.updateAverage((float)USECS_PER_SECOND / (float)idleStartToStartDuration);
}
lastIdleStart = now;
if (lastIdleEnd != 0) {
uint64_t now = usecTimestampNow();
interIdleDurations.update(now - lastIdleEnd);
static uint64_t lastReportTime = now;
if ((now - lastReportTime) >= (USECS_PER_SECOND)) {
@ -1986,6 +2044,15 @@ void Application::idle() {
return; // bail early, nothing to do here.
}
// Drop focus from _keyboardFocusedItem if no keyboard messages for 30 seconds
if (!_keyboardFocusedItem.isInvalidID()) {
const quint64 LOSE_FOCUS_AFTER_ELAPSED_TIME = 30 * USECS_PER_SECOND; // if idle for 30 seconds, drop focus
quint64 elapsedSinceAcceptedKeyPress = usecTimestampNow() - _lastAcceptedKeyPress;
if (elapsedSinceAcceptedKeyPress > LOSE_FOCUS_AFTER_ELAPSED_TIME) {
_keyboardFocusedItem = UNKNOWN_ENTITY_ID;
}
}
// Normally we check PipelineWarnings, but since idle will often take more than 10ms we only show these idle timing
// details if we're in ExtraDebugging mode. However, the ::update() and its subcomponents will show their timing
// details normally.
@ -2054,6 +2121,16 @@ void Application::idle() {
lastIdleEnd = usecTimestampNow();
}
float Application::getAverageSimsPerSecond() {
uint64_t now = usecTimestampNow();
if (now - _lastSimsPerSecondUpdate > USECS_PER_SECOND) {
_simsPerSecondReport = _simsPerSecond.getAverage();
_lastSimsPerSecondUpdate = now;
}
return _simsPerSecondReport;
}
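For reference, a minimal self-contained sketch of the sims-per-second pattern introduced above; names are illustrative, and a simple exponential average stands in for hifi's SimpleMovingAverage:
#include <cstdint>
class SimRateTracker {
public:
    // Call once per simulation step with the current time in microseconds.
    void markStep(uint64_t nowUsecs) {
        if (_lastStepUsecs > 0 && nowUsecs > _lastStepUsecs) {
            // Instantaneous rate in Hz = one second divided by the step-to-step interval.
            float instantaneousHz = (float)kUsecsPerSecond / (float)(nowUsecs - _lastStepUsecs);
            _averageHz = 0.9f * _averageHz + 0.1f * instantaneousHz;
        }
        _lastStepUsecs = nowUsecs;
    }
    // Refresh the published value at most once per second, as getAverageSimsPerSecond() does above.
    float report(uint64_t nowUsecs) {
        if (nowUsecs - _lastReportUsecs > kUsecsPerSecond) {
            _reportedHz = _averageHz;
            _lastReportUsecs = nowUsecs;
        }
        return _reportedHz;
    }
private:
    static constexpr uint64_t kUsecsPerSecond = 1000000;
    uint64_t _lastStepUsecs { 0 };
    uint64_t _lastReportUsecs { 0 };
    float _averageHz { 0.0f };
    float _reportedHz { 0.0f };
};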
void Application::setLowVelocityFilter(bool lowVelocityFilter) {
InputDevice::setLowVelocityFilter(lowVelocityFilter);
}
@ -2402,6 +2479,10 @@ void Application::updateMouseRay() {
}
}
// Called during Application::update immediately before AvatarManager::updateMyAvatar, updating my data that is then sent to everyone.
// (Maybe this code should be moved there?)
// The principal result is to call updateLookAtTargetAvatar() and then setLookAtPosition().
// Note that it is called BEFORE we update position or joints based on sensors, etc.
void Application::updateMyAvatarLookAtPosition() {
PerformanceTimer perfTimer("lookAt");
bool showWarnings = Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings);

View file

@ -38,6 +38,7 @@
#include <ViewFrustum.h>
#include <plugins/PluginContainer.h>
#include <plugins/PluginManager.h>
#include <SimpleMovingAverage.h>
#include "AudioClient.h"
#include "Bookmarks.h"
@ -350,6 +351,8 @@ public:
const QRect& getMirrorViewRect() const { return _mirrorViewRect; }
float getAverageSimsPerSecond();
signals:
/// Fired when we're simulating; allows external parties to hook in.
@ -680,6 +683,11 @@ private:
DialogsManagerScriptingInterface* _dialogsManagerScriptingInterface = new DialogsManagerScriptingInterface();
EntityItemID _keyboardFocusedItem;
quint64 _lastAcceptedKeyPress = 0;
SimpleMovingAverage _simsPerSecond{10};
int _simsPerSecondReport = 0;
quint64 _lastSimsPerSecondUpdate = 0;
};
#endif // hifi_Application_h

View file

@ -258,7 +258,7 @@ Menu::Menu() {
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::NamesAboveHeads, 0, true);
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::BlueSpeechSphere, 0, true);
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::EnableCharacterController, 0, true,
avatar, SLOT(updateMotionBehavior()));
avatar, SLOT(updateMotionBehaviorFromMenu()));
MenuWrapper* viewMenu = addMenu("View");
addActionToQMenuAndActionHash(viewMenu, MenuOption::ReloadContent, 0, qApp, SLOT(reloadResourceCaches()));

View file

@ -563,6 +563,9 @@ glm::quat Avatar::computeRotationFromBodyToWorldUp(float proportion) const {
}
void Avatar::fixupModelsInScene() {
if (!(_skeletonModel.isRenderable() && getHead()->getFaceModel().isRenderable())) {
return;
}
// Check whether our models were ready when we added them to the scene; if they were not ready,
// fix them up in the scene now.
@ -688,6 +691,23 @@ glm::vec3 Avatar::getDisplayNamePosition() const {
const float HEAD_PROPORTION = 0.75f;
namePosition = _position + getBodyUpDirection() * (getBillboardSize() * HEAD_PROPORTION);
}
#ifdef DEBUG
// TODO: Temporary logging to track cause of invalid scale value; remove once cause has been fixed.
// See other TODO below.
if (glm::isnan(namePosition.x) || glm::isnan(namePosition.y) || glm::isnan(namePosition.z)
|| glm::isinf(namePosition.x) || glm::isinf(namePosition.y) || glm::isinf(namePosition.z)) {
qDebug() << "namePosition =" << namePosition;
glm::vec3 tempPosition(0.0f);
if (getSkeletonModel().getNeckPosition(tempPosition)) {
qDebug() << "getBodyUpDirection() =" << getBodyUpDirection();
qDebug() << "getHeadHeight() =" << getHeadHeight();
} else {
qDebug() << "_position =" << _position;
qDebug() << "getBodyUpDirection() =" << getBodyUpDirection();
qDebug() << "getBillboardSize() =" << getBillboardSize();
}
}
#endif
return namePosition;
}
@ -722,7 +742,8 @@ Transform Avatar::calculateDisplayNameTransform(const ViewFrustum& frustum, floa
// Compute correct scale to apply
float scale = DESIRED_HIGHT_ON_SCREEN / (fontSize * pixelHeight) * devicePixelRatio;
#ifdef DEBUG
// TODO: Temporary logging to track cause of invalid scale vale; remove once cause has been fixed.
// TODO: Temporary logging to track cause of invalid scale value; remove once cause has been fixed.
// Problem is probably due to an invalid getDisplayNamePosition(). See extra logging above.
if (scale == 0.0f || glm::isnan(scale) || glm::isinf(scale)) {
if (scale == 0.0f) {
qDebug() << "ASSERT because scale == 0.0f";
@ -733,6 +754,7 @@ Transform Avatar::calculateDisplayNameTransform(const ViewFrustum& frustum, floa
if (glm::isinf(scale)) {
qDebug() << "ASSERT because isinf(scale)";
}
qDebug() << "textPosition =" << textPosition;
qDebug() << "windowSizeY =" << windowSizeY;
qDebug() << "p1.y =" << p1.y;
qDebug() << "p1.w =" << p1.w;

View file

@ -344,6 +344,20 @@ glm::quat Head::getFinalOrientationInLocalFrame() const {
return glm::quat(glm::radians(glm::vec3(getFinalPitch(), getFinalYaw(), getFinalRoll() )));
}
// Everyone else's head keeps track of a lookAtPosition that everybody sees the same, and refers to where that head
// is looking in model space -- e.g., at someone's eyeball, or between their eyes, or mouth, etc. Everyone's Interface
// will have the same value for the lookAtPosition of any given head.
//
// Everyone else's head also keeps track of a correctedLookAtPosition that may be different for the same head within
// different Interfaces. If that head is not looking at me, the correctedLookAtPosition is the same as the lookAtPosition.
// However, if that head is looking at me, then I will attempt to adjust the lookAtPosition by the difference between
// my (singular) eye position and my actual camera position. This adjustment is used on their eyeballs during rendering
// (and also on any lookAt vector display for that head, during rendering). Note that:
// 1. this adjustment can be made directly to the other head's eyeball joints, because we won't be sending their joint information to others.
// 2. the corrected position is a separate ivar, so the common/uncorrected value is still available
//
// There is a pun here: The two lookAtPositions will always be the same for my own avatar in my own Interface, because I
// will not be looking at myself. (Even in a mirror, I will be looking at the camera.)
glm::vec3 Head::getCorrectedLookAtPosition() {
if (isLookingAtMe()) {
return _correctedLookAtPosition;
@ -364,7 +378,7 @@ void Head::setCorrectedLookAtPosition(glm::vec3 correctedLookAtPosition) {
bool Head::isLookingAtMe() {
// Allow for outages such as may be encountered during avatar movement
quint64 now = usecTimestampNow();
const quint64 LOOKING_AT_ME_GAP_ALLOWED = 1000000; // microseconds
const quint64 LOOKING_AT_ME_GAP_ALLOWED = (5 * 1000 * 1000) / 60; // 5 frames at 60 Hz, in microseconds
return _isLookingAtMe || (now - _wasLastLookingAtMe) < LOOKING_AT_ME_GAP_ALLOWED;
}
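A minimal sketch of the correction described in the comment block above (illustrative names, not hifi API): when another head is looking at me, its lookAtPosition is shifted by the offset between my eye position and my camera position.
glm::vec3 correctedLookAt(const glm::vec3& lookAtPosition, bool isLookingAtMe,
                          const glm::vec3& myEyePosition, const glm::vec3& myCameraPosition) {
    if (!isLookingAtMe) {
        // The shared, uncorrected value: every Interface sees the same lookAtPosition.
        return lookAtPosition;
    }
    // Aim their eyeballs at my camera rather than at my avatar's eye position.
    return lookAtPosition + (myCameraPosition - myEyePosition);
}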

View file

@ -97,7 +97,7 @@ void SkeletonModel::initJointStates(QVector<JointState> states) {
}
const float PALM_PRIORITY = DEFAULT_PRIORITY;
// Called within Model::simulate call, below.
void SkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {
if (_owningAvatar->isMyAvatar()) {
_rig->computeMotionAnimationState(deltaTime, _owningAvatar->getPosition(), _owningAvatar->getVelocity(), _owningAvatar->getOrientation());
@ -105,26 +105,43 @@ void SkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {
Model::updateRig(deltaTime, parentTransform);
if (_owningAvatar->isMyAvatar()) {
const FBXGeometry& geometry = _geometry->getFBXGeometry();
Head* head = _owningAvatar->getHead();
Rig::HeadParameters params;
params.modelRotation = getRotation();
params.modelTranslation = getTranslation();
params.leanSideways = _owningAvatar->getHead()->getFinalLeanSideways();
params.leanForward = _owningAvatar->getHead()->getFinalLeanForward();
params.torsoTwist = _owningAvatar->getHead()->getTorsoTwist();
params.localHeadOrientation = _owningAvatar->getHead()->getFinalOrientationInLocalFrame();
params.worldHeadOrientation = _owningAvatar->getHead()->getFinalOrientationInWorldFrame();
params.eyeLookAt = _owningAvatar->getHead()->getLookAtPosition();
params.eyeSaccade = _owningAvatar->getHead()->getSaccade();
params.leanSideways = head->getFinalLeanSideways();
params.leanForward = head->getFinalLeanForward();
params.torsoTwist = head->getTorsoTwist();
params.localHeadOrientation = head->getFinalOrientationInLocalFrame();
params.worldHeadOrientation = head->getFinalOrientationInWorldFrame();
params.eyeLookAt = head->getLookAtPosition();
params.eyeSaccade = head->getSaccade();
params.leanJointIndex = geometry.leanJointIndex;
params.neckJointIndex = geometry.neckJointIndex;
params.leftEyeJointIndex = geometry.leftEyeJointIndex;
params.rightEyeJointIndex = geometry.rightEyeJointIndex;
_rig->updateFromHeadParameters(params);
} else {
// This is a little more work than we really want.
//
// Other avatars' joints, including their eyes, should already be set just like any other joints
// from the wire data. But when looking at me, we want the eyes to use the corrected lookAt.
//
// Thus this should really only be ... else if (_owningAvatar->getHead()->isLookingAtMe()) {...
// However, in the !isLookingAtMe case, the eyes aren't rotating the way they should right now.
// (They latch their looking-at-me position.) We will revisit that as priorities allow.
const FBXGeometry& geometry = _geometry->getFBXGeometry();
Head* head = _owningAvatar->getHead();
_rig->updateEyeJoints(geometry.leftEyeJointIndex, geometry.rightEyeJointIndex,
getTranslation(), getRotation(),
head->getFinalOrientationInWorldFrame(), head->getCorrectedLookAtPosition());
}
}
// Called by Avatar::simulate after it has set the joint states (fullUpdate true if changed),
// but just before head has been simulated.
void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
setTranslation(_owningAvatar->getSkeletonPosition());
static const glm::quat refOrientation = glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f));

View file

@ -153,21 +153,20 @@ void EyeTracker::onStreamStarted() {
qCDebug(interfaceapp) << "Eye Tracker: Started streaming";
}
// TODO: Re-enable once saving / loading calibrations is working
//if (_isStreaming) {
// // Automatically load calibration if one has been saved.
// QString availableCalibrations = QString(smi_getAvailableCalibrations());
// if (availableCalibrations.contains(HIGH_FIDELITY_EYE_TRACKER_CALIBRATION)) {
// result = smi_loadCalibration(HIGH_FIDELITY_EYE_TRACKER_CALIBRATION);
// if (result != SMI_RET_SUCCESS) {
// qCWarning(interfaceapp) << "Eye Tracker: Error loading calibration:" << smiReturnValueToString(result);
// QMessageBox::warning(nullptr, "Eye Tracker Error", "Error loading calibration"
// + smiReturnValueToString(result));
// } else {
// qCDebug(interfaceapp) << "Eye Tracker: Loaded calibration";
// }
// }
//}
if (_isStreaming) {
// Automatically load calibration if one has been saved.
QString availableCalibrations = QString(smi_getAvailableCalibrations());
if (availableCalibrations.contains(HIGH_FIDELITY_EYE_TRACKER_CALIBRATION)) {
result = smi_loadCalibration(HIGH_FIDELITY_EYE_TRACKER_CALIBRATION);
if (result != SMI_RET_SUCCESS) {
qCWarning(interfaceapp) << "Eye Tracker: Error loading calibration:" << smiReturnValueToString(result);
QMessageBox::warning(nullptr, "Eye Tracker Error", "Error loading calibration"
+ smiReturnValueToString(result));
} else {
qCDebug(interfaceapp) << "Eye Tracker: Loaded calibration";
}
}
}
}
#endif
@ -260,11 +259,10 @@ void EyeTracker::calibrate(int points) {
if (result != SMI_RET_SUCCESS) {
qCWarning(interfaceapp) << "Eye Tracker: Error performing calibration:" << smiReturnValueToString(result);
} else {
// TODO: Re - enable once saving / loading calibrations is working
//result = smi_saveCalibration(HIGH_FIDELITY_EYE_TRACKER_CALIBRATION);
//if (result != SMI_RET_SUCCESS) {
// qCWarning(interfaceapp) << "Eye Tracker: Error saving calibration:" << smiReturnValueToString(result);
//}
result = smi_saveCalibration(HIGH_FIDELITY_EYE_TRACKER_CALIBRATION);
if (result != SMI_RET_SUCCESS) {
qCWarning(interfaceapp) << "Eye Tracker: Error saving calibration:" << smiReturnValueToString(result);
}
}
}
@ -292,11 +290,10 @@ QString EyeTracker::smiReturnValueToString(int value) {
return "Eye cameras not available";
case smi_ErrorReturnValue::SMI_ERROR_OCULUS_RUNTIME_NOT_SUPPORTED:
return "Oculus runtime not supported";
// TODO: Re-enable once saving / loading calibrations is working
//case smi_ErrorReturnValue::SMI_ERROR_FILE_NOT_FOUND:
// return "File not found";
//case smi_ErrorReturnValue::SMI_ERROR_FILE_EMPTY:
// return "File empty";
case smi_ErrorReturnValue::SMI_ERROR_FILE_NOT_FOUND:
return "File not found";
case smi_ErrorReturnValue::SMI_ERROR_FILE_EMPTY:
return "File empty";
case smi_ErrorReturnValue::SMI_ERROR_UNKNOWN:
return "Unknown error";
default:

View file

@ -114,6 +114,7 @@ void Stats::updateStats() {
STAT_UPDATE(avatarCount, avatarManager->size() - 1);
STAT_UPDATE(serverCount, nodeList->size());
STAT_UPDATE(framerate, (int)qApp->getFps());
STAT_UPDATE(simrate, (int)Application::getInstance()->getAverageSimsPerSecond());
auto bandwidthRecorder = DependencyManager::get<BandwidthRecorder>();
STAT_UPDATE(packetInCount, bandwidthRecorder->getCachedTotalAverageInputPacketsPerSecond());

View file

@ -30,6 +30,7 @@ class Stats : public QQuickItem {
STATS_PROPERTY(int, serverCount, 0)
STATS_PROPERTY(int, framerate, 0)
STATS_PROPERTY(int, simrate, 0)
STATS_PROPERTY(int, avatarCount, 0)
STATS_PROPERTY(int, packetInCount, 0)
STATS_PROPERTY(int, packetOutCount, 0)
@ -95,6 +96,7 @@ signals:
void timingExpandedChanged();
void serverCountChanged();
void framerateChanged();
void simrateChanged();
void avatarCountChanged();
void packetInCountChanged();
void packetOutCountChanged();

View file

@ -786,8 +786,8 @@ glm::quat Rig::getJointDefaultRotationInParentFrame(int jointIndex) {
void Rig::updateFromHeadParameters(const HeadParameters& params) {
updateLeanJoint(params.leanJointIndex, params.leanSideways, params.leanForward, params.torsoTwist);
updateNeckJoint(params.neckJointIndex, params.localHeadOrientation, params.leanSideways, params.leanForward, params.torsoTwist);
updateEyeJoint(params.leftEyeJointIndex, params.modelTranslation, params.modelRotation, params.worldHeadOrientation, params.eyeLookAt, params.eyeSaccade);
updateEyeJoint(params.rightEyeJointIndex, params.modelTranslation, params.modelRotation, params.worldHeadOrientation, params.eyeLookAt, params.eyeSaccade);
updateEyeJoints(params.leftEyeJointIndex, params.rightEyeJointIndex, params.modelTranslation, params.modelRotation,
params.worldHeadOrientation, params.eyeLookAt, params.eyeSaccade);
}
void Rig::updateLeanJoint(int index, float leanSideways, float leanForward, float torsoTwist) {
@ -828,6 +828,11 @@ void Rig::updateNeckJoint(int index, const glm::quat& localHeadOrientation, floa
}
}
void Rig::updateEyeJoints(int leftEyeIndex, int rightEyeIndex, const glm::vec3& modelTranslation, const glm::quat& modelRotation,
const glm::quat& worldHeadOrientation, const glm::vec3& lookAtSpot, const glm::vec3& saccade) {
updateEyeJoint(leftEyeIndex, modelTranslation, modelRotation, worldHeadOrientation, lookAtSpot, saccade);
updateEyeJoint(rightEyeIndex, modelTranslation, modelRotation, worldHeadOrientation, lookAtSpot, saccade);
}
void Rig::updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm::quat& modelRotation, const glm::quat& worldHeadOrientation, const glm::vec3& lookAtSpot, const glm::vec3& saccade) {
if (index >= 0 && _jointStates[index].getParentIndex() >= 0) {
auto& state = _jointStates[index];

View file

@ -157,6 +157,8 @@ public:
void setEnableRig(bool isEnabled) { _enableRig = isEnabled; }
void updateFromHeadParameters(const HeadParameters& params);
void updateEyeJoints(int leftEyeIndex, int rightEyeIndex, const glm::vec3& modelTranslation, const glm::quat& modelRotation,
const glm::quat& worldHeadOrientation, const glm::vec3& lookAtSpot, const glm::vec3& saccade = glm::vec3(0.0f));
virtual void setHandPosition(int jointIndex, const glm::vec3& position, const glm::quat& rotation,
float scale, float priority) = 0;

View file

@ -861,6 +861,8 @@ void EntityTreeRenderer::mousePressEvent(QMouseEvent* event, unsigned int device
if (entityScript.property("clickDownOnEntity").isValid()) {
entityScript.property("clickDownOnEntity").call(entityScript, entityScriptArgs);
}
} else {
emit mousePressOffEntity(rayPickResult, event, deviceID);
}
_lastMouseEvent = MouseEvent(*event, deviceID);
_lastMouseEventValid = true;

View file

@ -95,6 +95,7 @@ public:
signals:
void mousePressOnEntity(const RayToEntityIntersectionResult& entityItemID, const QMouseEvent* event, unsigned int deviceId);
void mousePressOffEntity(const RayToEntityIntersectionResult& entityItemID, const QMouseEvent* event, unsigned int deviceId);
void mouseMoveOnEntity(const RayToEntityIntersectionResult& entityItemID, const QMouseEvent* event, unsigned int deviceId);
void mouseReleaseOnEntity(const RayToEntityIntersectionResult& entityItemID, const QMouseEvent* event, unsigned int deviceId);

View file

@ -56,6 +56,16 @@ RenderableWebEntityItem::~RenderableWebEntityItem() {
}
void RenderableWebEntityItem::render(RenderArgs* args) {
#ifdef WANT_EXTRA_DEBUGGING
{
gpu::Batch& batch = *args->_batch;
batch.setModelTransform(getTransformToCenter()); // we want to include the scale as well
glm::vec4 cubeColor{ 1.0f, 0.0f, 0.0f, 1.0f};
DependencyManager::get<DeferredLightingEffect>()->renderWireCube(batch, 1.0f, cubeColor);
}
#endif
QOpenGLContext * currentContext = QOpenGLContext::currentContext();
QSurface * currentSurface = currentContext->surface();
if (!_webSurface) {

View file

@ -36,9 +36,9 @@ void main(void) {
float inPositionY = (_inPosition.y - 0.5) / voxelVolumeSize.y;
float inPositionZ = (_inPosition.z - 0.5) / voxelVolumeSize.z;
vec4 xyDiffuse = texture2D(xMap, vec2(-inPositionX, -inPositionY));
vec4 xzDiffuse = texture2D(yMap, vec2(-inPositionX, inPositionZ));
vec4 yzDiffuse = texture2D(zMap, vec2(inPositionZ, -inPositionY));
vec4 xyDiffuse = texture(xMap, vec2(-inPositionX, -inPositionY));
vec4 xzDiffuse = texture(yMap, vec2(-inPositionX, inPositionZ));
vec4 yzDiffuse = texture(zMap, vec2(inPositionZ, -inPositionY));
vec3 xyDiffuseScaled = xyDiffuse.rgb * abs(worldNormal.z);
vec3 xzDiffuseScaled = xzDiffuse.rgb * abs(worldNormal.y);

View file

@ -154,9 +154,9 @@ public:
DEFINE_PROPERTY_REF(PROP_HREF, Href, href, QString);
DEFINE_PROPERTY_REF(PROP_DESCRIPTION, Description, description, QString);
DEFINE_PROPERTY(PROP_FACE_CAMERA, FaceCamera, faceCamera, bool);
DEFINE_PROPERTY_REF(PROP_ACTION_DATA, ActionData, actionData, QByteArray);
DEFINE_PROPERTY(PROP_NORMALS, Normals, normals, QVector<glm::vec3>);
DEFINE_PROPERTY(PROP_STROKE_WIDTHS, StrokeWidths, strokeWidths, QVector<float>);
DEFINE_PROPERTY_REF(PROP_ACTION_DATA, ActionData, actionData, QByteArray);
DEFINE_PROPERTY_REF(PROP_X_TEXTURE_URL, XTextureURL, xTextureURL, QString);
DEFINE_PROPERTY_REF(PROP_Y_TEXTURE_URL, YTextureURL, yTextureURL, QString);
DEFINE_PROPERTY_REF(PROP_Z_TEXTURE_URL, ZTextureURL, zTextureURL, QString);

View file

@ -170,9 +170,10 @@ bool PolyLineEntityItem::setLinePoints(const QVector<glm::vec3>& points) {
for (int i = 0; i < points.size(); i++) {
glm::vec3 point = points.at(i);
glm::vec3 pos = getPosition();
glm::vec3 halfBox = getDimensions() * 0.5f;
if ( (point.x < - halfBox.x || point.x > halfBox.x) || (point.y < -halfBox.y || point.y > halfBox.y) || (point.z < - halfBox.z || point.z > halfBox.z) ) {
if ((point.x < - halfBox.x || point.x > halfBox.x) ||
(point.y < -halfBox.y || point.y > halfBox.y) ||
(point.z < - halfBox.z || point.z > halfBox.z)) {
qDebug() << "Point is outside entity's bounding box";
return false;
}

View file

@ -1108,11 +1108,10 @@ ExtractedMesh extractMesh(const FBXNode& object, unsigned int& meshIndex) {
if (subdata.name == "Materials") {
materials = getIntVector(subdata);
} else if (subdata.name == "MappingInformationType") {
if (subdata.properties.at(0) == "ByPolygon") {
if (subdata.properties.at(0) == "ByPolygon")
isMaterialPerPolygon = true;
} else {
isMaterialPerPolygon = false;
}
} else {
isMaterialPerPolygon = false;
}
}
@ -1126,7 +1125,6 @@ ExtractedMesh extractMesh(const FBXNode& object, unsigned int& meshIndex) {
}
}
bool isMultiMaterial = false;
if (isMaterialPerPolygon) {
isMultiMaterial = true;

View file

@ -1,408 +1,408 @@
//
// State
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_State_h
#define hifi_gpu_State_h
#include "Format.h"
#include <memory>
#include <vector>
#include <unordered_map>
#include <bitset>
// Why a macro and not a fancy template, you may ask?
// Because some of the fields are bools packed tightly in the State::Cache class,
// and that just doesn't work well with template T& variable manipulation...
#define SET_FIELD(field, defaultValue, value, dest) {\
dest = value;\
if (value == defaultValue) {\
_signature.reset(field);\
} else {\
_signature.set(field);\
}\
_stamp++;\
}\
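// For illustration (assumed expansion, not part of this header): a setter such as
// setCullMode(CULL_BACK) invokes SET_FIELD(CULL_MODE, DEFAULT.cullMode, CULL_BACK, _values.cullMode),
// which expands to roughly:
//     _values.cullMode = CULL_BACK;
//     if (CULL_BACK == DEFAULT.cullMode) { _signature.reset(CULL_MODE); }
//     else { _signature.set(CULL_MODE); }
//     _stamp++;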
namespace gpu {
class GPUObject;
class State {
public:
State();
virtual ~State();
Stamp getStamp() const { return _stamp; }
typedef ::gpu::ComparisonFunction ComparisonFunction;
enum FillMode {
FILL_POINT = 0,
FILL_LINE,
FILL_FACE,
NUM_FILL_MODES,
};
enum CullMode {
CULL_NONE = 0,
CULL_FRONT,
CULL_BACK,
NUM_CULL_MODES,
};
enum StencilOp {
STENCIL_OP_KEEP = 0,
STENCIL_OP_ZERO,
STENCIL_OP_REPLACE,
STENCIL_OP_INCR_SAT,
STENCIL_OP_DECR_SAT,
STENCIL_OP_INVERT,
STENCIL_OP_INCR,
STENCIL_OP_DECR,
NUM_STENCIL_OPS,
};
enum BlendArg {
ZERO = 0,
ONE,
SRC_COLOR,
INV_SRC_COLOR,
SRC_ALPHA,
INV_SRC_ALPHA,
DEST_ALPHA,
INV_DEST_ALPHA,
DEST_COLOR,
INV_DEST_COLOR,
SRC_ALPHA_SAT,
FACTOR_COLOR,
INV_FACTOR_COLOR,
FACTOR_ALPHA,
INV_FACTOR_ALPHA,
NUM_BLEND_ARGS,
};
enum BlendOp {
BLEND_OP_ADD = 0,
BLEND_OP_SUBTRACT,
BLEND_OP_REV_SUBTRACT,
BLEND_OP_MIN,
BLEND_OP_MAX,
NUM_BLEND_OPS,
};
enum ColorMask
{
WRITE_NONE = 0,
WRITE_RED = 1,
WRITE_GREEN = 2,
WRITE_BLUE = 4,
WRITE_ALPHA = 8,
WRITE_ALL = (WRITE_RED | WRITE_GREEN | WRITE_BLUE | WRITE_ALPHA ),
};
class DepthTest {
uint8 _function = LESS;
uint8 _writeMask = true;
uint8 _enabled = false;
uint8 _spare = 0;
public:
DepthTest(bool enabled = false, bool writeMask = true, ComparisonFunction func = LESS) :
_function(func), _writeMask(writeMask), _enabled(enabled) {}
bool isEnabled() const { return _enabled; }
ComparisonFunction getFunction() const { return ComparisonFunction(_function); }
bool getWriteMask() const { return _writeMask; }
int32 getRaw() const { return *(reinterpret_cast<const int32*>(this)); }
DepthTest(int32 raw) { *(reinterpret_cast<int32*>(this)) = raw; }
bool operator== (const DepthTest& right) const { return getRaw() == right.getRaw(); }
bool operator!= (const DepthTest& right) const { return getRaw() != right.getRaw(); }
};
class StencilTest {
static const int FUNC_MASK = 0x000f;
static const int FAIL_OP_MASK = 0x00f0;
static const int DEPTH_FAIL_OP_MASK = 0x0f00;
static const int PASS_OP_MASK = 0xf000;
static const int FAIL_OP_OFFSET = 4;
static const int DEPTH_FAIL_OP_OFFSET = 8;
static const int PASS_OP_OFFSET = 12;
uint16 _functionAndOperations;
uint8 _reference = 0;
uint8 _readMask = 0xff;
public:
StencilTest(uint8 reference = 0, uint8 readMask =0xFF, ComparisonFunction func = ALWAYS, StencilOp failOp = STENCIL_OP_KEEP, StencilOp depthFailOp = STENCIL_OP_KEEP, StencilOp passOp = STENCIL_OP_KEEP) :
_functionAndOperations(func | (failOp << FAIL_OP_OFFSET) | (depthFailOp << DEPTH_FAIL_OP_OFFSET) | (passOp << PASS_OP_OFFSET)),
_reference(reference), _readMask(readMask)
{}
ComparisonFunction getFunction() const { return ComparisonFunction(_functionAndOperations & FUNC_MASK); }
StencilOp getFailOp() const { return StencilOp((_functionAndOperations & FAIL_OP_MASK) >> FAIL_OP_OFFSET); }
StencilOp getDepthFailOp() const { return StencilOp((_functionAndOperations & DEPTH_FAIL_OP_MASK) >> DEPTH_FAIL_OP_OFFSET); }
StencilOp getPassOp() const { return StencilOp((_functionAndOperations & PASS_OP_MASK) >> PASS_OP_OFFSET); }
uint8 getReference() const { return _reference; }
uint8 getReadMask() const { return _readMask; }
int32 getRaw() const { return *(reinterpret_cast<const int32*>(this)); }
StencilTest(int32 raw) { *(reinterpret_cast<int32*>(this)) = raw; }
bool operator== (const StencilTest& right) const { return getRaw() == right.getRaw(); }
bool operator!= (const StencilTest& right) const { return getRaw() != right.getRaw(); }
};
class StencilActivation {
uint8 _frontWriteMask = 0xFF;
uint8 _backWriteMask = 0xFF;
uint16 _enabled = 0;
public:
StencilActivation(bool enabled, uint8 frontWriteMask = 0xFF, uint8 backWriteMask = 0xFF) :
_frontWriteMask(frontWriteMask), _backWriteMask(backWriteMask), _enabled(enabled) {}
bool isEnabled() const { return (_enabled != 0); }
uint8 getWriteMaskFront() const { return _frontWriteMask; }
uint8 getWriteMaskBack() const { return _backWriteMask; }
int32 getRaw() const { return *(reinterpret_cast<const int32*>(this)); }
StencilActivation(int32 raw) { *(reinterpret_cast<int32*>(this)) = raw; }
bool operator== (const StencilActivation& right) const { return getRaw() == right.getRaw(); }
bool operator!= (const StencilActivation& right) const { return getRaw() != right.getRaw(); }
};
class BlendFunction {
static const int COLOR_MASK = 0x0f;
static const int ALPHA_MASK = 0xf0;
static const int ALPHA_OFFSET = 4;
uint8 _enabled;
uint8 _source;
uint8 _destination;
uint8 _operation;
public:
BlendFunction(bool enabled,
BlendArg sourceColor, BlendOp operationColor, BlendArg destinationColor,
BlendArg sourceAlpha, BlendOp operationAlpha, BlendArg destinationAlpha) :
_enabled(enabled),
_source(sourceColor | (sourceAlpha << ALPHA_OFFSET)),
_destination(destinationColor | (destinationAlpha << ALPHA_OFFSET)),
_operation(operationColor | (operationAlpha << ALPHA_OFFSET)) {}
BlendFunction(bool enabled, BlendArg source = ONE, BlendOp operation = BLEND_OP_ADD, BlendArg destination = ZERO) :
_enabled(enabled),
_source(source | (source << ALPHA_OFFSET)),
_destination(destination | (destination << ALPHA_OFFSET)),
_operation(operation | (operation << ALPHA_OFFSET)) {}
bool isEnabled() const { return (_enabled != 0); }
BlendArg getSourceColor() const { return BlendArg(_source & COLOR_MASK); }
BlendArg getDestinationColor() const { return BlendArg(_destination & COLOR_MASK); }
BlendOp getOperationColor() const { return BlendOp(_operation & COLOR_MASK); }
BlendArg getSourceAlpha() const { return BlendArg((_source & ALPHA_MASK) >> ALPHA_OFFSET); }
BlendArg getDestinationAlpha() const { return BlendArg((_destination & ALPHA_MASK) >> ALPHA_OFFSET); }
BlendOp getOperationAlpha() const { return BlendOp((_operation & ALPHA_MASK) >> ALPHA_OFFSET); }
int32 getRaw() const { return *(reinterpret_cast<const int32*>(this)); }
BlendFunction(int32 raw) { *(reinterpret_cast<int32*>(this)) = raw; }
bool operator== (const BlendFunction& right) const { return getRaw() == right.getRaw(); }
bool operator!= (const BlendFunction& right) const { return getRaw() != right.getRaw(); }
};
// The Data class is the full explicit description of the State class field values.
// Useful for having one const static called DEFAULT for reference, and for the gpu::Backend to keep track of the current value
class Data {
public:
float depthBias = 0.0f;
float depthBiasSlopeScale = 0.0f;
DepthTest depthTest = DepthTest(false, true, LESS);
StencilActivation stencilActivation = StencilActivation(false);
StencilTest stencilTestFront = StencilTest(0, 0xff, ALWAYS, STENCIL_OP_KEEP, STENCIL_OP_KEEP, STENCIL_OP_KEEP);
StencilTest stencilTestBack = StencilTest(0, 0xff, ALWAYS, STENCIL_OP_KEEP, STENCIL_OP_KEEP, STENCIL_OP_KEEP);
uint32 sampleMask = 0xFFFFFFFF;
BlendFunction blendFunction = BlendFunction(false);
uint8 fillMode = FILL_FACE;
uint8 cullMode = CULL_NONE;
uint8 colorWriteMask = WRITE_ALL;
bool frontFaceClockwise : 1;
bool depthClampEnable : 1;
bool scissorEnable : 1;
bool multisampleEnable : 1;
bool antialisedLineEnable : 1;
bool alphaToCoverageEnable : 1;
Data() :
frontFaceClockwise(false),
depthClampEnable(false),
scissorEnable(false),
multisampleEnable(false),
antialisedLineEnable(true),
alphaToCoverageEnable(false)
{}
};
// The unique default values for all the fields
static const Data DEFAULT;
void setFillMode(FillMode fill) { SET_FIELD(FILL_MODE, DEFAULT.fillMode, fill, _values.fillMode); }
FillMode getFillMode() const { return FillMode(_values.fillMode); }
void setCullMode(CullMode cull) { SET_FIELD(CULL_MODE, DEFAULT.cullMode, cull, _values.cullMode); }
CullMode getCullMode() const { return CullMode(_values.cullMode); }
void setFrontFaceClockwise(bool isClockwise) { SET_FIELD(FRONT_FACE_CLOCKWISE, DEFAULT.frontFaceClockwise, isClockwise, _values.frontFaceClockwise); }
bool isFrontFaceClockwise() const { return _values.frontFaceClockwise; }
void setDepthClampEnable(bool enable) { SET_FIELD(DEPTH_CLAMP_ENABLE, DEFAULT.depthClampEnable, enable, _values.depthClampEnable); }
bool isDepthClampEnable() const { return _values.depthClampEnable; }
void setScissorEnable(bool enable) { SET_FIELD(SCISSOR_ENABLE, DEFAULT.scissorEnable, enable, _values.scissorEnable); }
bool isScissorEnable() const { return _values.scissorEnable; }
void setMultisampleEnable(bool enable) { SET_FIELD(MULTISAMPLE_ENABLE, DEFAULT.multisampleEnable, enable, _values.multisampleEnable); }
bool isMultisampleEnable() const { return _values.multisampleEnable; }
void setAntialiasedLineEnable(bool enable) { SET_FIELD(ANTIALISED_LINE_ENABLE, DEFAULT.antialisedLineEnable, enable, _values.antialisedLineEnable); }
bool isAntialiasedLineEnable() const { return _values.antialisedLineEnable; }
// Depth Bias
void setDepthBias(float bias) { SET_FIELD(DEPTH_BIAS, DEFAULT.depthBias, bias, _values.depthBias); }
float getDepthBias() const { return _values.depthBias; }
void setDepthBiasSlopeScale(float scale) { SET_FIELD(DEPTH_BIAS_SLOPE_SCALE, DEFAULT.depthBiasSlopeScale, scale, _values.depthBiasSlopeScale); }
float getDepthBiasSlopeScale() const { return _values.depthBiasSlopeScale; }
// Depth Test
void setDepthTest(DepthTest depthTest) { SET_FIELD(DEPTH_TEST, DEFAULT.depthTest, depthTest, _values.depthTest); }
void setDepthTest(bool enable, bool writeMask, ComparisonFunction func) { setDepthTest(DepthTest(enable, writeMask, func)); }
DepthTest getDepthTest() const { return _values.depthTest; }
bool isDepthTestEnabled() const { return getDepthTest().isEnabled(); }
bool getDepthTestWriteMask() const { return getDepthTest().getWriteMask(); }
ComparisonFunction getDepthTestFunc() const { return getDepthTest().getFunction(); }
// Stencil test
void setStencilTest(bool enabled, uint8 frontWriteMask, StencilTest frontTest, uint8 backWriteMask, StencilTest backTest) {
SET_FIELD(STENCIL_ACTIVATION, DEFAULT.stencilActivation, StencilActivation(enabled, frontWriteMask, backWriteMask), _values.stencilActivation);
SET_FIELD(STENCIL_TEST_FRONT, DEFAULT.stencilTestFront, frontTest, _values.stencilTestFront);
SET_FIELD(STENCIL_TEST_BACK, DEFAULT.stencilTestBack, backTest, _values.stencilTestBack); }
void setStencilTest(bool enabled, uint8 frontWriteMask, StencilTest frontTest) {
setStencilTest(enabled, frontWriteMask, frontTest, frontWriteMask, frontTest); }
StencilActivation getStencilActivation() const { return _values.stencilActivation; }
StencilTest getStencilTestFront() const { return _values.stencilTestFront; }
StencilTest getStencilTestBack() const { return _values.stencilTestBack; }
bool isStencilEnabled() const { return getStencilActivation().isEnabled(); }
uint8 getStencilWriteMaskFront() const { return getStencilActivation().getWriteMaskFront(); }
uint8 getStencilWriteMaskBack() const { return getStencilActivation().getWriteMaskBack(); }
// Alpha to coverage
void setAlphaToCoverageEnable(bool enable) { SET_FIELD(ALPHA_TO_COVERAGE_ENABLE, DEFAULT.alphaToCoverageEnable, enable, _values.alphaToCoverageEnable); }
bool isAlphaToCoverageEnabled() const { return _values.alphaToCoverageEnable; }
// Sample mask
void setSampleMask(uint32 mask) { SET_FIELD(SAMPLE_MASK, DEFAULT.sampleMask, mask, _values.sampleMask); }
uint32 getSampleMask() const { return _values.sampleMask; }
// Blend Function
void setBlendFunction(BlendFunction function) { SET_FIELD(BLEND_FUNCTION, DEFAULT.blendFunction, function, _values.blendFunction); }
BlendFunction getBlendFunction() const { return _values.blendFunction; }
void setBlendFunction(bool enabled, BlendArg sourceColor, BlendOp operationColor, BlendArg destinationColor, BlendArg sourceAlpha, BlendOp operationAlpha, BlendArg destinationAlpha) {
setBlendFunction(BlendFunction(enabled, sourceColor, operationColor, destinationColor, sourceAlpha, operationAlpha, destinationAlpha)); }
void setBlendFunction(bool enabled, BlendArg source, BlendOp operation, BlendArg destination) {
setBlendFunction(BlendFunction(enabled, source, operation, destination)); }
bool isBlendEnabled() const { return getBlendFunction().isEnabled(); }
// Color write mask
void setColorWriteMask(uint8 mask) { SET_FIELD(COLOR_WRITE_MASK, DEFAULT.colorWriteMask, mask, _values.colorWriteMask); }
void setColorWriteMask(bool red, bool green, bool blue, bool alpha) { uint32 value = ((WRITE_RED * red) | (WRITE_GREEN * green) | (WRITE_BLUE * blue) | (WRITE_ALPHA * alpha)); SET_FIELD(COLOR_WRITE_MASK, DEFAULT.colorWriteMask, value, _values.colorWriteMask); }
uint8 getColorWriteMask() const { return _values.colorWriteMask; }
// All the possible fields
enum Field {
FILL_MODE,
CULL_MODE,
FRONT_FACE_CLOCKWISE,
DEPTH_CLAMP_ENABLE,
SCISSOR_ENABLE,
MULTISAMPLE_ENABLE,
ANTIALISED_LINE_ENABLE,
DEPTH_BIAS,
DEPTH_BIAS_SLOPE_SCALE,
DEPTH_TEST,
STENCIL_ACTIVATION,
STENCIL_TEST_FRONT,
STENCIL_TEST_BACK,
SAMPLE_MASK,
ALPHA_TO_COVERAGE_ENABLE,
BLEND_FUNCTION,
COLOR_WRITE_MASK,
NUM_FIELDS, // not a valid field, just the count
};
// The signature of the state tells which fields of the state are not default;
// this way, during rendering, the Backend can compare its current state and try to minimize the work to do
typedef std::bitset<NUM_FIELDS> Signature;
Signature getSignature() const { return _signature; }
static Signature evalSignature(const Data& state);
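// Illustration (assumption, not part of this header): a Backend could walk the signature to touch
// only the fields that differ from DEFAULT, e.g.
//     for (int field = 0; field < NUM_FIELDS; ++field) {
//         if (state.getSignature().test(field)) {
//             // apply just this field; everything else can stay at its default value
//         }
//     }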
// For convenience, create a State from the values directly
State(const Data& values);
const Data& getValues() const { return _values; }
protected:
State(const State& state);
State& operator=(const State& state);
Data _values;
Signature _signature{0};
Stamp _stamp{0};
// This shouldn't be used by anything other than the Backend class, with the proper casting.
mutable GPUObject* _gpuObject = nullptr;
void setGPUObject(GPUObject* gpuObject) const { _gpuObject = gpuObject; }
GPUObject* getGPUObject() const { return _gpuObject; }
friend class Backend;
};
typedef std::shared_ptr< State > StatePointer;
typedef std::vector< StatePointer > States;
};
#endif
//
// State
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_State_h
#define hifi_gpu_State_h
#include "Format.h"
#include <memory>
#include <vector>
#include <unordered_map>
#include <bitset>
// Why a macro and not a fancy template, you may ask?
// Because some of the fields are bools packed tightly in the State::Cache class,
// and that just doesn't work well with template T& variable manipulation...
#define SET_FIELD(field, defaultValue, value, dest) {\
dest = value;\
if (value == defaultValue) {\
_signature.reset(field);\
} else {\
_signature.set(field);\
}\
_stamp++;\
}\
namespace gpu {
class GPUObject;
class State {
public:
State();
virtual ~State();
Stamp getStamp() const { return _stamp; }
typedef ::gpu::ComparisonFunction ComparisonFunction;
enum FillMode {
FILL_POINT = 0,
FILL_LINE,
FILL_FACE,
NUM_FILL_MODES,
};
enum CullMode {
CULL_NONE = 0,
CULL_FRONT,
CULL_BACK,
NUM_CULL_MODES,
};
enum StencilOp {
STENCIL_OP_KEEP = 0,
STENCIL_OP_ZERO,
STENCIL_OP_REPLACE,
STENCIL_OP_INCR_SAT,
STENCIL_OP_DECR_SAT,
STENCIL_OP_INVERT,
STENCIL_OP_INCR,
STENCIL_OP_DECR,
NUM_STENCIL_OPS,
};
enum BlendArg {
ZERO = 0,
ONE,
SRC_COLOR,
INV_SRC_COLOR,
SRC_ALPHA,
INV_SRC_ALPHA,
DEST_ALPHA,
INV_DEST_ALPHA,
DEST_COLOR,
INV_DEST_COLOR,
SRC_ALPHA_SAT,
FACTOR_COLOR,
INV_FACTOR_COLOR,
FACTOR_ALPHA,
INV_FACTOR_ALPHA,
NUM_BLEND_ARGS,
};
enum BlendOp {
BLEND_OP_ADD = 0,
BLEND_OP_SUBTRACT,
BLEND_OP_REV_SUBTRACT,
BLEND_OP_MIN,
BLEND_OP_MAX,
NUM_BLEND_OPS,
};
enum ColorMask
{
WRITE_NONE = 0,
WRITE_RED = 1,
WRITE_GREEN = 2,
WRITE_BLUE = 4,
WRITE_ALPHA = 8,
WRITE_ALL = (WRITE_RED | WRITE_GREEN | WRITE_BLUE | WRITE_ALPHA ),
};
class DepthTest {
uint8 _function = LESS;
uint8 _writeMask = true;
uint8 _enabled = false;
uint8 _spare = 0;
public:
DepthTest(bool enabled = false, bool writeMask = true, ComparisonFunction func = LESS) :
_function(func), _writeMask(writeMask), _enabled(enabled) {}
bool isEnabled() const { return _enabled != 0; }
ComparisonFunction getFunction() const { return ComparisonFunction(_function); }
uint8 getWriteMask() const { return _writeMask; }
int32 getRaw() const { return *(reinterpret_cast<const int32*>(this)); }
DepthTest(int32 raw) { *(reinterpret_cast<int32*>(this)) = raw; }
bool operator== (const DepthTest& right) const { return getRaw() == right.getRaw(); }
bool operator!= (const DepthTest& right) const { return getRaw() != right.getRaw(); }
};
class StencilTest {
static const int FUNC_MASK = 0x000f;
static const int FAIL_OP_MASK = 0x00f0;
static const int DEPTH_FAIL_OP_MASK = 0x0f00;
static const int PASS_OP_MASK = 0xf000;
static const int FAIL_OP_OFFSET = 4;
static const int DEPTH_FAIL_OP_OFFSET = 8;
static const int PASS_OP_OFFSET = 12;
uint16 _functionAndOperations;
uint8 _reference = 0;
uint8 _readMask = 0xff;
public:
StencilTest(uint8 reference = 0, uint8 readMask =0xFF, ComparisonFunction func = ALWAYS, StencilOp failOp = STENCIL_OP_KEEP, StencilOp depthFailOp = STENCIL_OP_KEEP, StencilOp passOp = STENCIL_OP_KEEP) :
_functionAndOperations(func | (failOp << FAIL_OP_OFFSET) | (depthFailOp << DEPTH_FAIL_OP_OFFSET) | (passOp << PASS_OP_OFFSET)),
_reference(reference), _readMask(readMask)
{}
ComparisonFunction getFunction() const { return ComparisonFunction(_functionAndOperations & FUNC_MASK); }
StencilOp getFailOp() const { return StencilOp((_functionAndOperations & FAIL_OP_MASK) >> FAIL_OP_OFFSET); }
StencilOp getDepthFailOp() const { return StencilOp((_functionAndOperations & DEPTH_FAIL_OP_MASK) >> DEPTH_FAIL_OP_OFFSET); }
StencilOp getPassOp() const { return StencilOp((_functionAndOperations & PASS_OP_MASK) >> PASS_OP_OFFSET); }
uint8 getReference() const { return _reference; }
uint8 getReadMask() const { return _readMask; }
int32 getRaw() const { return *(reinterpret_cast<const int32*>(this)); }
StencilTest(int32 raw) { *(reinterpret_cast<int32*>(this)) = raw; }
bool operator== (const StencilTest& right) const { return getRaw() == right.getRaw(); }
bool operator!= (const StencilTest& right) const { return getRaw() != right.getRaw(); }
};
class StencilActivation {
uint8 _frontWriteMask = 0xFF;
uint8 _backWriteMask = 0xFF;
uint16 _enabled = 0;
public:
StencilActivation(bool enabled, uint8 frontWriteMask = 0xFF, uint8 backWriteMask = 0xFF) :
_frontWriteMask(frontWriteMask), _backWriteMask(backWriteMask), _enabled(enabled) {}
bool isEnabled() const { return (_enabled != 0); }
uint8 getWriteMaskFront() const { return _frontWriteMask; }
uint8 getWriteMaskBack() const { return _backWriteMask; }
int32 getRaw() const { return *(reinterpret_cast<const int32*>(this)); }
StencilActivation(int32 raw) { *(reinterpret_cast<int32*>(this)) = raw; }
bool operator== (const StencilActivation& right) const { return getRaw() == right.getRaw(); }
bool operator!= (const StencilActivation& right) const { return getRaw() != right.getRaw(); }
};
class BlendFunction {
static const int COLOR_MASK = 0x0f;
static const int ALPHA_MASK = 0xf0;
static const int ALPHA_OFFSET = 4;
uint8 _enabled;
uint8 _source;
uint8 _destination;
uint8 _operation;
public:
BlendFunction(bool enabled,
BlendArg sourceColor, BlendOp operationColor, BlendArg destinationColor,
BlendArg sourceAlpha, BlendOp operationAlpha, BlendArg destinationAlpha) :
_enabled(enabled),
_source(sourceColor | (sourceAlpha << ALPHA_OFFSET)),
_destination(destinationColor | (destinationAlpha << ALPHA_OFFSET)),
_operation(operationColor | (operationAlpha << ALPHA_OFFSET)) {}
BlendFunction(bool enabled, BlendArg source = ONE, BlendOp operation = BLEND_OP_ADD, BlendArg destination = ZERO) :
_enabled(enabled),
_source(source | (source << ALPHA_OFFSET)),
_destination(destination | (destination << ALPHA_OFFSET)),
_operation(operation | (operation << ALPHA_OFFSET)) {}
bool isEnabled() const { return (_enabled != 0); }
BlendArg getSourceColor() const { return BlendArg(_source & COLOR_MASK); }
BlendArg getDestinationColor() const { return BlendArg(_destination & COLOR_MASK); }
BlendOp getOperationColor() const { return BlendOp(_operation & COLOR_MASK); }
BlendArg getSourceAlpha() const { return BlendArg((_source & ALPHA_MASK) >> ALPHA_OFFSET); }
BlendArg getDestinationAlpha() const { return BlendArg((_destination & ALPHA_MASK) >> ALPHA_OFFSET); }
BlendOp getOperationAlpha() const { return BlendOp((_operation & ALPHA_MASK) >> ALPHA_OFFSET); }
int32 getRaw() const { return *(reinterpret_cast<const int32*>(this)); }
BlendFunction(int32 raw) { *(reinterpret_cast<int32*>(this)) = raw; }
bool operator== (const BlendFunction& right) const { return getRaw() == right.getRaw(); }
bool operator!= (const BlendFunction& right) const { return getRaw() != right.getRaw(); }
};
// The Data class is the full explicit description of the State class field values.
// Useful for having one const static called DEFAULT for reference, and for the gpu::Backend to keep track of the current value
class Data {
public:
float depthBias = 0.0f;
float depthBiasSlopeScale = 0.0f;
DepthTest depthTest = DepthTest(false, true, LESS);
StencilActivation stencilActivation = StencilActivation(false);
StencilTest stencilTestFront = StencilTest(0, 0xff, ALWAYS, STENCIL_OP_KEEP, STENCIL_OP_KEEP, STENCIL_OP_KEEP);
StencilTest stencilTestBack = StencilTest(0, 0xff, ALWAYS, STENCIL_OP_KEEP, STENCIL_OP_KEEP, STENCIL_OP_KEEP);
uint32 sampleMask = 0xFFFFFFFF;
BlendFunction blendFunction = BlendFunction(false);
uint8 fillMode = FILL_FACE;
uint8 cullMode = CULL_NONE;
uint8 colorWriteMask = WRITE_ALL;
bool frontFaceClockwise : 1;
bool depthClampEnable : 1;
bool scissorEnable : 1;
bool multisampleEnable : 1;
bool antialisedLineEnable : 1;
bool alphaToCoverageEnable : 1;
Data() :
frontFaceClockwise(false),
depthClampEnable(false),
scissorEnable(false),
multisampleEnable(false),
antialisedLineEnable(true),
alphaToCoverageEnable(false)
{}
};
// The unique default values for all the fields
static const Data DEFAULT;
void setFillMode(FillMode fill) { SET_FIELD(FILL_MODE, DEFAULT.fillMode, fill, _values.fillMode); }
FillMode getFillMode() const { return FillMode(_values.fillMode); }
void setCullMode(CullMode cull) { SET_FIELD(CULL_MODE, DEFAULT.cullMode, cull, _values.cullMode); }
CullMode getCullMode() const { return CullMode(_values.cullMode); }
void setFrontFaceClockwise(bool isClockwise) { SET_FIELD(FRONT_FACE_CLOCKWISE, DEFAULT.frontFaceClockwise, isClockwise, _values.frontFaceClockwise); }
bool isFrontFaceClockwise() const { return _values.frontFaceClockwise; }
void setDepthClampEnable(bool enable) { SET_FIELD(DEPTH_CLAMP_ENABLE, DEFAULT.depthClampEnable, enable, _values.depthClampEnable); }
bool isDepthClampEnable() const { return _values.depthClampEnable; }
void setScissorEnable(bool enable) { SET_FIELD(SCISSOR_ENABLE, DEFAULT.scissorEnable, enable, _values.scissorEnable); }
bool isScissorEnable() const { return _values.scissorEnable; }
void setMultisampleEnable(bool enable) { SET_FIELD(MULTISAMPLE_ENABLE, DEFAULT.multisampleEnable, enable, _values.multisampleEnable); }
bool isMultisampleEnable() const { return _values.multisampleEnable; }
void setAntialiasedLineEnable(bool enable) { SET_FIELD(ANTIALISED_LINE_ENABLE, DEFAULT.antialisedLineEnable, enable, _values.antialisedLineEnable); }
bool isAntialiasedLineEnable() const { return _values.antialisedLineEnable; }
// Depth Bias
void setDepthBias(float bias) { SET_FIELD(DEPTH_BIAS, DEFAULT.depthBias, bias, _values.depthBias); }
float getDepthBias() const { return _values.depthBias; }
void setDepthBiasSlopeScale(float scale) { SET_FIELD(DEPTH_BIAS_SLOPE_SCALE, DEFAULT.depthBiasSlopeScale, scale, _values.depthBiasSlopeScale); }
float getDepthBiasSlopeScale() const { return _values.depthBiasSlopeScale; }
// Depth Test
void setDepthTest(DepthTest depthTest) { SET_FIELD(DEPTH_TEST, DEFAULT.depthTest, depthTest, _values.depthTest); }
void setDepthTest(bool enable, bool writeMask, ComparisonFunction func) { setDepthTest(DepthTest(enable, writeMask, func)); }
DepthTest getDepthTest() const { return _values.depthTest; }
bool isDepthTestEnabled() const { return getDepthTest().isEnabled(); }
uint8 getDepthTestWriteMask() const { return getDepthTest().getWriteMask(); }
ComparisonFunction getDepthTestFunc() const { return getDepthTest().getFunction(); }
// Stencil test
void setStencilTest(bool enabled, uint8 frontWriteMask, StencilTest frontTest, uint8 backWriteMask, StencilTest backTest) {
SET_FIELD(STENCIL_ACTIVATION, DEFAULT.stencilActivation, StencilActivation(enabled, frontWriteMask, backWriteMask), _values.stencilActivation);
SET_FIELD(STENCIL_TEST_FRONT, DEFAULT.stencilTestFront, frontTest, _values.stencilTestFront);
SET_FIELD(STENCIL_TEST_BACK, DEFAULT.stencilTestBack, backTest, _values.stencilTestBack); }
void setStencilTest(bool enabled, uint8 frontWriteMask, StencilTest frontTest) {
setStencilTest(enabled, frontWriteMask, frontTest, frontWriteMask, frontTest); }
StencilActivation getStencilActivation() const { return _values.stencilActivation; }
StencilTest getStencilTestFront() const { return _values.stencilTestFront; }
StencilTest getStencilTestBack() const { return _values.stencilTestBack; }
bool isStencilEnabled() const { return getStencilActivation().isEnabled(); }
uint8 getStencilWriteMaskFront() const { return getStencilActivation().getWriteMaskFront(); }
uint8 getStencilWriteMaskBack() const { return getStencilActivation().getWriteMaskBack(); }
// Alpha to coverage
void setAlphaToCoverageEnable(bool enable) { SET_FIELD(ALPHA_TO_COVERAGE_ENABLE, DEFAULT.alphaToCoverageEnable, enable, _values.alphaToCoverageEnable); }
bool isAlphaToCoverageEnabled() const { return _values.alphaToCoverageEnable; }
// Sample mask
void setSampleMask(uint32 mask) { SET_FIELD(SAMPLE_MASK, DEFAULT.sampleMask, mask, _values.sampleMask); }
uint32 getSampleMask() const { return _values.sampleMask; }
// Blend Function
void setBlendFunction(BlendFunction function) { SET_FIELD(BLEND_FUNCTION, DEFAULT.blendFunction, function, _values.blendFunction); }
BlendFunction getBlendFunction() const { return _values.blendFunction; }
void setBlendFunction(bool enabled, BlendArg sourceColor, BlendOp operationColor, BlendArg destinationColor, BlendArg sourceAlpha, BlendOp operationAlpha, BlendArg destinationAlpha) {
setBlendFunction(BlendFunction(enabled, sourceColor, operationColor, destinationColor, sourceAlpha, operationAlpha, destinationAlpha)); }
void setBlendFunction(bool enabled, BlendArg source, BlendOp operation, BlendArg destination) {
setBlendFunction(BlendFunction(enabled, source, operation, destination)); }
bool isBlendEnabled() const { return getBlendFunction().isEnabled(); }
// Color write mask
void setColorWriteMask(uint8 mask) { SET_FIELD(COLOR_WRITE_MASK, DEFAULT.colorWriteMask, mask, _values.colorWriteMask); }
void setColorWriteMask(bool red, bool green, bool blue, bool alpha) { uint32 value = ((WRITE_RED * red) | (WRITE_GREEN * green) | (WRITE_BLUE * blue) | (WRITE_ALPHA * alpha)); SET_FIELD(COLOR_WRITE_MASK, DEFAULT.colorWriteMask, value, _values.colorWriteMask); }
uint8 getColorWriteMask() const { return _values.colorWriteMask; }
// All the possible fields
enum Field {
FILL_MODE,
CULL_MODE,
FRONT_FACE_CLOCKWISE,
DEPTH_CLAMP_ENABLE,
SCISSOR_ENABLE,
MULTISAMPLE_ENABLE,
ANTIALISED_LINE_ENABLE,
DEPTH_BIAS,
DEPTH_BIAS_SLOPE_SCALE,
DEPTH_TEST,
STENCIL_ACTIVATION,
STENCIL_TEST_FRONT,
STENCIL_TEST_BACK,
SAMPLE_MASK,
ALPHA_TO_COVERAGE_ENABLE,
BLEND_FUNCTION,
COLOR_WRITE_MASK,
NUM_FIELDS, // not a valid field, just the count
};
// The signature of the state tells which fields of the state are not default;
// this way, during rendering, the Backend can compare its current state and try to minimize the work to do.
typedef std::bitset<NUM_FIELDS> Signature;
Signature getSignature() const { return _signature; }
static Signature evalSignature(const Data& state);
// For convenience, create a State from the values directly
State(const Data& values);
const Data& getValues() const { return _values; }
protected:
State(const State& state);
State& operator=(const State& state);
Data _values;
Signature _signature{0};
Stamp _stamp{0};
// This shouldn't be used by anything other than the Backend class, with the proper casting.
mutable GPUObject* _gpuObject = nullptr;
void setGPUObject(GPUObject* gpuObject) const { _gpuObject = gpuObject; }
GPUObject* getGPUObject() const { return _gpuObject; }
friend class Backend;
};
typedef std::shared_ptr< State > StatePointer;
typedef std::vector< StatePointer > States;
};
#endif
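A minimal usage sketch (not part of this diff) of the State API above: per the comment on the Signature, each setter keeps the bitset in sync so it reflects which fields are not default, which is what lets the Backend skip untouched fields when switching pipelines. The class-scoped enum spelling (gpu::State::CULL_FRONT, gpu::State::LESS) follows the gpu::State::CULL_FRONT usage visible in the Model.cpp hunks below; the function name and wrapper are assumptions for illustration only.

#include <memory>
// assumes the gpu library headers from this repository, e.g. "gpu/State.h"

std::shared_ptr<gpu::State> makeExampleState() {
    auto state = std::make_shared<gpu::State>();
    state->setCullMode(gpu::State::CULL_FRONT);         // CULL_MODE now differs from DEFAULT
    state->setDepthTest(true, true, gpu::State::LESS);  // DEPTH_TEST now differs from DEFAULT
    // every other field still compares equal to the DEFAULT Data, so
    // getSignature() should report only the two fields touched above
    auto signature = state->getSignature();
    (void)signature;
    return state;
}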

View file

@ -28,10 +28,11 @@ if (WIN32)
target_link_libraries(${TARGET_NAME} ${OPENVR_LIBRARIES})
endif()
#add_dependency_external_projects(Sixense)
#find_package(Sixense REQUIRED)
#target_include_directories(${TARGET_NAME} PRIVATE ${SIXENSE_INCLUDE_DIRS})
#target_link_libraries(${TARGET_NAME} ${SIXENSE_LIBRARIES})
add_dependency_external_projects(Sixense)
find_package(Sixense REQUIRED)
target_include_directories(${TARGET_NAME} PRIVATE ${SIXENSE_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${SIXENSE_LIBRARIES})
message( ${SIXENSE_INCLUDE_DIRS})
# perform standard include and linking for found externals
foreach(EXTERNAL ${OPTIONAL_EXTERNALS})
@ -69,4 +70,4 @@ foreach(EXTERNAL ${OPTIONAL_EXTERNALS})
add_definitions(-DSIXENSE_LIB_FILENAME=\"${${${EXTERNAL}_UPPERCASE}_LIBRARY_RELEASE}\")
endif ()
endif ()
endforeach()

View file

@ -92,7 +92,7 @@ void ObjectActionSpring::updateActionWorker(btScalar deltaTimeStep) {
// dQ = Q1 * Q0^
btQuaternion deltaQ = target * bodyRotation.inverse();
float angle = deltaQ.getAngle();
const float MIN_ANGLE = 1.0e-4;
const float MIN_ANGLE = 1.0e-4f;
if (angle > MIN_ANGLE) {
targetVelocity = (angle / _angularTimeScale) * deltaQ.getAxis();
}
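For reference, a self-contained sketch of the rotation step in this hunk: the delta quaternion dQ = Q1 * Q0^ carries the remaining rotation, and dividing its angle by the action's time scale gives an angular velocity along dQ's axis. Bullet's btQuaternion/btVector3 are assumed; the standalone function and its name are for illustration only, not the actual action code.

#include <btBulletDynamicsCommon.h>

btVector3 angularVelocityTowards(const btQuaternion& bodyRotation,
                                 const btQuaternion& target,
                                 btScalar angularTimeScale) {
    // dQ = Q1 * Q0^ : the rotation still needed to reach the target
    btQuaternion deltaQ = target * bodyRotation.inverse();
    btScalar angle = deltaQ.getAngle();  // radians
    const btScalar MIN_ANGLE = 1.0e-4f;  // ignore numerically tiny rotations
    if (angle > MIN_ANGLE) {
        // spin about the delta axis fast enough to close the gap in roughly angularTimeScale seconds
        return (angle / angularTimeScale) * deltaQ.getAxis();
    }
    return btVector3(0.0f, 0.0f, 0.0f);
}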

View file

@ -112,14 +112,14 @@ void Model::RenderPipelineLib::addRenderPipeline(Model::RenderKey key,
gpu::ShaderPointer program = gpu::ShaderPointer(gpu::Shader::createProgram(vertexShader, pixelShader));
gpu::Shader::makeProgram(*program, slotBindings);
auto locations = std::make_shared<Locations>();
initLocations(program, *locations);
auto state = std::make_shared<gpu::State>();
// Backface on shadow
if (key.isShadow()) {
state->setCullMode(gpu::State::CULL_FRONT);
@ -140,36 +140,36 @@ void Model::RenderPipelineLib::addRenderPipeline(Model::RenderKey key,
// Good to go add the brand new pipeline
auto pipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, state));
insert(value_type(key.getRaw(), RenderPipeline(pipeline, locations)));
if (!key.isWireFrame()) {
RenderKey wireframeKey(key.getRaw() | RenderKey::IS_WIREFRAME);
auto wireframeState = std::make_shared<gpu::State>(state->getValues());
wireframeState->setFillMode(gpu::State::FILL_LINE);
// create a new RenderPipeline with the same shader side and the wireframeState
auto wireframePipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, wireframeState));
insert(value_type(wireframeKey.getRaw(), RenderPipeline(wireframePipeline, locations)));
}
// If not a shadow pass, create the mirror version from the same state, just change the FrontFace
if (!key.isShadow()) {
RenderKey mirrorKey(key.getRaw() | RenderKey::IS_MIRROR);
auto mirrorState = std::make_shared<gpu::State>(state->getValues());
// create a new RenderPipeline with the same shader side and the mirrorState
auto mirrorPipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, mirrorState));
insert(value_type(mirrorKey.getRaw(), RenderPipeline(mirrorPipeline, locations)));
if (!key.isWireFrame()) {
RenderKey wireframeKey(key.getRaw() | RenderKey::IS_MIRROR | RenderKey::IS_WIREFRAME);
auto wireframeState = std::make_shared<gpu::State>(state->getValues());
wireframeState->setFillMode(gpu::State::FILL_LINE);
// create a new RenderPipeline with the same shader side and the wireframeState
auto wireframePipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, wireframeState));
insert(value_type(wireframeKey.getRaw(), RenderPipeline(wireframePipeline, locations)));
@ -214,12 +214,12 @@ void Model::setScaleInternal(const glm::vec3& scale) {
}
}
void Model::setOffset(const glm::vec3& offset) {
_offset = offset;
// if someone manually sets our offset, then we are no longer snapped to center
_snapModelToRegistrationPoint = false;
_snappedToRegistrationPoint = false;
}
QVector<JointState> Model::createJointStates(const FBXGeometry& geometry) {
@ -267,7 +267,7 @@ void Model::init() {
auto modelLightmapNormalSpecularMapPixel = gpu::ShaderPointer(gpu::Shader::createPixel(std::string(model_lightmap_normal_specular_map_frag)));
// Fill the renderPipelineLib
_renderPipelineLib.addRenderPipeline(
RenderKey(0),
modelVertex, modelPixel);
@ -284,7 +284,7 @@ void Model::init() {
RenderKey(RenderKey::HAS_TANGENTS | RenderKey::HAS_SPECULAR),
modelNormalMapVertex, modelNormalSpecularMapPixel);
_renderPipelineLib.addRenderPipeline(
RenderKey(RenderKey::IS_TRANSLUCENT),
modelVertex, modelTranslucentPixel);
@ -292,7 +292,7 @@ void Model::init() {
_renderPipelineLib.addRenderPipeline(
RenderKey(RenderKey::IS_TRANSLUCENT | RenderKey::HAS_LIGHTMAP),
modelVertex, modelTranslucentPixel);
_renderPipelineLib.addRenderPipeline(
RenderKey(RenderKey::HAS_TANGENTS | RenderKey::IS_TRANSLUCENT),
modelNormalMapVertex, modelTranslucentPixel);
@ -440,15 +440,15 @@ bool Model::updateGeometry() {
}
_geometry->setLoadPriority(this, -_lodDistance);
_geometry->ensureLoading();
if (needToRebuild) {
const FBXGeometry& fbxGeometry = geometry->getFBXGeometry();
foreach (const FBXMesh& mesh, fbxGeometry.meshes) {
MeshState state;
state.clusterMatrices.resize(mesh.clusters.size());
state.cauterizedClusterMatrices.resize(mesh.clusters.size());
_meshStates.append(state);
auto buffer = std::make_shared<gpu::Buffer>();
if (!mesh.blendshapes.isEmpty()) {
buffer->resize((mesh.vertices.size() + mesh.normals.size()) * sizeof(glm::vec3));
@ -486,7 +486,7 @@ void Model::initJointStates(QVector<JointState> states) {
rightShoulderJointIndex);
}
bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const glm::vec3& direction, float& distance,
BoxFace& face, QString& extraInfo, bool pickAgainstTriangles) {
bool intersectedSomething = false;
@ -495,7 +495,7 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
if (!isActive()) {
return intersectedSomething;
}
// extents is the entity relative, scaled, centered extents of the entity
glm::vec3 position = _translation;
glm::mat4 rotation = glm::mat4_cast(_rotation);
@ -504,7 +504,7 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
glm::mat4 worldToModelMatrix = glm::inverse(modelToWorldMatrix);
Extents modelExtents = getMeshExtents(); // NOTE: unrotated
glm::vec3 dimensions = modelExtents.maximum - modelExtents.minimum;
glm::vec3 corner = -(dimensions * _registrationPoint); // since we're going to do the ray picking in the model frame of reference
AABox modelFrameBox(corner, dimensions);
@ -543,7 +543,7 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
int t = 0;
foreach (const Triangle& triangle, meshTriangles) {
t++;
float thisTriangleDistance;
if (findRayTriangleIntersection(origin, direction, triangle, thisTriangleDistance)) {
if (thisTriangleDistance < bestDistance) {
@ -562,7 +562,7 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
extraInfo = geometry.getModelNameOfMesh(subMeshIndex);
}
}
}
}
subMeshIndex++;
}
_mutex.unlock();
@ -570,7 +570,7 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
if (intersectedSomething) {
distance = bestDistance;
}
return intersectedSomething;
}
@ -582,22 +582,22 @@ bool Model::convexHullContains(glm::vec3 point) {
if (!isActive()) {
return false;
}
// extents is the entity relative, scaled, centered extents of the entity
glm::vec3 position = _translation;
glm::mat4 rotation = glm::mat4_cast(_rotation);
glm::mat4 translation = glm::translate(position);
glm::mat4 modelToWorldMatrix = translation * rotation;
glm::mat4 worldToModelMatrix = glm::inverse(modelToWorldMatrix);
Extents modelExtents = getMeshExtents(); // NOTE: unrotated
glm::vec3 dimensions = modelExtents.maximum - modelExtents.minimum;
glm::vec3 corner = -(dimensions * _registrationPoint);
AABox modelFrameBox(corner, dimensions);
glm::vec3 modelFramePoint = glm::vec3(worldToModelMatrix * glm::vec4(point, 1.0f));
// we can use the AABox's contains() by mapping our point into the model frame
// and testing there.
if (modelFrameBox.contains(modelFramePoint)){
@ -605,7 +605,7 @@ bool Model::convexHullContains(glm::vec3 point) {
if (!_calculatedMeshTrianglesValid) {
recalculateMeshBoxes(true);
}
// If we are inside the models box, then consider the submeshes...
int subMeshIndex = 0;
foreach(const AABox& subMeshBox, _calculatedMeshBoxes) {
@ -619,7 +619,7 @@ bool Model::convexHullContains(glm::vec3 point) {
insideMesh = false;
break;
}
}
if (insideMesh) {
// It's inside this mesh, return true.
@ -658,7 +658,7 @@ void Model::recalculateMeshPartOffsets() {
// Any script might trigger findRayIntersectionAgainstSubMeshes (and maybe convexHullContains), so these
// can occur multiple times. In addition, rendering does its own ray picking in order to decide which
// entity-scripts to call. I think it would be best to do the picking once per frame (in cpu, or gpu if possible)
// and then have the calls use the most recent such result.
void Model::recalculateMeshBoxes(bool pickAgainstTriangles) {
PROFILE_RANGE(__FUNCTION__);
bool calculatedMeshTrianglesNeeded = pickAgainstTriangles && !_calculatedMeshTrianglesValid;
@ -703,7 +703,7 @@ void Model::recalculateMeshBoxes(bool pickAgainstTriangles) {
glm::vec3 mv1 = glm::vec3(mesh.modelTransform * glm::vec4(mesh.vertices[i1], 1.0f));
glm::vec3 mv2 = glm::vec3(mesh.modelTransform * glm::vec4(mesh.vertices[i2], 1.0f));
glm::vec3 mv3 = glm::vec3(mesh.modelTransform * glm::vec4(mesh.vertices[i3], 1.0f));
// track the mesh parts in model space
if (!atLeastOnePointInBounds) {
thisPartBounds.setBox(mv0, 0.0f);
@ -719,18 +719,18 @@ void Model::recalculateMeshBoxes(bool pickAgainstTriangles) {
glm::vec3 v1 = calculateScaledOffsetPoint(mv1);
glm::vec3 v2 = calculateScaledOffsetPoint(mv2);
glm::vec3 v3 = calculateScaledOffsetPoint(mv3);
// Sam's recommended triangle slices
Triangle tri1 = { v0, v1, v3 };
Triangle tri2 = { v1, v2, v3 };
// NOTE: Random guy on the internet's recommended triangle slices
//Triangle tri1 = { v0, v1, v2 };
//Triangle tri2 = { v2, v3, v0 };
thisMeshTriangles.push_back(tri1);
thisMeshTriangles.push_back(tri2);
}
}
@ -792,7 +792,7 @@ void Model::renderSetup(RenderArgs* args) {
_dilatedTextures.append(dilated);
}
}
if (!_meshGroupsKnown && isLoaded()) {
segregateMeshGroups();
}
@ -805,7 +805,7 @@ public:
transparent(transparent), model(model), url(model->getURL()), meshIndex(meshIndex), partIndex(partIndex) { }
typedef render::Payload<MeshPartPayload> Payload;
typedef Payload::DataPointer Pointer;
bool transparent;
Model* model;
QUrl url;
@ -814,14 +814,14 @@ public:
};
namespace render {
template <> const ItemKey payloadGetKey(const MeshPartPayload::Pointer& payload) {
if (!payload->model->isVisible()) {
return ItemKey::Builder().withInvisible().build();
}
return payload->transparent ? ItemKey::Builder::transparentShape() : ItemKey::Builder::opaqueShape();
}
template <> const Item::Bound payloadGetBound(const MeshPartPayload::Pointer& payload) {
if (payload) {
return payload->model->getPartBounds(payload->meshIndex, payload->partIndex);
}
@ -875,7 +875,7 @@ bool Model::addToScene(std::shared_ptr<render::Scene> scene, render::PendingChan
_renderItems.insert(item, renderPayload);
somethingAdded = true;
}
_readyWhenAdded = readyToAddToScene();
return somethingAdded;
@ -907,7 +907,7 @@ bool Model::addToScene(std::shared_ptr<render::Scene> scene, render::PendingChan
_renderItems.insert(item, renderPayload);
somethingAdded = true;
}
_readyWhenAdded = readyToAddToScene();
return somethingAdded;
@ -929,7 +929,7 @@ void Model::renderDebugMeshBoxes(gpu::Batch& batch) {
_debugMeshBoxesID = DependencyManager::get<GeometryCache>()->allocateID();
}
QVector<glm::vec3> points;
glm::vec3 brn = box.getCorner();
glm::vec3 bln = brn + glm::vec3(box.getDimensions().x, 0, 0);
glm::vec3 brf = brn + glm::vec3(0, 0, box.getDimensions().z);
@ -963,12 +963,12 @@ void Model::renderDebugMeshBoxes(gpu::Batch& batch) {
{ 1.0f, 1.0f, 0.0f, 1.0f }, // yellow
{ 0.0f, 1.0f, 1.0f, 1.0f }, // cyan
{ 1.0f, 1.0f, 1.0f, 1.0f }, // white
{ 0.0f, 0.5f, 0.0f, 1.0f },
{ 0.0f, 0.0f, 0.5f, 1.0f },
{ 0.5f, 0.0f, 0.5f, 1.0f },
{ 0.5f, 0.5f, 0.0f, 1.0f },
{ 0.0f, 0.5f, 0.5f, 1.0f } };
DependencyManager::get<GeometryCache>()->updateVertices(_debugMeshBoxesID, points, color[colorNdx]);
DependencyManager::get<GeometryCache>()->renderVertices(batch, gpu::LINES, _debugMeshBoxesID);
colorNdx++;
@ -1003,7 +1003,7 @@ Extents Model::getUnscaledMeshExtents() const {
if (!isActive()) {
return Extents();
}
const Extents& extents = _geometry->getFBXGeometry().meshExtents;
// even though our caller asked for "unscaled" we need to include any fst scaling, translation, and rotation, which
@ -1011,7 +1011,7 @@ Extents Model::getUnscaledMeshExtents() const {
glm::vec3 minimum = glm::vec3(_geometry->getFBXGeometry().offset * glm::vec4(extents.minimum, 1.0f));
glm::vec3 maximum = glm::vec3(_geometry->getFBXGeometry().offset * glm::vec4(extents.maximum, 1.0f));
Extents scaledExtents = { minimum, maximum };
return scaledExtents;
}
@ -1020,12 +1020,12 @@ Extents Model::calculateScaledOffsetExtents(const Extents& extents) const {
glm::vec3 minimum = glm::vec3(_geometry->getFBXGeometry().offset * glm::vec4(extents.minimum, 1.0f));
glm::vec3 maximum = glm::vec3(_geometry->getFBXGeometry().offset * glm::vec4(extents.maximum, 1.0f));
Extents scaledOffsetExtents = { ((minimum + _offset) * _scale),
((maximum + _offset) * _scale) };
Extents rotatedExtents = scaledOffsetExtents.getRotated(_rotation);
Extents translatedExtents = { rotatedExtents.minimum + _translation,
rotatedExtents.maximum + _translation };
return translatedExtents;
@ -1084,7 +1084,7 @@ void Model::setURL(const QUrl& url, const QUrl& fallback, bool retainCurrent, bo
onInvalidate();
// if so instructed, keep the current geometry until the new one is loaded
_nextGeometry = DependencyManager::get<GeometryCache>()->getGeometry(url, fallback, delayLoad);
_nextLODHysteresis = NetworkGeometry::NO_HYSTERESIS;
if (!retainCurrent || !isActive() || (_nextGeometry && _nextGeometry->isLoaded())) {
@ -1094,14 +1094,14 @@ void Model::setURL(const QUrl& url, const QUrl& fallback, bool retainCurrent, bo
void Model::geometryRefreshed() {
QObject* sender = QObject::sender();
if (sender == _geometry) {
_readyWhenAdded = false; // reset our render items.
_needsReload = true;
invalidCalculatedMeshBoxes();
onInvalidate();
// if so instructed, keep the current geometry until the new one is loaded
_nextGeometry = DependencyManager::get<GeometryCache>()->getGeometry(_url);
_nextLODHysteresis = NetworkGeometry::NO_HYSTERESIS;
@ -1121,7 +1121,7 @@ const QSharedPointer<NetworkGeometry> Model::getCollisionGeometry(bool delayLoad
if (_collisionGeometry && _collisionGeometry->isLoaded()) {
return _collisionGeometry;
}
return QSharedPointer<NetworkGeometry>();
}
@ -1176,11 +1176,11 @@ public:
Blender(Model* model, int blendNumber, const QWeakPointer<NetworkGeometry>& geometry,
const QVector<FBXMesh>& meshes, const QVector<float>& blendshapeCoefficients);
virtual void run();
private:
QPointer<Model> _model;
int _blendNumber;
QWeakPointer<NetworkGeometry> _geometry;
@ -1254,10 +1254,10 @@ void Model::setScaleToFit(bool scaleToFit, float largestDimension, bool forceRes
}
return;
}
if (forceRescale || _scaleToFit != scaleToFit || glm::length(_scaleToFitDimensions) != largestDimension) {
_scaleToFit = scaleToFit;
// we only need to do this work if we're "turning on" scale to fit.
if (scaleToFit) {
Extents modelMeshExtents = getUnscaledMeshExtents();
@ -1278,7 +1278,7 @@ void Model::scaleToFit() {
// we didn't yet have an active mesh. We can only enter this scaleToFit() in this state
// if we now do have an active mesh, so we take this opportunity to actually determine
// the correct scale.
if (_scaleToFit && _scaleToFitDimensions.y == FAKE_DIMENSION_PLACEHOLDER
&& _scaleToFitDimensions.z == FAKE_DIMENSION_PLACEHOLDER) {
setScaleToFit(_scaleToFit, _scaleToFitDimensions.x);
}
@ -1313,7 +1313,7 @@ void Model::simulate(float deltaTime, bool fullUpdate) {
PROFILE_RANGE(__FUNCTION__);
fullUpdate = updateGeometry() || fullUpdate || (_scaleToFit && !_scaledToFit)
|| (_snapModelToRegistrationPoint && !_snappedToRegistrationPoint);
if (isActive() && fullUpdate) {
// NOTE: This is overly aggressive and we are invalidating the MeshBoxes when in fact they may not be invalid
// they really only become invalid if something about the transform to world space has changed. This is
@ -1440,7 +1440,7 @@ void Model::setBlendedVertices(int blendNumber, const QWeakPointer<NetworkGeomet
return;
}
_appliedBlendNumber = blendNumber;
const FBXGeometry& fbxGeometry = _geometry->getFBXGeometry();
int index = 0;
for (int i = 0; i < fbxGeometry.meshes.size(); i++) {
const FBXMesh& mesh = fbxGeometry.meshes.at(i);
@ -1461,7 +1461,7 @@ void Model::setGeometry(const QSharedPointer<NetworkGeometry>& newGeometry) {
if (_geometry == newGeometry) {
return;
}
if (_geometry) {
_geometry->disconnect(_geometry.data(), &Resource::onRefresh, this, &Model::geometryRefreshed);
}
@ -1474,10 +1474,10 @@ void Model::applyNextGeometry() {
deleteGeometry();
_dilatedTextures.clear();
_lodHysteresis = _nextLODHysteresis;
// we retain a reference to the base geometry so that its reference count doesn't fall to zero
setGeometry(_nextGeometry);
_meshGroupsKnown = false;
_readyWhenAdded = false; // in case any of our users are using scenes
_needsReload = false; // we are loaded now!
@ -1509,9 +1509,9 @@ AABox Model::getPartBounds(int meshIndex, int partIndex) {
return calculateScaledOffsetAABox(_geometry->getFBXGeometry().meshExtents);
}
}
if (_geometry->getFBXGeometry().meshes.size() > meshIndex) {
// FIX ME! - This is currently a hack because for some mesh parts our efforts to calculate the bounding
// box of the mesh part fail. It seems to create boxes that are not consistent with where the
// geometry actually renders. If instead we make all the parts share the bounds of the entire subMesh
@ -1536,7 +1536,7 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
if (!_readyWhenAdded) {
return; // bail asap
}
// We need to make sure we have valid offsets calculated before we can render
if (!_calculatedMeshPartOffsetValid) {
_mutex.lock();
@ -1561,13 +1561,13 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
// guard against partially loaded meshes
if (meshIndex >= networkMeshes.size() || meshIndex >= geometry.meshes.size() || meshIndex >= _meshStates.size() ) {
return;
}
const NetworkMesh& networkMesh = networkMeshes.at(meshIndex);
const FBXMesh& mesh = geometry.meshes.at(meshIndex);
const MeshState& state = _meshStates.at(meshIndex);
bool translucentMesh = translucent; // networkMesh.getTranslucentPartCount(mesh) == networkMesh.parts.size();
bool hasTangents = !mesh.tangents.isEmpty();
bool hasSpecular = mesh.hasSpecularTexture();
@ -1597,7 +1597,7 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
DependencyManager::get<DeferredLightingEffect>()->renderWireCube(batch, 1.0f, cubeColor);
}
#endif //def DEBUG_BOUNDING_PARTS
if (wireframe) {
translucentMesh = hasTangents = hasSpecular = hasLightmap = isSkinned = false;
}
@ -1614,14 +1614,14 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
// if our index is ever out of range for either meshes or networkMeshes, then skip it, and set our _meshGroupsKnown
// to false to rebuild our mesh groups.
if (meshIndex < 0 || meshIndex >= networkMeshes.size() || meshIndex > geometry.meshes.size()) {
_meshGroupsKnown = false; // regenerate these lists next time around.
_readyWhenAdded = false; // in case any of our users are using scenes
invalidCalculatedMeshBoxes(); // if we have to reload, we need to assume our mesh boxes are all invalid
return; // FIXME!
}
batch.setIndexBuffer(gpu::UINT32, (networkMesh._indexBuffer), 0);
int vertexCount = mesh.vertices.size();
if (vertexCount == 0) {
@ -1633,7 +1633,7 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
if (_transforms.empty()) {
_transforms.push_back(Transform());
}
if (isSkinned) {
const float* bones;
if (_cauterizeBones) {
@ -1682,7 +1682,7 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
qCDebug(renderutils) << "WARNING: material == nullptr!!!";
}
#endif
if (material != nullptr) {
// apply material properties
@ -1724,12 +1724,12 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
batch._glUniformMatrix4fv(locations->texcoordMatrices, 2, false, (const float*) &texcoordTransform);
}
if (!mesh.tangents.isEmpty()) {
NetworkTexture* normalMap = networkPart.normalTexture.data();
batch.setResourceTexture(1, (!normalMap || !normalMap->isLoaded()) ?
textureCache->getBlueTexture() : normalMap->getGPUTexture());
}
if (locations->specularTextureUnit >= 0) {
NetworkTexture* specularMap = networkPart.specularTexture.data();
batch.setResourceTexture(locations->specularTextureUnit, (!specularMap || !specularMap->isLoaded()) ?
@ -1747,18 +1747,18 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
float emissiveOffset = part.emissiveParams.x;
float emissiveScale = part.emissiveParams.y;
batch._glUniform2f(locations->emissiveParams, emissiveOffset, emissiveScale);
NetworkTexture* emissiveMap = networkPart.emissiveTexture.data();
batch.setResourceTexture(locations->emissiveTextureUnit, (!emissiveMap || !emissiveMap->isLoaded()) ?
textureCache->getGrayTexture() : emissiveMap->getGPUTexture());
}
if (translucent && locations->lightBufferUnit >= 0) {
DependencyManager::get<DeferredLightingEffect>()->setupTransparent(args, locations->lightBufferUnit);
}
}
}
qint64 offset;
{
// FIXME_STUTTER: We shouldn't have any lock here
@ -1798,7 +1798,7 @@ void Model::segregateMeshGroups() {
qDebug() << "WARNING!!!! Mesh Sizes don't match! We will not segregate mesh groups yet.";
return;
}
_transparentRenderItems.clear();
_opaqueRenderItems.clear();
@ -1807,7 +1807,6 @@ void Model::segregateMeshGroups() {
const NetworkMesh& networkMesh = networkMeshes.at(i);
const FBXMesh& mesh = geometry.meshes.at(i);
const MeshState& state = _meshStates.at(i);
bool translucentMesh = networkMesh.getTranslucentPartCount(mesh) == networkMesh.parts.size();
bool hasTangents = !mesh.tangents.isEmpty();
@ -1815,7 +1814,7 @@ void Model::segregateMeshGroups() {
bool hasLightmap = mesh.hasEmissiveTexture();
bool isSkinned = state.clusterMatrices.size() > 1;
bool wireframe = isWireframe();
if (wireframe) {
translucentMesh = hasTangents = hasSpecular = hasLightmap = isSkinned = false;
}
@ -1831,7 +1830,7 @@ void Model::segregateMeshGroups() {
}
}
_meshGroupsKnown = true;
}
}
void Model::pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasLightmap, bool hasTangents, bool hasSpecular, bool isSkinned, bool isWireframe, RenderArgs* args,
@ -1851,7 +1850,7 @@ void Model::pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, f
gpu::ShaderPointer program = (*pipeline).second._pipeline->getProgram();
locations = (*pipeline).second._locations.get();
// Setup the One pipeline
batch.setPipeline((*pipeline).second._pipeline);
@ -1865,7 +1864,7 @@ void Model::pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, f
}
if ((locations->normalFittingMapUnit > -1)) {
batch.setResourceTexture(locations->normalFittingMapUnit,
DependencyManager::get<TextureCache>()->getNormalFittingTexture());
}
}

View file

@ -44,7 +44,7 @@ const int16_t COLLISION_GROUP_OTHER_AVATAR = 1 << 6;
const int16_t COLLISION_GROUP_MY_ATTACHMENT = 1 << 7;
const int16_t COLLISION_GROUP_OTHER_ATTACHMENT = 1 << 8;
// ...
const int16_t COLLISION_GROUP_COLLISIONLESS = 1 << 15;
const int16_t COLLISION_GROUP_COLLISIONLESS = 1 << 14;
/* Note: In order for objectA to collide with objectB at the filter stage
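The note above concerns pair filtering with these one-bit-per-group constants. As a hedged sketch (not taken from this diff), a typical Bullet-style filter lets two objects collide only when each object's group bit appears in the other object's mask; the helper name and parameters are assumptions for illustration.

#include <cstdint>

bool groupsShouldCollide(int16_t groupA, int16_t maskA,
                         int16_t groupB, int16_t maskB) {
    // both directions must agree for the pair to pass the filter stage
    return (groupA & maskB) != 0 && (groupB & maskA) != 0;
}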

View file

@ -20,6 +20,8 @@
#include <QDir>
#include <QGuiApplication>
#include <GLHelpers.h>
#include "../model/Skybox_vert.h"
#include "../model/Skybox_frag.h"
@ -112,85 +114,22 @@
#include "paintStroke_vert.h"
#include "paintStroke_frag.h"
class RateCounter {
std::vector<float> times;
QElapsedTimer timer;
public:
RateCounter() {
timer.start();
}
void reset() {
times.clear();
}
unsigned int count() const {
return times.size() - 1;
}
float elapsed() const {
if (times.size() < 1) {
return 0.0f;
}
float elapsed = *times.rbegin() - *times.begin();
return elapsed;
}
void increment() {
times.push_back(timer.elapsed() / 1000.0f);
}
float rate() const {
if (elapsed() == 0.0f) {
return 0.0f;
}
return (float) count() / elapsed();
}
};
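A short sketch of how a counter like this is meant to be driven (it mirrors the fps usage in draw() further down): increment() records a timestamp each frame, and rate() reports count() divided by the elapsed span of the recorded samples. The frame callback and Qt's qDebug (already included by this file) are assumed for illustration.

RateCounter fps;

void onFrameRendered() {
    fps.increment();                        // record a timestamp for this frame
    if (fps.elapsed() >= 2.0f) {            // after ~2 seconds of samples...
        qDebug() << "FPS: " << fps.rate();  // ...report count() / elapsed()
        fps.reset();
    }
}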
const QString& getQmlDir() {
static QString dir;
if (dir.isEmpty()) {
QDir path(__FILE__);
path.cdUp();
dir = path.cleanPath(path.absoluteFilePath("../../../interface/resources/qml/")) + "/";
qDebug() << "Qml Path: " << dir;
}
return dir;
}
#include "polyvox_vert.h"
#include "polyvox_frag.h"
// Create a simple OpenGL window that renders text in various ways
class QTestWindow : public QWindow {
Q_OBJECT
QOpenGLContext* _context{ nullptr };
QSize _size;
//TextRenderer* _textRenderer[4];
RateCounter fps;
protected:
void renderText();
private:
void resizeWindow(const QSize& size) {
_size = size;
}
public:
QTestWindow() {
setSurfaceType(QSurface::OpenGLSurface);
QSurfaceFormat format;
// Qt Quick may need a depth and stencil buffer. Always make sure these are available.
format.setDepthBufferSize(16);
format.setStencilBufferSize(8);
format.setVersion(4, 1);
format.setProfile(QSurfaceFormat::OpenGLContextProfile::CoreProfile);
format.setOption(QSurfaceFormat::DebugContext);
QSurfaceFormat format = getDefaultOpenGlSurfaceFormat();
setFormat(format);
_context = new QOpenGLContext;
_context->setFormat(format);
_context->create();
@ -199,8 +138,6 @@ public:
makeCurrent();
gpu::Context::init<gpu::GLBackend>();
{
QOpenGLDebugLogger* logger = new QOpenGLDebugLogger(this);
logger->initialize(); // initializes in the current context, i.e. ctx
@ -208,25 +145,8 @@ public:
connect(logger, &QOpenGLDebugLogger::messageLogged, this, [&](const QOpenGLDebugMessage & debugMessage) {
qDebug() << debugMessage;
});
// logger->startLogging(QOpenGLDebugLogger::SynchronousLogging);
}
qDebug() << (const char*)glGetString(GL_VERSION);
//_textRenderer[0] = TextRenderer::getInstance(SANS_FONT_FAMILY, 12, false);
//_textRenderer[1] = TextRenderer::getInstance(SERIF_FONT_FAMILY, 12, false,
// TextRenderer::SHADOW_EFFECT);
//_textRenderer[2] = TextRenderer::getInstance(MONO_FONT_FAMILY, 48, -1,
// false, TextRenderer::OUTLINE_EFFECT);
//_textRenderer[3] = TextRenderer::getInstance(INCONSOLATA_FONT_FAMILY, 24);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glClearColor(0.2f, 0.2f, 0.2f, 1);
glDisable(GL_DEPTH_TEST);
makeCurrent();
// setFramePosition(QPoint(-1000, 0));
resize(QSize(800, 600));
}
@ -237,18 +157,15 @@ public:
void makeCurrent() {
_context->makeCurrent(this);
}
protected:
void resizeEvent(QResizeEvent* ev) override {
resizeWindow(ev->size());
}
};
void testShaderBuild(const char* vs_src, const char * fs_src) {
auto vs = gpu::ShaderPointer(gpu::Shader::createVertex(std::string(vs_src)));
auto fs = gpu::ShaderPointer(gpu::Shader::createPixel(std::string(fs_src)));
auto pr = gpu::ShaderPointer(gpu::Shader::createProgram(vs, fs));
gpu::Shader::makeProgram(*pr);
if (!gpu::Shader::makeProgram(*pr)) {
throw std::runtime_error("Failed to compile shader");
}
}
void QTestWindow::draw() {
@ -257,8 +174,8 @@ void QTestWindow::draw() {
}
makeCurrent();
glClearColor(0.2f, 0.2f, 0.2f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, _size.width() * devicePixelRatio(), _size.height() * devicePixelRatio());
static std::once_flag once;
std::call_once(once, [&]{
@ -328,17 +245,10 @@ void QTestWindow::draw() {
testShaderBuild(Skybox_vert, Skybox_frag);
testShaderBuild(paintStroke_vert,paintStroke_frag);
testShaderBuild(polyvox_vert, polyvox_frag);
});
_context->swapBuffers(this);
glFinish();
fps.increment();
if (fps.elapsed() >= 2.0f) {
qDebug() << "FPS: " << fps.rate();
fps.reset();
}
}
void messageHandler(QtMsgType type, const QMessageLogContext& context, const QString& message) {
@ -352,7 +262,6 @@ void messageHandler(QtMsgType type, const QMessageLogContext& context, const QSt
}
}
const char * LOG_FILTER_RULES = R"V0G0N(
hifi.gpu=true
)V0G0N";