Merge branch 'master' of github.com:highfidelity/hifi into near-grab-via-parenting

Seth Alves 2015-12-15 10:12:49 -08:00
commit a3631ea1d1
8 changed files with 97 additions and 99 deletions


@@ -37,6 +37,7 @@
#include <EntityScriptingInterface.h> // TODO: consider moving to scriptengine.h
#include "avatars/ScriptableAvatar.h"
#include "entities/AssignmentParentFinder.h"
#include "RecordingScriptingInterface.h"
#include "AbstractAudioInterface.h"
@@ -62,6 +63,8 @@ Agent::Agent(ReceivedMessage& message) :
connect(assetThread, &QThread::started, assetClient.data(), &AssetClient::init);
assetThread->start();
DependencyManager::registerInheritance<SpatialParentFinder, AssignmentParentFinder>();
DependencyManager::set<ResourceCacheSharedItems>();
DependencyManager::set<SoundCache>();
DependencyManager::set<AudioInjectorManager>();
@@ -284,6 +287,8 @@ void Agent::executeScript() {
entityScriptingInterface->setEntityTree(_entityViewer.getTree());
DependencyManager::set<AssignmentParentFinder>(_entityViewer.getTree());
// wire up our additional agent related processing to the update signal
QObject::connect(_scriptEngine.get(), &ScriptEngine::update, this, &Agent::processAgentAvatarAndAudio);
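The registerInheritance/set pair above is what lets later code ask for the SpatialParentFinder interface and receive the AssignmentParentFinder instance created in executeScript(). A minimal standalone sketch of that register-by-interface pattern follows; the Registry type and its internals are illustrative stand-ins, not hifi's actual DependencyManager.

    #include <iostream>
    #include <memory>
    #include <typeindex>
    #include <unordered_map>

    struct Dependency { virtual ~Dependency() = default; };
    struct SpatialParentFinder : Dependency { virtual const char* name() const = 0; };
    struct AssignmentParentFinder : SpatialParentFinder {
        const char* name() const override { return "AssignmentParentFinder"; }
    };

    class Registry {
    public:
        // Remember that requests for Interface should be answered by the Impl instance.
        template <typename Interface, typename Impl>
        static void registerInheritance() {
            aliases().insert_or_assign(std::type_index(typeid(Interface)), std::type_index(typeid(Impl)));
        }
        // Create and store the shared instance under its concrete type.
        template <typename Impl, typename... Args>
        static void set(Args&&... args) {
            instances()[std::type_index(typeid(Impl))] = std::make_shared<Impl>(std::forward<Args>(args)...);
        }
        // Look up by interface or by concrete type.
        template <typename T>
        static std::shared_ptr<T> get() {
            std::type_index key{ typeid(T) };
            if (auto alias = aliases().find(key); alias != aliases().end()) { key = alias->second; }
            auto it = instances().find(key);
            if (it == instances().end()) { return nullptr; }
            return std::dynamic_pointer_cast<T>(it->second);
        }
    private:
        static std::unordered_map<std::type_index, std::type_index>& aliases() {
            static std::unordered_map<std::type_index, std::type_index> a; return a;
        }
        static std::unordered_map<std::type_index, std::shared_ptr<Dependency>>& instances() {
            static std::unordered_map<std::type_index, std::shared_ptr<Dependency>> i; return i;
        }
    };

    int main() {
        Registry::registerInheritance<SpatialParentFinder, AssignmentParentFinder>();
        Registry::set<AssignmentParentFinder>();
        std::cout << Registry::get<SpatialParentFinder>()->name() << "\n"; // prints AssignmentParentFinder
    }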


@@ -1231,15 +1231,28 @@ void Application::paintGL() {
}
} else if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
if (isHMDMode()) {
auto mirrorBodyOrientation = myAvatar->getWorldAlignedOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f));
glm::quat hmdRotation = extractRotation(myAvatar->getHMDSensorMatrix());
_myCamera.setRotation(myAvatar->getWorldAlignedOrientation()
* glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)) * hmdRotation);
// Mirror HMD yaw and roll
glm::vec3 mirrorHmdEulers = glm::eulerAngles(hmdRotation);
mirrorHmdEulers.y = -mirrorHmdEulers.y;
mirrorHmdEulers.z = -mirrorHmdEulers.z;
glm::quat mirrorHmdRotation = glm::quat(mirrorHmdEulers);
glm::quat worldMirrorRotation = mirrorBodyOrientation * mirrorHmdRotation;
_myCamera.setRotation(worldMirrorRotation);
glm::vec3 hmdOffset = extractTranslation(myAvatar->getHMDSensorMatrix());
// Mirror HMD lateral offsets
hmdOffset.x = -hmdOffset.x;
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ glm::vec3(0, _raiseMirror * myAvatar->getAvatarScale(), 0)
+ (myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, _rotateMirror, 0.0f))) *
glm::vec3(0.0f, 0.0f, -1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror
+ (myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f))) * hmdOffset);
+ mirrorBodyOrientation * glm::vec3(0.0f, 0.0f, 1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror
+ mirrorBodyOrientation * hmdOffset);
} else {
_myCamera.setRotation(myAvatar->getWorldAlignedOrientation()
* glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
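Restated outside paintGL(), the mirrored-HMD math above amounts to the following standalone glm sketch; mirrorCameraRotation and mirrorCameraOffset are illustrative helper names, and the HMD rotation/offset are assumed to have already been extracted from the sensor matrix by the caller.

    #include <glm/glm.hpp>
    #include <glm/gtc/constants.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Camera rotation for the HMD mirror: face back toward the avatar, then apply
    // the head pose with its yaw and roll negated so the reflection moves like a mirror image.
    glm::quat mirrorCameraRotation(const glm::quat& bodyOrientation, float rotateMirror, const glm::quat& hmdRotation) {
        glm::quat mirrorBodyOrientation = bodyOrientation * glm::quat(glm::vec3(0.0f, glm::pi<float>() + rotateMirror, 0.0f));
        glm::vec3 eulers = glm::eulerAngles(hmdRotation); // pitch (x), yaw (y), roll (z), in radians
        eulers.y = -eulers.y;                             // mirror yaw
        eulers.z = -eulers.z;                             // mirror roll
        return mirrorBodyOrientation * glm::quat(eulers);
    }

    // Camera offset for the HMD mirror: reflect lateral head motion before rotating it into world space.
    glm::vec3 mirrorCameraOffset(const glm::quat& mirrorBodyOrientation, glm::vec3 hmdOffset) {
        hmdOffset.x = -hmdOffset.x;
        return mirrorBodyOrientation * hmdOffset;
    }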
@@ -1273,6 +1286,10 @@ void Application::paintGL() {
// Primary rendering pass
auto framebufferCache = DependencyManager::get<FramebufferCache>();
const QSize size = framebufferCache->getFrameBufferSize();
// Final framebuffer that will be handed to the display-plugin
auto finalFramebuffer = framebufferCache->getFramebuffer();
{
PROFILE_RANGE(__FUNCTION__ "/mainRender");
PerformanceTimer perfTimer("mainRender");
@@ -1326,9 +1343,63 @@
}
displaySide(&renderArgs, _myCamera);
renderArgs._context->enableStereo(false);
gpu::doInBatch(renderArgs._context, [](gpu::Batch& batch) {
batch.setFramebuffer(nullptr);
});
// Blit primary to final FBO
auto primaryFbo = framebufferCache->getPrimaryFramebuffer();
if (renderArgs._renderMode == RenderArgs::MIRROR_RENDER_MODE) {
if (displayPlugin->isStereo()) {
gpu::doInBatch(renderArgs._context, [=](gpu::Batch& batch) {
gpu::Vec4i srcRectLeft;
srcRectLeft.z = size.width() / 2;
srcRectLeft.w = size.height();
gpu::Vec4i srcRectRight;
srcRectRight.x = size.width() / 2;
srcRectRight.z = size.width();
srcRectRight.w = size.height();
gpu::Vec4i destRectLeft;
destRectLeft.x = srcRectLeft.z;
destRectLeft.z = srcRectLeft.x;
destRectLeft.y = srcRectLeft.y;
destRectLeft.w = srcRectLeft.w;
gpu::Vec4i destRectRight;
destRectRight.x = srcRectRight.z;
destRectRight.z = srcRectRight.x;
destRectRight.y = srcRectRight.y;
destRectRight.w = srcRectRight.w;
batch.setFramebuffer(finalFramebuffer);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0f, 0.0f, 1.0f, 0.0f));
// Blit left to right and right to left in stereo
batch.blit(primaryFbo, srcRectRight, finalFramebuffer, destRectLeft);
batch.blit(primaryFbo, srcRectLeft, finalFramebuffer, destRectRight);
});
} else {
gpu::doInBatch(renderArgs._context, [=](gpu::Batch& batch) {
gpu::Vec4i srcRect;
srcRect.z = size.width();
srcRect.w = size.height();
gpu::Vec4i destRect;
destRect.x = size.width();
destRect.y = 0;
destRect.z = 0;
destRect.w = size.height();
batch.setFramebuffer(finalFramebuffer);
batch.blit(primaryFbo, srcRect, finalFramebuffer, destRect);
});
}
} else {
gpu::doInBatch(renderArgs._context, [=](gpu::Batch& batch) {
gpu::Vec4i rect;
rect.z = size.width();
rect.w = size.height();
batch.setFramebuffer(finalFramebuffer);
batch.blit(primaryFbo, rect, finalFramebuffer, rect);
});
}
}
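The gpu::Vec4i rects above treat (x, y) and (z, w) as the two corners of the region, so swapping x and z in the destination flips the copy horizontally; in the stereo mirror branch the flipped left and right halves also trade places, per the comment in the batch. A small sketch of that rectangle arithmetic follows (Rect and the helper names are illustrative, not the gpu API).

    #include <cstdio>

    struct Rect { int x = 0, y = 0, z = 0, w = 0; }; // (x, y) and (z, w) are opposite corners

    // Source region covering the whole frame.
    Rect fullRect(int width, int height) { return { 0, 0, width, height }; }

    // Destination for a horizontally mirrored blit: same rows, x and z swapped.
    Rect mirroredRect(int width, int height) { return { width, 0, 0, height }; }

    int main() {
        const int width = 1920, height = 1080;
        Rect src = fullRect(width, height);
        Rect dst = mirroredRect(width, height);
        // A column at source x ends up at destination width - x, so the image is flipped left/right.
        std::printf("src (%d,%d)-(%d,%d) -> dst (%d,%d)-(%d,%d)\n",
                    src.x, src.y, src.z, src.w, dst.x, dst.y, dst.z, dst.w);
        return 0;
    }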
// Overlay Composition, needs to occur after screen space effects have completed
@@ -1336,7 +1407,7 @@ void Application::paintGL() {
{
PROFILE_RANGE(__FUNCTION__ "/compositor");
PerformanceTimer perfTimer("compositor");
auto primaryFbo = framebufferCache->getPrimaryFramebuffer();
auto primaryFbo = finalFramebuffer;
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, gpu::GLBackend::getFramebufferID(primaryFbo));
if (displayPlugin->isStereo()) {
QRect currentViewport(QPoint(0, 0), QSize(size.width() / 2, size.height()));
@@ -1361,23 +1432,12 @@ void Application::paintGL() {
{
PROFILE_RANGE(__FUNCTION__ "/pluginOutput");
PerformanceTimer perfTimer("pluginOutput");
auto primaryFramebuffer = framebufferCache->getPrimaryFramebuffer();
auto scratchFramebuffer = framebufferCache->getFramebuffer();
gpu::doInBatch(renderArgs._context, [=](gpu::Batch& batch) {
gpu::Vec4i rect;
rect.z = size.width();
rect.w = size.height();
batch.setFramebuffer(scratchFramebuffer);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0f, 0.0f, 0.0f, 0.0f));
batch.blit(primaryFramebuffer, rect, scratchFramebuffer, rect);
batch.setFramebuffer(nullptr);
});
auto finalTexturePointer = scratchFramebuffer->getRenderBuffer(0);
auto finalTexturePointer = finalFramebuffer->getRenderBuffer(0);
GLuint finalTexture = gpu::GLBackend::getTextureID(finalTexturePointer);
Q_ASSERT(0 != finalTexture);
Q_ASSERT(!_lockedFramebufferMap.contains(finalTexture));
_lockedFramebufferMap[finalTexture] = scratchFramebuffer;
_lockedFramebufferMap[finalTexture] = finalFramebuffer;
Q_ASSERT(isCurrentContext(_offscreenContext->getContext()));
{
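With the separate scratch-framebuffer blit removed from this block, the finalFramebuffer filled earlier is handed to the display plugin directly and parked in _lockedFramebufferMap under its GL texture id until presentation completes. A minimal sketch of that keep-alive bookkeeping follows; FramebufferLocker and its types are illustrative stand-ins, not hifi classes.

    #include <cassert>
    #include <memory>
    #include <unordered_map>

    using GLuint = unsigned int;
    struct Framebuffer {};                               // stand-in for gpu::Framebuffer
    using FramebufferPointer = std::shared_ptr<Framebuffer>;

    class FramebufferLocker {
    public:
        // Hold a reference while the display plugin still reads from the texture.
        void lock(GLuint texture, FramebufferPointer framebuffer) {
            assert(_locked.find(texture) == _locked.end()); // each texture locked at most once
            _locked[texture] = std::move(framebuffer);
        }
        // Drop the reference once the plugin reports the texture as presented.
        void release(GLuint texture) { _locked.erase(texture); }
    private:
        std::unordered_map<GLuint, FramebufferPointer> _locked;
    };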


@@ -261,9 +261,6 @@ Menu::Menu() {
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::NamesAboveHeads, 0, true,
NULL, NULL, UNSPECIFIED_POSITION, "Advanced");
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::BlueSpeechSphere, 0, true,
NULL, NULL, UNSPECIFIED_POSITION, "Advanced");
MenuWrapper* viewMenu = addMenu("View");
addActionToQMenuAndActionHash(viewMenu, MenuOption::ReloadContent, 0, qApp, SLOT(reloadResourceCaches()),
QAction::NoRole, UNSPECIFIED_POSITION, "Advanced");


@@ -168,7 +168,6 @@ namespace MenuOption {
const QString Back = "Back";
const QString BandwidthDetails = "Bandwidth Details";
const QString BinaryEyelidControl = "Binary Eyelid Control";
const QString BlueSpeechSphere = "Blue Sphere While Speaking";
const QString BookmarkLocation = "Bookmark Location";
const QString Bookmarks = "Bookmarks";
const QString CachesSize = "RAM Caches Size";


@@ -495,39 +495,6 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
}
}
}
// quick check before falling into the code below:
// (a 10 degree breadth of an almost 2 meter avatar kicks in at about 12m)
const float MIN_VOICE_SPHERE_DISTANCE = 12.0f;
if (Menu::getInstance()->isOptionChecked(MenuOption::BlueSpeechSphere)
&& distanceToTarget > MIN_VOICE_SPHERE_DISTANCE) {
PROFILE_RANGE_BATCH(batch, __FUNCTION__":renderVoiceSphere");
// render voice intensity sphere for avatars that are farther away
const float MAX_SPHERE_ANGLE = 10.0f * RADIANS_PER_DEGREE;
const float MIN_SPHERE_ANGLE = 0.5f * RADIANS_PER_DEGREE;
const float MIN_SPHERE_SIZE = 0.01f;
const float SPHERE_LOUDNESS_SCALING = 0.0005f;
const float SPHERE_COLOR[] = { 0.5f, 0.8f, 0.8f };
float height = getSkeletonHeight();
glm::vec3 delta = height * (getHead()->getCameraOrientation() * IDENTITY_UP) / 2.0f;
float angle = abs(angleBetween(toTarget + delta, toTarget - delta));
float sphereRadius = getHead()->getAverageLoudness() * SPHERE_LOUDNESS_SCALING;
if (renderArgs->_renderMode == RenderArgs::DEFAULT_RENDER_MODE && (sphereRadius > MIN_SPHERE_SIZE) &&
(angle < MAX_SPHERE_ANGLE) && (angle > MIN_SPHERE_ANGLE)) {
batch.setModelTransform(Transform());
Transform transform;
transform.setTranslation(getPosition());
transform.setScale(height);
transform.postScale(sphereRadius);
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphereInstance(batch,
transform,
glm::vec4(SPHERE_COLOR[0], SPHERE_COLOR[1], SPHERE_COLOR[2], 1.0f - angle / MAX_SPHERE_ANGLE));
}
}
}
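The "(a 10 degree breadth of an almost 2 meter avatar kicks in at about 12m)" comment in the removed block checks out: an object of height h subtends 2 * atan(h / (2 * d)) at distance d, which for h = 2 m reaches 10 degrees at roughly 11.4 m. A quick check in plain C++, with the constants taken from the removed code:

    #include <cmath>
    #include <cstdio>

    int main() {
        const float PI = 3.14159265f;
        const float RADIANS_PER_DEGREE = PI / 180.0f;
        const float MAX_SPHERE_ANGLE = 10.0f * RADIANS_PER_DEGREE; // from the removed block
        const float height = 2.0f;                                 // "almost 2 meter avatar"
        // angle = 2 * atan((height / 2) / distance)  =>  distance = (height / 2) / tan(angle / 2)
        float distance = (height / 2.0f) / std::tan(MAX_SPHERE_ANGLE / 2.0f);
        std::printf("a %.0f m avatar subtends 10 degrees at %.1f m\n", height, distance); // ~11.4 m
        return 0;
    }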
const float DISPLAYNAME_DISTANCE = 20.0f;


@@ -441,14 +441,6 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
estimatedPosition = tracker->getHeadTranslation();
_trackedHeadPosition = estimatedPosition;
estimatedRotation = glm::degrees(safeEulerAngles(tracker->getHeadRotation()));
if (qApp->getCamera()->getMode() == CAMERA_MODE_MIRROR) {
// Invert yaw and roll when in mirror mode
// NOTE: this is kinda a hack, it's the same hack we use to make the head tilt. But it's not really a mirror
// it just makes you feel like you're looking in a mirror because the body movements of the avatar appear to
// match your body movements.
YAW(estimatedRotation) *= -1.0f;
ROLL(estimatedRotation) *= -1.0f;
}
}
// Rotate the body if the head is turned beyond the screen
@@ -489,14 +481,6 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
const float TORSO_LENGTH = 0.5f;
glm::vec3 relativePosition = estimatedPosition - glm::vec3(0.0f, -TORSO_LENGTH, 0.0f);
// Invert left/right lean when in mirror mode
// NOTE: this is kinda a hack, it's the same hack we use to make the head tilt. But it's not really a mirror
// it just makes you feel like you're looking in a mirror because the body movements of the avatar appear to
// match your body movements.
if ((inHmd || inFacetracker) && qApp->getCamera()->getMode() == CAMERA_MODE_MIRROR) {
relativePosition.x = -relativePosition.x;
}
const float MAX_LEAN = 45.0f;
head->setLeanSideways(glm::clamp(glm::degrees(atanf(relativePosition.x * _leanScale / TORSO_LENGTH)),
-MAX_LEAN, MAX_LEAN));
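For scale, the surviving lean mapping above turns a 10 cm sideways head offset into roughly an 11 degree lean; the sketch below takes _leanScale as 1.0 purely for illustration.

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main() {
        const float TORSO_LENGTH = 0.5f;
        const float MAX_LEAN = 45.0f;
        const float DEGREES_PER_RADIAN = 180.0f / 3.14159265f;
        float leanScale = 1.0f;   // illustrative value only; the real member is _leanScale
        float relativeX = 0.1f;   // tracked head 10 cm to the side
        float lean = std::clamp(DEGREES_PER_RADIAN * std::atan(relativeX * leanScale / TORSO_LENGTH),
                                -MAX_LEAN, MAX_LEAN);
        std::printf("leanSideways = %.1f degrees\n", lean); // atan(0.2) is about 11.3 degrees
        return 0;
    }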
@@ -1401,12 +1385,6 @@ void MyAvatar::updateOrientation(float deltaTime) {
// ... so they need to be converted to degrees before we do math...
glm::vec3 euler = glm::eulerAngles(localOrientation) * DEGREES_PER_RADIAN;
//Invert yaw and roll when in mirror mode
if (qApp->getCamera()->getMode() == CAMERA_MODE_MIRROR) {
YAW(euler) *= -1.0f;
ROLL(euler) *= -1.0f;
}
Head* head = getHead();
head->setBaseYaw(YAW(euler));
head->setBasePitch(PITCH(euler));


@@ -47,18 +47,7 @@ void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum,
Skybox::render(batch, viewFrustum, skybox);
}
static gpu::BufferPointer theBuffer;
static gpu::Stream::FormatPointer theFormat;
if (skybox._procedural && skybox._procedural->_enabled && skybox._procedural->ready()) {
if (!theBuffer) {
const float CLIP = 1.0f;
const glm::vec2 vertices[4] = { { -CLIP, -CLIP }, { CLIP, -CLIP }, { -CLIP, CLIP }, { CLIP, CLIP } };
theBuffer = std::make_shared<gpu::Buffer>(sizeof(vertices), (const gpu::Byte*) vertices);
theFormat = std::make_shared<gpu::Stream::Format>();
theFormat->setAttribute(gpu::Stream::POSITION, gpu::Stream::POSITION, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ));
}
glm::mat4 projMat;
viewFrustum.evalProjectionMatrix(projMat);
@@ -67,8 +56,6 @@ void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum,
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewTransform);
batch.setModelTransform(Transform()); // only for Mac
batch.setInputBuffer(gpu::Stream::POSITION, theBuffer, 0, 8);
batch.setInputFormat(theFormat);
if (skybox.getCubemap() && skybox.getCubemap()->isDefined()) {
batch.setResourceTexture(0, skybox.getCubemap());
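With the static vertex buffer and stream format gone, the skybox quad no longer needs any vertex inputs; the corners come from gl_VertexID in the vertex shader shown in the next hunk. In plain OpenGL terms the draw reduces to something like the sketch below (illustrative only, assuming a GL 3+ loader such as glad; not the gpu::Batch API).

    #include <glad/glad.h> // or any OpenGL 3+ function loader

    // Draw the sky quad with no vertex attributes bound: the shader derives the
    // four corners from gl_VertexID, so an empty VAO is enough.
    void drawSkyboxQuad(GLuint skyboxProgram, GLuint emptyVao) {
        glUseProgram(skyboxProgram);
        glBindVertexArray(emptyVao);
        glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); // gl_VertexID runs 0..3, selecting UNIT_QUAD entries
    }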


@@ -11,21 +11,26 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
out vec3 _normal;
void main(void) {
const float depth = 0.0;
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, depth, 1.0),
vec4(1.0, -1.0, depth, 1.0),
vec4(-1.0, 1.0, depth, 1.0),
vec4(1.0, 1.0, depth, 1.0)
);
vec4 inPosition = UNIT_QUAD[gl_VertexID];
// standard transform
TransformCamera cam = getTransformCamera();
vec3 clipDir = vec3(inPosition.xy, 0.0);
vec3 eyeDir;
<$transformClipToEyeDir(cam, clipDir, eyeDir)$>
<$transformEyeToWorldDir(cam, eyeDir, _normal)$>
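The shader's job, then, is to turn each clip-space quad corner into a world-space view direction for the fragment stage. Below is a CPU-side glm sketch of the same reconstruction; the <$transformClipToEyeDir$> / <$transformEyeToWorldDir$> macro expansions may differ in detail, and clipCornerToWorldDir is an illustrative name.

    #include <glm/glm.hpp>

    // Unproject a clip-space corner to a world-space direction, the same idea the
    // two transform macros above express on the GPU.
    glm::vec3 clipCornerToWorldDir(const glm::mat4& proj, const glm::mat4& view, glm::vec2 clipXY) {
        // Put the corner on the far plane in eye space; the eye sits at the origin there.
        glm::vec4 eye = glm::inverse(proj) * glm::vec4(clipXY, 1.0f, 1.0f);
        eye /= eye.w;
        // Rotate the eye-space direction into world space (w = 0 drops the camera translation).
        return glm::normalize(glm::vec3(glm::inverse(view) * glm::vec4(glm::vec3(eye), 0.0f)));
    }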