Improvements to HMD re-centering while driving.
Previously the HUD fading in/out would also re-center the HMD sensor and the avatar, which caused many problems, including:
* The user's view could shift vertically.
* Your avatar would briefly go into T-pose.
* Other users would see your avatar go into T-pose.
Now we move the UI sphere instead, which results in a much smoother experience.

MyAvatar:
* Added hasDriveInput method.

OverlayConductor:
* Removed the avatar and sensor reset; instead the overlay's modelTransform is changed.
* Revived STANDING mode, which is active if myAvatar->getClearOverlayWhenDriving() is true and you are wearing an HMD.
* SITTING and FLAT modes should be unchanged.
* Instead of using avatar velocity to fade the HUD in/out, we use the presence or absence of avatar drive input.
* Additionally, we check the distance to the UI sphere, and quickly re-center the HUD if the user's head is too close to the actual HUD sphere.

CompositorHelper:
* Bug fixes for ray picks not using the modelTransform.

HmdDisplayPlugin:
* Bug fixes for rendering not using the modelTransform.
parent b21fbe5d99
commit 5ef6847dc3
6 changed files with 66 additions and 64 deletions
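The core of the change, visible in the OverlayConductor hunks below, is that re-centering now moves the UI sphere: the compositor's model transform is rewritten from the HMD sensor pose with roll and pitch cancelled (cancelOutRollAndPitch), instead of resetting the avatar and the sensors. The following is a minimal sketch, assuming plain glm and a hypothetical helper name, of what such a yaw-only pose looks like; it is an illustration, not the engine's cancelOutRollAndPitch implementation.

#include <glm/glm.hpp>

// Illustrative only: keep the pose's translation and yaw, discard pitch and roll.
glm::mat4 yawOnlyPose(const glm::mat4& pose) {
    glm::vec3 position = glm::vec3(pose[3]);       // translation column
    glm::vec3 forward = -glm::vec3(pose[2]);       // -Z column is the look direction
    forward.y = 0.0f;                              // project onto the horizontal plane
    if (glm::length(forward) < 1.0e-5f) {
        forward = glm::vec3(0.0f, 0.0f, -1.0f);    // looking straight up/down: fall back to default
    }
    forward = glm::normalize(forward);
    glm::vec3 up(0.0f, 1.0f, 0.0f);
    glm::vec3 right = glm::normalize(glm::cross(forward, up));
    glm::mat4 result(1.0f);
    result[0] = glm::vec4(right, 0.0f);            // X axis
    result[1] = glm::vec4(up, 0.0f);               // Y axis
    result[2] = glm::vec4(-forward, 0.0f);         // Z axis (backward)
    result[3] = glm::vec4(position, 1.0f);         // translation
    return result;
}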
MyAvatar.cpp

@@ -1261,8 +1261,7 @@ void MyAvatar::prepareForPhysicsSimulation() {
 
     _characterController.setPositionAndOrientation(getPosition(), getOrientation());
     if (qApp->isHMDMode()) {
-        bool hasDriveInput = fabsf(_driveKeys[TRANSLATE_X]) > 0.0f || fabsf(_driveKeys[TRANSLATE_Z]) > 0.0f;
-        _follow.prePhysicsUpdate(*this, deriveBodyFromHMDSensor(), _bodySensorMatrix, hasDriveInput);
+        _follow.prePhysicsUpdate(*this, deriveBodyFromHMDSensor(), _bodySensorMatrix, hasDriveInput());
     } else {
         _follow.deactivate();
     }
@@ -2135,3 +2134,7 @@ bool MyAvatar::didTeleport() {
     lastPosition = pos;
     return (changeInPosition.length() > MAX_AVATAR_MOVEMENT_PER_FRAME);
 }
+
+bool MyAvatar::hasDriveInput() const {
+    return fabsf(_driveKeys[TRANSLATE_X]) > 0.0f || fabsf(_driveKeys[TRANSLATE_Z]) > 0.0f;
+}
MyAvatar.h

@@ -264,6 +264,8 @@ public:
     controller::Pose getLeftHandControllerPoseInAvatarFrame() const;
     controller::Pose getRightHandControllerPoseInAvatarFrame() const;
 
+    bool hasDriveInput() const;
+
 public slots:
     void increaseSize();
     void decreaseSize();
OverlayConductor.cpp

@@ -22,10 +22,31 @@ OverlayConductor::OverlayConductor() {
 OverlayConductor::~OverlayConductor() {
 }
 
+bool OverlayConductor::shouldCenterUI() const {
+
+    glm::mat4 hmdMat = qApp->getHMDSensorPose();
+    glm::vec3 hmdPos = extractTranslation(hmdMat);
+    glm::vec3 hmdForward = transformVectorFast(hmdMat, glm::vec3(0.0f, 0.0f, -1.0f));
+
+    Transform uiTransform = qApp->getApplicationCompositor().getModelTransform();
+    glm::vec3 uiPos = uiTransform.getTranslation();
+    glm::vec3 uiForward = uiTransform.getRotation() * glm::vec3(0.0f, 0.0f, -1.0f);
+
+    const float MAX_COMPOSITOR_DISTANCE = 0.6f;
+    const float MAX_COMPOSITOR_ANGLE = 180.0f; // rotation check is effectively disabled
+    if (glm::distance(uiPos, hmdPos) > MAX_COMPOSITOR_DISTANCE ||
+        glm::dot(uiForward, hmdForward) < cosf(glm::radians(MAX_COMPOSITOR_ANGLE))) {
+        return true;
+    }
+    return false;
+}
+
 void OverlayConductor::update(float dt) {
 
     updateMode();
 
+    MyAvatar* myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
+
     switch (_mode) {
     case SITTING: {
         // when sitting, the overlay is at the origin, facing down the -z axis.
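A note on the thresholds in shouldCenterUI() above: for unit-length forward vectors, glm::dot(uiForward, hmdForward) is never less than -1.0f, and cosf(glm::radians(180.0f)) is exactly -1.0f, so with MAX_COMPOSITOR_ANGLE = 180.0f the orientation branch can never fire; only the 0.6 m distance test triggers a re-center, which is what the inline comment means by "rotation check is effectively disabled". A hedged, self-contained restatement of the test (hypothetical names, glm plus <cmath> only):

#include <cmath>
#include <glm/glm.hpp>

// Returns true when the head has drifted too far from the UI sphere's origin,
// or has turned away from it by more than maxAngleDegrees.
bool headOutsideSweetSpot(const glm::vec3& headPos, const glm::vec3& headForward,
                          const glm::vec3& uiPos, const glm::vec3& uiForward,
                          float maxDistance, float maxAngleDegrees) {
    bool tooFar = glm::distance(uiPos, headPos) > maxDistance;
    bool lookedAway = glm::dot(glm::normalize(uiForward), glm::normalize(headForward)) <
                      cosf(glm::radians(maxAngleDegrees));
    return tooFar || lookedAway;
}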
@@ -36,27 +57,30 @@ void OverlayConductor::update(float dt) {
         break;
     }
     case STANDING: {
-        // when standing, the overlay is at a reference position, which is set when the overlay is
-        // enabled. The camera is taken directly from the HMD, but in world space.
-        // So the sensorToWorldMatrix must be applied.
-        MyAvatar* myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
-        Transform t;
-        t.evalFromRawMatrix(myAvatar->getSensorToWorldMatrix());
-        qApp->getApplicationCompositor().setCameraBaseTransform(t);
-
-        // detect when head moves out side of sweet spot, or looks away.
-        mat4 headMat = myAvatar->getSensorToWorldMatrix() * qApp->getHMDSensorPose();
-        vec3 headWorldPos = extractTranslation(headMat);
-        vec3 headForward = glm::quat_cast(headMat) * glm::vec3(0.0f, 0.0f, -1.0f);
-        Transform modelXform = qApp->getApplicationCompositor().getModelTransform();
-        vec3 compositorWorldPos = modelXform.getTranslation();
-        vec3 compositorForward = modelXform.getRotation() * glm::vec3(0.0f, 0.0f, -1.0f);
-        const float MAX_COMPOSITOR_DISTANCE = 0.6f;
-        const float MAX_COMPOSITOR_ANGLE = 110.0f;
-        if (_enabled && (glm::distance(headWorldPos, compositorWorldPos) > MAX_COMPOSITOR_DISTANCE ||
-            glm::dot(headForward, compositorForward) < cosf(glm::radians(MAX_COMPOSITOR_ANGLE)))) {
-            // fade out the overlay
-            setEnabled(false);
-        }
+        const quint64 REQUIRED_USECS_IN_NEW_MODE_BEFORE_INVISIBLE = 200 * 1000;
+        const quint64 REQUIRED_USECS_IN_NEW_MODE_BEFORE_VISIBLE = 1000 * 1000;
+
+        // fade in or out the overlay, based on driving.
+        bool nowDriving = myAvatar->hasDriveInput();
+        // Check that we're in this new mode for long enough to really trigger a transition.
+        if (nowDriving == _driving) { // If there's no change in state, clear any attempted timer.
+            _timeInPotentialMode = 0;
+        } else if (_timeInPotentialMode == 0) { // We've just changed with no timer, so start timing now.
+            _timeInPotentialMode = usecTimestampNow();
+        } else if ((usecTimestampNow() - _timeInPotentialMode) > (nowDriving ? REQUIRED_USECS_IN_NEW_MODE_BEFORE_INVISIBLE : REQUIRED_USECS_IN_NEW_MODE_BEFORE_VISIBLE)) {
+            _timeInPotentialMode = 0; // a real transition
+            bool wantsOverlays = Menu::getInstance()->isOptionChecked(MenuOption::Overlays);
+            if (wantsOverlays) {
+                setEnabled(!nowDriving);
+            }
+            _driving = nowDriving;
+        }
+
+        // center the UI
+        if (shouldCenterUI()) {
+            Transform hmdTransform(cancelOutRollAndPitch(qApp->getHMDSensorPose()));
+            qApp->getApplicationCompositor().setModelTransform(hmdTransform);
+        }
         break;
     }
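The timer logic above debounces the fade: the new driving state must persist for REQUIRED_USECS_IN_NEW_MODE_BEFORE_INVISIBLE (200 ms) before the HUD is hidden, and for REQUIRED_USECS_IN_NEW_MODE_BEFORE_VISIBLE (1 s) before it is shown again, so brief taps of the drive keys do not make the overlay flicker. A hedged sketch of the same pattern as a reusable helper (hypothetical type, not engine code):

#include <cstdint>

// A state change only "commits" after it has been observed continuously for a required time.
struct DebouncedBool {
    bool committed = false;      // the state callers act on
    uint64_t pendingSince = 0;   // usecs when a differing observation started, 0 = none

    // Returns true when the committed state flips on this call.
    bool observe(bool observed, uint64_t nowUsecs, uint64_t requiredUsecs) {
        if (observed == committed) {
            pendingSince = 0;                 // no change pending, clear the timer
        } else if (pendingSince == 0) {
            pendingSince = nowUsecs;          // start timing the potential change
        } else if (nowUsecs - pendingSince > requiredUsecs) {
            pendingSince = 0;                 // held long enough: commit the change
            committed = observed;
            return true;
        }
        return false;
    }
};

In the hunk above, the observed value is myAvatar->hasDriveInput(), the committed value is _driving, and the required time depends on the direction of the change (quick to hide, slow to show).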
@@ -68,43 +92,14 @@ void OverlayConductor::update(float dt) {
 
 void OverlayConductor::updateMode() {
     MyAvatar* myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
-    if (myAvatar->getClearOverlayWhenDriving()) {
-        float speed = glm::length(myAvatar->getVelocity());
-        const float MIN_DRIVING = 0.2f;
-        const float MAX_NOT_DRIVING = 0.01f;
-        const quint64 REQUIRED_USECS_IN_NEW_MODE_BEFORE_INVISIBLE = 200 * 1000;
-        const quint64 REQUIRED_USECS_IN_NEW_MODE_BEFORE_VISIBLE = 1000 * 1000;
-        bool nowDriving = _driving; // Assume current _driving mode unless...
-        if (speed > MIN_DRIVING) { // ... we're definitely moving...
-            nowDriving = true;
-        } else if (speed < MAX_NOT_DRIVING) { // ... or definitely not.
-            nowDriving = false;
-        }
-        // Check that we're in this new mode for long enough to really trigger a transition.
-        if (nowDriving == _driving) { // If there's no change in state, clear any attempted timer.
-            _timeInPotentialMode = 0;
-        } else if (_timeInPotentialMode == 0) { // We've just changed with no timer, so start timing now.
-            _timeInPotentialMode = usecTimestampNow();
-        } else if ((usecTimestampNow() - _timeInPotentialMode) > (nowDriving ? REQUIRED_USECS_IN_NEW_MODE_BEFORE_INVISIBLE : REQUIRED_USECS_IN_NEW_MODE_BEFORE_VISIBLE)) {
-            _timeInPotentialMode = 0; // a real transition
-            if (nowDriving) {
-                _wantsOverlays = Menu::getInstance()->isOptionChecked(MenuOption::Overlays);
-            } else { // reset when coming out of driving
-                _mode = FLAT; // Seems appropriate to let things reset, below, after the following.
-                // All reset of, e.g., room-scale location as though by apostrophe key, without all the other adjustments.
-                qApp->getActiveDisplayPlugin()->resetSensors();
-                myAvatar->reset(true, false, false);
-            }
-            if (_wantsOverlays) {
-                setEnabled(!nowDriving);
-            }
-            _driving = nowDriving;
-        } // Else haven't accumulated enough time in new mode, but keep timing.
-    }
 
     Mode newMode;
     if (qApp->isHMDMode()) {
-        newMode = SITTING;
+        if (myAvatar->getClearOverlayWhenDriving()) {
+            newMode = STANDING;
+        } else {
+            newMode = SITTING;
+        }
     } else {
         newMode = FLAT;
     }
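With the velocity hysteresis (MIN_DRIVING / MAX_NOT_DRIVING) and the sensor/avatar reset deleted above, updateMode() is left with mode selection only: STANDING when an HMD is worn and "clear overlay when driving" is enabled, SITTING for other HMD users, FLAT on desktop. A compact restatement under those assumptions (hypothetical helper, not engine code):

// Minimal restatement of the new mode selection.
enum class OverlayMode { FLAT, SITTING, STANDING };

OverlayMode pickMode(bool hmdMode, bool clearOverlayWhenDriving) {
    if (!hmdMode) {
        return OverlayMode::FLAT;                              // desktop: 2D overlay
    }
    return clearOverlayWhenDriving ? OverlayMode::STANDING     // HUD re-centers on demand
                                   : OverlayMode::SITTING;     // HUD stays at the sensor-space origin
}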
@@ -117,11 +112,10 @@ void OverlayConductor::updateMode() {
         qApp->getApplicationCompositor().setModelTransform(Transform());
         break;
     }
-    case STANDING: { // STANDING mode is not currently used.
+    case STANDING: {
         // enter the STANDING state
         // place the overlay at the current hmd position in world space
-        auto camMat = cancelOutRollAndPitch(myAvatar->getSensorToWorldMatrix() * qApp->getHMDSensorPose());
-        qApp->getApplicationCompositor().setModelTransform(Transform(camMat));
+        Transform hmdTransform(cancelOutRollAndPitch(qApp->getHMDSensorPose()));
+        qApp->getApplicationCompositor().setModelTransform(hmdTransform);
         break;
     }
@@ -132,7 +126,6 @@ void OverlayConductor::updateMode() {
     }
 
     _mode = newMode;
-
 }
 
 void OverlayConductor::setEnabled(bool enabled) {
OverlayConductor.h

@@ -19,6 +19,7 @@ public:
     void update(float dt);
     void setEnabled(bool enable);
     bool getEnabled() const;
+    bool shouldCenterUI() const;
 
 private:
     void updateMode();
CompositorHelper.cpp

@@ -336,7 +336,9 @@ void CompositorHelper::computeHmdPickRay(const glm::vec2& cursorPos, glm::vec3&
 }
 
 glm::mat4 CompositorHelper::getUiTransform() const {
-    return _currentCamera * glm::inverse(_currentDisplayPlugin->getHeadPose());
+    glm::mat4 modelMat;
+    _modelTransform.getMatrix(modelMat);
+    return _currentCamera * glm::inverse(_currentDisplayPlugin->getHeadPose()) * modelMat;
 }
 
 //Finds the collision point of a world space ray
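The change above is the ray-pick half of the modelTransform fix: getUiTransform() now folds the overlay's model matrix into the UI transform, so picking uses the same placement as rendering. Schematically (glm only, hypothetical name, not the engine's code):

#include <glm/glm.hpp>

// The composed UI transform now includes the overlay's model matrix.
glm::mat4 composeUiTransform(const glm::mat4& camera, const glm::mat4& headPose,
                             const glm::mat4& overlayModel) {
    return camera * glm::inverse(headPose) * overlayModel;
}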
HmdDisplayPlugin.cpp

@@ -253,12 +253,13 @@ void HmdDisplayPlugin::compositeScene() {
 void HmdDisplayPlugin::compositeOverlay() {
     using namespace oglplus;
     auto compositorHelper = DependencyManager::get<CompositorHelper>();
+    glm::mat4 modelMat = compositorHelper->getModelTransform().getMatrix();
 
     useProgram(_program);
     _sphereSection->Use();
     for_each_eye([&](Eye eye) {
         eyeViewport(eye);
-        auto modelView = glm::inverse(_currentPresentFrameInfo.presentPose * getEyeToHeadTransform(eye));
+        auto modelView = glm::inverse(_currentPresentFrameInfo.presentPose * getEyeToHeadTransform(eye)) * modelMat;
         auto mvp = _eyeProjections[eye] * modelView;
         Uniform<glm::mat4>(*_program, _mvpUniform).Set(mvp);
         _sphereSection->Draw();
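This is the rendering half of the same fix: the overlay sphere's model-view matrix now includes the compositor's model matrix, so the sphere is drawn where the re-centered transform says it is. The per-eye composition, restated with hypothetical names (glm only, a sketch rather than the plugin's code):

#include <glm/glm.hpp>

// projection * view * model, where the view is the inverse of the eye's pose.
glm::mat4 overlayMvp(const glm::mat4& eyeProjection, const glm::mat4& presentPose,
                     const glm::mat4& eyeToHead, const glm::mat4& overlayModel) {
    glm::mat4 view = glm::inverse(presentPose * eyeToHead);
    return eyeProjection * view * overlayModel;
}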