Correct controller offsets with sensor scaling

commit d62a0ea8ff
parent ffdb10681e

5 changed files with 23 additions and 15 deletions
@@ -2478,9 +2478,9 @@ void Application::paintGL() {
     auto hmdInterface = DependencyManager::get<HMDScriptingInterface>();
     float ipdScale = hmdInterface->getIPDScale();
 
-    // scale IPD by height ratio, to make the world seem larger or smaller accordingly.
-    float heightRatio = getMyAvatar()->getEyeHeight() / getMyAvatar()->getUserEyeHeight();
-    ipdScale *= heightRatio;
+    // scale IPD by sensorToWorldScale, to make the world seem larger or smaller accordingly.
+    float sensorToWorldScale = getMyAvatar()->getSensorToWorldScale();
+    ipdScale *= sensorToWorldScale;
 
     mat4 eyeProjections[2];
     {
@@ -2509,7 +2509,7 @@ void Application::paintGL() {
             // adjust near clip plane by heightRatio
             auto baseProjection = glm::perspective(renderArgs.getViewFrustum().getFieldOfView(),
                                                    renderArgs.getViewFrustum().getAspectRatio(),
-                                                   renderArgs.getViewFrustum().getNearClip() * heightRatio,
+                                                   renderArgs.getViewFrustum().getNearClip() * sensorToWorldScale,
                                                    renderArgs.getViewFrustum().getFarClip());
 
             // FIXME we probably don't need to set the projection matrix every frame,
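Both hunks above apply the same factor: the IPD and the near clip plane are multiplied by the avatar's sensor-to-world scale so stereo rendering stays consistent with the avatar's size. A stand-alone sketch of that arithmetic, with purely illustrative numbers (none of these values come from the engine):

// Illustrative numbers only; the real values come from the HMD and the view frustum.
var sensorToWorldScale = 2.0;   // avatar rendered at twice the user's real-world size
var ipdMeters = 0.064;          // a typical real-world interpupillary distance
var nearClipMeters = 0.08;      // an example near clip distance

// The commit multiplies both by the same scale, as in the hunks above.
var scaledIpd = ipdMeters * sensorToWorldScale;            // 0.128
var scaledNearClip = nearClipMeters * sensorToWorldScale;  // 0.16
print("scaled IPD: " + scaledIpd + ", scaled near clip: " + scaledNearClip);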
@@ -679,7 +679,6 @@ void MyAvatar::updateSensorToWorldMatrix() {
     }
 
     _sensorToWorldMatrixCache.set(_sensorToWorldMatrix);
-    _sensorToWorldScaleCache.set(sensorToWorldScale);
 
     updateJointFromController(controller::Action::LEFT_HAND, _controllerLeftHandMatrixCache);
     updateJointFromController(controller::Action::RIGHT_HAND, _controllerRightHandMatrixCache);
@@ -2577,8 +2576,8 @@ glm::mat4 MyAvatar::deriveBodyFromHMDSensor() const {
     glm::vec3 headToNeck = headOrientation * Quaternions::Y_180 * (localNeck - localHead);
     glm::vec3 neckToRoot = headOrientationYawOnly * Quaternions::Y_180 * -localNeck;
 
-    float invHeightRatio = getUserEyeHeight() / getEyeHeight();
-    glm::vec3 bodyPos = headPosition + invHeightRatio * (headToNeck + neckToRoot);
+    float invSensorToWorldScale = getUserEyeHeight() / getEyeHeight();
+    glm::vec3 bodyPos = headPosition + invSensorToWorldScale * (headToNeck + neckToRoot);
 
     return createMatFromQuatAndPos(headOrientationYawOnly, bodyPos);
 }
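The rename in deriveBodyFromHMDSensor() suggests how the scale is defined: getUserEyeHeight() / getEyeHeight() is now called invSensorToWorldScale, i.e. the sensor-to-world scale behaves like the ratio of the avatar's eye height to the user's real eye height, and the neck/root offsets are multiplied by the inverse before being added to the HMD-derived head position. A small sketch with made-up heights (runs in the Interface script engine, where Vec3 is available):

// Made-up heights, purely to illustrate the relationship the rename above implies.
var avatarEyeHeight = 3.0;   // the scaled avatar's eye height, in world units (illustrative)
var userEyeHeight = 1.5;     // the user's real eye height above the floor, in meters (illustrative)

var sensorToWorldScale = avatarEyeHeight / userEyeHeight;      // 2.0
var invSensorToWorldScale = userEyeHeight / avatarEyeHeight;   // 0.5, as in deriveBodyFromHMDSensor()

// An offset is shrunk by the inverse scale before being added to the head position,
// mirroring the bodyPos computation in the hunk above (offset values are illustrative).
var headToNeck = { x: 0.0, y: -0.12, z: 0.0 };
var scaledOffset = Vec3.multiply(invSensorToWorldScale, headToNeck);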
@@ -2401,6 +2401,11 @@ glm::mat4 AvatarData::getSensorToWorldMatrix() const {
     return _sensorToWorldMatrixCache.get();
 }
 
+// thread-safe
+float AvatarData::getSensorToWorldScale() const {
+    return extractUniformScale(_sensorToWorldMatrixCache.get());
+}
+
 // thread-safe
 glm::mat4 AvatarData::getControllerLeftHandMatrix() const {
     return _controllerLeftHandMatrixCache.get();
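Rather than keeping a separately cached float (the line removed from MyAvatar::updateSensorToWorldMatrix() above), the new getter derives the scale on demand from the cached sensor-to-world matrix via extractUniformScale. Conceptually, a uniform scale can be read back from a column-major 4x4 transform as the length of one of its basis columns. A minimal, stand-alone sketch of that idea (this is not the engine's extractUniformScale implementation):

// Stand-alone illustration: recover a uniform scale from a flat, column-major
// 4x4 matrix by measuring the length of its first basis column.
function uniformScaleFromMat4(m) {
    var x = m[0], y = m[1], z = m[2];          // first column of the upper-left 3x3
    return Math.sqrt(x * x + y * y + z * z);   // its length is the uniform scale
}

// A transform that scales uniformly by 2 and translates by (1, 2, 3):
var m = [2, 0, 0, 0,
         0, 2, 0, 0,
         0, 0, 2, 0,
         1, 2, 3, 1];
print(uniformScaleFromMat4(m)); // 2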
@@ -385,6 +385,8 @@ class AvatarData : public QObject, public SpatiallyNestable {
     Q_PROPERTY(glm::mat4 controllerLeftHandMatrix READ getControllerLeftHandMatrix)
     Q_PROPERTY(glm::mat4 controllerRightHandMatrix READ getControllerRightHandMatrix)
 
+    Q_PROPERTY(float sensorToWorldScale READ getSensorToWorldScale)
+
 public:
 
     virtual QString getName() const override { return QString("Avatar:") + _displayName; }
@@ -617,6 +619,7 @@ public:
 
     // thread safe
     Q_INVOKABLE glm::mat4 getSensorToWorldMatrix() const;
+    Q_INVOKABLE float getSensorToWorldScale() const;
     Q_INVOKABLE glm::mat4 getControllerLeftHandMatrix() const;
     Q_INVOKABLE glm::mat4 getControllerRightHandMatrix() const;
 
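With the Q_PROPERTY and Q_INVOKABLE declarations above, client scripts can read the scale as MyAvatar.sensorToWorldScale (the Q_INVOKABLE getter should also be callable directly). A typical use, mirroring the script changes below, is converting an offset authored in real-world meters into world units; the offset values and names here are illustrative:

// Reading the newly exposed scale from a client script.
var scale = MyAvatar.sensorToWorldScale;

// An offset authored in real-world meters (illustrative values), converted into
// world units so it tracks the avatar's current scale.
var OFFSET_IN_SENSOR_METERS = { x: 0.04, y: 0.13, z: 0.039 };
var offsetInWorldUnits = Vec3.multiply(scale, OFFSET_IN_SENSOR_METERS);
print("sensorToWorldScale = " + scale);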
@@ -19,14 +19,15 @@ getGrabCommunications = function getFarGrabCommunications() {
 var GRAB_POINT_SPHERE_OFFSET = { x: 0.04, y: 0.13, z: 0.039 }; // x = upward, y = forward, z = lateral
 
 getGrabPointSphereOffset = function(handController) {
-    if (handController === Controller.Standard.RightHand) {
-        return GRAB_POINT_SPHERE_OFFSET;
+    var offset = GRAB_POINT_SPHERE_OFFSET;
+    if (handController === Controller.Standard.LeftHand) {
+        offset = {
+            x: -GRAB_POINT_SPHERE_OFFSET.x,
+            y: GRAB_POINT_SPHERE_OFFSET.y,
+            z: GRAB_POINT_SPHERE_OFFSET.z
+        };
     }
-    return {
-        x: GRAB_POINT_SPHERE_OFFSET.x * -1,
-        y: GRAB_POINT_SPHERE_OFFSET.y,
-        z: GRAB_POINT_SPHERE_OFFSET.z
-    };
+    return Vec3.multiply(MyAvatar.sensorToWorldScale, offset);
 };
 
 // controllerWorldLocation is where the controller would be, in-world, with an added offset
@@ -53,7 +54,7 @@ getControllerWorldLocation = function (handController, doOffset) {
 
     } else if (!HMD.isHandControllerAvailable()) {
         // NOTE: keep this offset in sync with scripts/system/controllers/handControllerPointer.js:493
-        var VERTICAL_HEAD_LASER_OFFSET = 0.1;
+        var VERTICAL_HEAD_LASER_OFFSET = 0.1 * MyAvatar.sensorToWorldScale;
         position = Vec3.sum(Camera.position, Vec3.multiplyQbyV(Camera.orientation, {x: 0, y: VERTICAL_HEAD_LASER_OFFSET, z: 0}));
         orientation = Quat.multiply(Camera.orientation, Quat.angleAxis(-90, { x: 1, y: 0, z: 0 }));
         valid = true;
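A short usage sketch of the changed helper; it assumes this library has been loaded first (for example via Script.include) so that getGrabPointSphereOffset and the Controller globals are in scope:

// Usage sketch; assumes the library above has already been included.
var rightOffset = getGrabPointSphereOffset(Controller.Standard.RightHand);
var leftOffset = getGrabPointSphereOffset(Controller.Standard.LeftHand);

// The left-hand offset mirrors x; both are now multiplied by
// MyAvatar.sensorToWorldScale instead of being fixed real-world meters.
print(JSON.stringify(rightOffset) + " / " + JSON.stringify(leftOffset));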