Refactor of MyAvatar and Application HMD sensor

MyAvatar now uses a cached copy of the HMD sensor pose instead of calling into Application on every access. This allows us to simplify Application to always return the most recent pose, rather than a different one pre- and post-physics. Added accessors for the HMD sensor pose to MyAvatar; besides being convenient, this is also an optimization that avoids extracting quaternions from the HMD matrix on every access.
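For context, a minimal sketch of the caching pattern this commit introduces, assuming GLM; the standalone class name HMDSensorCache is hypothetical (in the commit itself these members and accessors live on MyAvatar, and extractTranslation() is a library helper):

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

class HMDSensorCache {
public:
    // Called once per frame with the freshest pose, after the physics step.
    void setHMDSensorMatrix(const glm::mat4& hmdSensorMatrix) {
        _hmdSensorMatrix = hmdSensorMatrix;
        _hmdSensorPosition = glm::vec3(hmdSensorMatrix[3]);      // translation column, like extractTranslation()
        _hmdSensorOrientation = glm::quat_cast(hmdSensorMatrix); // rotation part, extracted once
    }

    // Cheap accessors: callers never pay for matrix decomposition on the hot path.
    const glm::mat4& getHMDSensorMatrix() const { return _hmdSensorMatrix; }
    const glm::vec3& getHMDSensorPosition() const { return _hmdSensorPosition; }
    const glm::quat& getHMDSensorOrientation() const { return _hmdSensorOrientation; }

private:
    glm::mat4 _hmdSensorMatrix { 1.0f };
    glm::vec3 _hmdSensorPosition { 0.0f };
    glm::quat _hmdSensorOrientation { 1.0f, 0.0f, 0.0f, 0.0f };
};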
parent ac9e1384c0
commit d07a02cef3
6 changed files with 88 additions and 88 deletions
@@ -962,14 +962,7 @@ void Application::paintGL() {
             // If not using an HMD, grab the camera orientation directly
             _myCamera.setRotation(_myAvatar->getHead()->getCameraOrientation());
         } else {
-            // In an HMD, people can look up and down with their actual neck, and the
-            // per-eye HMD pose will be applied later. So set the camera orientation
-            // to only the yaw, excluding pitch and roll, i.e. an orientation that
-            // is orthongonal to the (body's) Y axis
-            //_myCamera.setRotation(_myAvatar->getWorldAlignedOrientation());
-
-            // AJT: no actually we do want the roll and pitch
-            _myCamera.setRotation(glm::quat_cast(_myAvatar->getSensorToWorldMat()) * getHeadOrientation());
+            _myCamera.setRotation(glm::quat_cast(_myAvatar->getSensorToWorldMatrix() * getHMDSensorPose()));
         }
 
         /*
@@ -2608,7 +2601,7 @@ void Application::update(float deltaTime) {
     }
 
     auto userInputMapper = DependencyManager::get<UserInputMapper>();
-    userInputMapper->setSensorToWorldMat(_myAvatar->getSensorToWorldMat());
+    userInputMapper->setSensorToWorldMat(_myAvatar->getSensorToWorldMatrix());
     userInputMapper->update(deltaTime);
     _keyboardMouseDevice.update();
 
@@ -2688,10 +2681,8 @@ void Application::update(float deltaTime) {
         _physicsEngine.stepSimulation();
         _entities.getTree()->unlock();
 
-        // AJT: FIXME due to sensitve order of operations, within MyAvatar,
-        // delay the head pose until after the physics step.
-        _headPosition = glm::vec3(getActiveDisplayPlugin()->getHeadPose()[3]);
-        _headOrientation = glm::quat_cast(getActiveDisplayPlugin()->getHeadPose());
+        // update the avatar with the current HMD pose
+        _myAvatar->setHMDSensorMatrix(getHMDSensorPose());
 
         if (_physicsEngine.hasOutgoingChanges()) {
             _entities.getTree()->lockForWrite();
@@ -2821,7 +2812,7 @@ void Application::setPalmData(Hand* hand, UserInputMapper::PoseValue pose, int i
 
     // transform from sensor space, to world space, to avatar model space.
     glm::mat4 poseMat = createMatFromQuatAndPos(pose.getRotation(), pose.getTranslation());
-    glm::mat4 sensorToWorldMat = _myAvatar->getSensorToWorldMat();
+    glm::mat4 sensorToWorldMat = _myAvatar->getSensorToWorldMatrix();
     glm::mat4 modelMat = createMatFromQuatAndPos(_myAvatar->getOrientation(), _myAvatar->getPosition());
     glm::mat4 objectPose = glm::inverse(modelMat) * sensorToWorldMat * poseMat;
 
@@ -5018,14 +5009,6 @@ void Application::initPlugins() {
 void Application::shutdownPlugins() {
 }
 
-glm::vec3 Application::getHeadPosition() const {
-    return _headPosition;
-}
-
-glm::quat Application::getHeadOrientation() const {
-    return _headOrientation;
-}
-
 glm::uvec2 Application::getCanvasSize() const {
     return getActiveDisplayPlugin()->getRecommendedUiSize();
 }
@@ -5172,7 +5155,7 @@ mat4 Application::getEyePose(int eye) const {
     return mat4();
 }
 
-mat4 Application::getHeadPose() const {
+mat4 Application::getHMDSensorPose() const {
     if (isHMDMode()) {
         return getActiveDisplayPlugin()->getHeadPose();
     }

@@ -327,9 +327,7 @@ public:
     // rendering of several elements depend on that
     // TODO: carry that information on the Camera as a setting
     bool isHMDMode() const;
-    glm::quat getHeadOrientation() const;
-    glm::vec3 getHeadPosition() const;
-    glm::mat4 getHeadPose() const;
+    glm::mat4 getHMDSensorPose() const;
     glm::mat4 getEyePose(int eye) const;
     glm::mat4 getEyeProjection(int eye) const;
 
@@ -670,9 +668,6 @@ private:
     ApplicationOverlay _applicationOverlay;
     ApplicationCompositor _compositor;
 
-    glm::vec3 _headPosition;
-    glm::quat _headOrientation;
-
     int _oldHandMouseX[2];
     int _oldHandMouseY[2];
     bool _oldHandLeftClick[2];

@@ -340,17 +340,18 @@ glm::quat Head::getCameraOrientation() const {
     // to change the driving direction while in Oculus mode. It is used to support driving toward where you're
     // head is looking. Note that in oculus mode, your actual camera view and where your head is looking is not
     // always the same.
-    if (qApp->isHMDMode()) {
-        if (isRoomTracking) {
-            return glm::quat_cast(myAvatar->getSensorToWorldMat()) * qApp->getHeadOrientation();
+    MyAvatar* myAvatar = dynamic_cast<MyAvatar*>(_owningAvatar);
+    if (qApp->isHMDMode() && myAvatar) {
+        if (isRoomTracking && myAvatar) {
+            return glm::quat_cast(myAvatar->getSensorToWorldMatrix()) * myAvatar->getHMDSensorOrientation();
         } else {
             return getOrientation();
         }
-    }
+    } else {
         Avatar* owningAvatar = static_cast<Avatar*>(_owningAvatar);
         return owningAvatar->getWorldAlignedOrientation() * glm::quat(glm::radians(glm::vec3(_basePitch, 0.0f, 0.0f)));
     }
 }
 
 glm::quat Head::getEyeRotation(const glm::vec3& eyePosition) const {
     glm::quat orientation = getOrientation();

@@ -104,7 +104,10 @@ MyAvatar::MyAvatar() :
     DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES),
     _firstPersonSkeletonModel(this),
     _prevShouldDrawHead(true),
-    _sensorToWorldMat()
+    _hmdSensorMatrix(),
+    _hmdSensorPosition(),
+    _hmdSensorOrientation(),
+    _sensorToWorldMatrix()
 {
     _firstPersonSkeletonModel.setIsFirstPerson(true);
 
@@ -246,6 +249,12 @@ void MyAvatar::simulate(float deltaTime) {
     maybeUpdateBillboard();
 }
 
+void MyAvatar::setHMDSensorMatrix(const glm::mat4& hmdSensorMatrix) {
+    _hmdSensorMatrix = hmdSensorMatrix;
+    _hmdSensorPosition = extractTranslation(hmdSensorMatrix);
+    _hmdSensorOrientation = glm::quat_cast(hmdSensorMatrix);
+}
+
 // Update avatar head rotation with sensor data
 void MyAvatar::updateFromTrackers(float deltaTime) {
     glm::vec3 estimatedPosition, estimatedRotation;
@@ -257,7 +266,7 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
     }
 
     if (inHmd) {
-        estimatedPosition = qApp->getHeadPosition();
+        estimatedPosition = extractTranslation(getHMDSensorMatrix());
         estimatedPosition.x *= -1.0f;
         _trackedHeadPosition = estimatedPosition;
 
@@ -1291,12 +1300,13 @@ void MyAvatar::updateOrientation(float deltaTime) {
 
     glm::quat twist = glm::quat(glm::radians(glm::vec3(0.0f, _bodyYawDelta, 0.0f) * deltaTime));
 
     // AJT: FIXME move this into a method. also, why doesn't plain ole getOrientation, setOrientation work here?
     // update sensor mat, so that rotation will take effect when room tracking.
     glm::vec3 bodyPosition = calcBodyPositionFromSensors();
     glm::quat bodyOrientation = calcBodyOrientationFromSensors();
     glm::mat4 bodyMat = createMatFromQuatAndPos(bodyOrientation, bodyPosition);
     glm::mat4 sensorOffset = bodyMat * glm::mat4_cast(twist) * glm::inverse(bodyMat);
-    _sensorToWorldMat = sensorOffset * _sensorToWorldMat;
+    _sensorToWorldMatrix = sensorOffset * _sensorToWorldMatrix;
 
     if (!(qApp->isHMDMode() && isRoomTracking)) {
         setOrientation(twist * getOrientation());
@@ -1313,7 +1323,7 @@ void MyAvatar::updateOrientation(float deltaTime) {
 
     if (qApp->isHMDMode()) {
         // these angles will be in radians
-        glm::quat orientation = glm::quat_cast(_sensorToWorldMat) * qApp->getHeadOrientation();
+        glm::quat orientation = glm::quat_cast(getSensorToWorldMatrix() * getHMDSensorMatrix());
         glm::quat localOrientation = glm::inverse(bodyOrientation) * orientation;
         // ... so they need to be converted to degrees before we do math...
         glm::vec3 euler = glm::eulerAngles(localOrientation) * DEGREES_PER_RADIAN;
@@ -1590,7 +1600,7 @@ void MyAvatar::goToLocation(const glm::vec3& newPosition,
         // Set the orientation of the sensor room, not the avatar itself.
         glm::mat4 m;
         m[3] = glm::vec4(newPosition, 1);
-        _sensorToWorldMat = m;
+        _sensorToWorldMatrix = m;
     } else {
         glm::vec3 shiftedPosition = newPosition;
         if (hasOrientation) {
@@ -1698,8 +1708,8 @@ void MyAvatar::setPosition(const glm::vec3 position, bool overideReferential) {
     // position when driven from the head.
     glm::vec3 bodyPos = calcBodyPositionFromSensors();
     glm::vec3 desiredPos = position;
-    glm::vec3 sensorPos(_sensorToWorldMat[3]);
-    _sensorToWorldMat[3] = glm::vec4(desiredPos - bodyPos + sensorPos, 1);
+    glm::vec3 sensorPos = extractTranslation(_sensorToWorldMatrix);
+    _sensorToWorldMatrix[3] = glm::vec4(desiredPos - bodyPos + sensorPos, 1);
 
     setAvatarPosition(position);
 }
@@ -1712,15 +1722,15 @@ void MyAvatar::setOrientation(const glm::quat& orientation, bool overideReferent
     glm::quat bodyOrientation = calcBodyOrientationFromSensors();
     glm::mat4 bodyMat = createMatFromQuatAndPos(bodyOrientation, bodyPos);
     glm::mat4 desiredMat = createMatFromQuatAndPos(orientation, bodyPos);
-    _sensorToWorldMat = desiredMat * glm::inverse(bodyMat) * _sensorToWorldMat;
+    _sensorToWorldMatrix = desiredMat * glm::inverse(bodyMat) * _sensorToWorldMatrix;
 
     setAvatarOrientation(orientation);
 }
 
 glm::vec3 MyAvatar::calcBodyPositionFromSensors() const {
     // hmd is in sensor space.
-    const glm::vec3 hmdPosition = qApp->getHeadPosition();
-    const glm::quat hmdOrientation = qApp->getHeadOrientation();
+    const glm::vec3 hmdPosition = getHMDSensorPosition();
+    const glm::quat hmdOrientation = getHMDSensorOrientation();
     const glm::quat hmdOrientationYawOnly = cancelOutRollAndPitch(hmdOrientation);
 
     // In sensor space, figure out where the avatar body should be,
|
@ -1734,13 +1744,13 @@ glm::vec3 MyAvatar::calcBodyPositionFromSensors() const {
|
|||
glm::vec3 roomBodyPos = hmdPosition + eyeToNeck + neckToRoot;
|
||||
|
||||
// now convert from sensor space into world coordinates
|
||||
return transformPoint(_sensorToWorldMat, roomBodyPos);
|
||||
return transformPoint(_sensorToWorldMatrix, roomBodyPos);
|
||||
}
|
||||
|
||||
glm::quat MyAvatar::calcBodyOrientationFromSensors() const {
|
||||
const glm::quat hmdOrientation = qApp->getHeadOrientation();
|
||||
const glm::quat hmdOrientation = getHMDSensorOrientation();
|
||||
const glm::quat hmdOrientationYawOnly = cancelOutRollAndPitch(hmdOrientation);
|
||||
|
||||
// TODO: do a beter calculation of bodyOrientation for now just use hmd facing.
|
||||
return glm::quat_cast(_sensorToWorldMat) * hmdOrientationYawOnly;
|
||||
return glm::quat_cast(_sensorToWorldMatrix) * hmdOrientationYawOnly;
|
||||
}
|
||||
|
|
|
@@ -39,6 +39,17 @@ public:
     void preRender(RenderArgs* renderArgs);
     void updateFromTrackers(float deltaTime);
 
+    void setHMDSensorMatrix(const glm::mat4& hmdSensorMatrix);
+    const glm::mat4& getHMDSensorMatrix() const { return _hmdSensorMatrix; }
+    const glm::vec3& getHMDSensorPosition() const { return _hmdSensorPosition; }
+    const glm::quat& getHMDSensorOrientation() const { return _hmdSensorOrientation; }
+
+    glm::mat4 getSensorToWorldMatrix() const { return _sensorToWorldMatrix; }
+
+    // these are overriden, because they must update the sensor matrix
+    virtual void setPosition(const glm::vec3 position, bool overideReferential = false) override;
+    virtual void setOrientation(const glm::quat& orientation, bool overideReferential = false) override;
+
     virtual void render(RenderArgs* renderArgs, const glm::vec3& cameraPosition, bool postLighting = false) override;
     virtual void renderBody(RenderArgs* renderArgs, ViewFrustum* renderFrustum, bool postLighting, float glowLevel = 0.0f) override;
     virtual bool shouldRenderHead(const RenderArgs* renderArgs) const override;
@@ -206,12 +217,6 @@ public slots:
 
     virtual void rebuildSkeletonBody();
 
-    // these are overriden, because they must move the sensor mat, such that the avatar will be at the given location.
-    virtual void setPosition(const glm::vec3 position, bool overideReferential = false) override;
-    virtual void setOrientation(const glm::quat& orientation, bool overideReferential = false) override;
-
-    glm::mat4 getSensorToWorldMat() const { return _sensorToWorldMat; }
-
 signals:
     void transformChanged();
     void newCollisionSoundURL(const QUrl& url);
@@ -294,7 +299,13 @@ private:
     SkeletonModel _firstPersonSkeletonModel;
     bool _prevShouldDrawHead;
 
-    glm::mat4 _sensorToWorldMat;
+    // cache of the current HMD sensor position and orientation, in sensor space.
+    glm::mat4 _hmdSensorMatrix;
+    glm::vec3 _hmdSensorPosition;
+    glm::quat _hmdSensorOrientation;
+
+    // used to transform any sensor into world space, including the _hmdSensorMat, or hand controllers.
+    glm::mat4 _sensorToWorldMatrix;
 };
 
 #endif // hifi_MyAvatar_h

@@ -350,7 +350,7 @@ void ApplicationCompositor::computeHmdPickRay(glm::vec2 cursorPos, glm::vec3& or
     // Intersection UI overlay space
     glm::vec3 worldSpaceDirection = overlayOrientation * overlaySpaceDirection;
     glm::vec3 worldSpaceIntersection = (glm::normalize(worldSpaceDirection) * _oculusUIRadius) + overlayPosition;
-    glm::vec3 worldSpaceHeadPosition = (overlayOrientation * glm::vec3(qApp->getHeadPose()[3])) + overlayPosition;
+    glm::vec3 worldSpaceHeadPosition = (overlayOrientation * extractTranslation(qApp->getHMDSensorPose())) + overlayPosition;
 
     // Intersection in world space
     origin = worldSpaceHeadPosition;