Refactor of MyAvatar and Application HMD sensor

MyAvatar now uses a cached copy of the HMD sensor pose,
instead of calling into Application for every access.
This allows us to simplify Application to always return the
most recent pose, instead of a different one pre- and post-physics.

Added HMD sensor accessors to MyAvatar. Besides being helpful,
this is also an optimization: it avoids extracting quaternions from the
HMD matrix on every access.
Anthony J. Thibault 2015-07-13 12:56:33 -07:00
parent ac9e1384c0
commit d07a02cef3
6 changed files with 88 additions and 88 deletions
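In essence, the change moves the HMD pose cache from Application into MyAvatar and decomposes the matrix once per write. A minimal sketch of the pattern, condensed from the diff below (the class name is shortened for illustration, and glm::vec3(m[3]) stands in for hifi's extractTranslation() helper):

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

class AvatarSketch {
public:
    // Called once per frame by Application, after the physics step.
    void setHMDSensorMatrix(const glm::mat4& hmdSensorMatrix) {
        _hmdSensorMatrix = hmdSensorMatrix;
        // Decompose once on write, so reads never extract a quaternion.
        _hmdSensorPosition = glm::vec3(hmdSensorMatrix[3]);
        _hmdSensorOrientation = glm::quat_cast(hmdSensorMatrix);
    }

    // Reads are cheap inline returns of the cached values.
    const glm::mat4& getHMDSensorMatrix() const { return _hmdSensorMatrix; }
    const glm::vec3& getHMDSensorPosition() const { return _hmdSensorPosition; }
    const glm::quat& getHMDSensorOrientation() const { return _hmdSensorOrientation; }

private:
    glm::mat4 _hmdSensorMatrix;      // full HMD pose, in sensor space
    glm::vec3 _hmdSensorPosition;    // cached translation of _hmdSensorMatrix
    glm::quat _hmdSensorOrientation; // cached rotation of _hmdSensorMatrix
};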

Application.cpp

@@ -962,14 +962,7 @@ void Application::paintGL() {
// If not using an HMD, grab the camera orientation directly
_myCamera.setRotation(_myAvatar->getHead()->getCameraOrientation());
} else {
// In an HMD, people can look up and down with their actual neck, and the
// per-eye HMD pose will be applied later. So set the camera orientation
// to only the yaw, excluding pitch and roll, i.e. an orientation that
// is orthogonal to the (body's) Y axis
//_myCamera.setRotation(_myAvatar->getWorldAlignedOrientation());
// AJT: no actually we do want the roll and pitch
_myCamera.setRotation(glm::quat_cast(_myAvatar->getSensorToWorldMat()) * getHeadOrientation());
_myCamera.setRotation(glm::quat_cast(_myAvatar->getSensorToWorldMatrix() * getHMDSensorPose()));
}
/*
@@ -2608,7 +2601,7 @@ void Application::update(float deltaTime) {
}
auto userInputMapper = DependencyManager::get<UserInputMapper>();
userInputMapper->setSensorToWorldMat(_myAvatar->getSensorToWorldMat());
userInputMapper->setSensorToWorldMat(_myAvatar->getSensorToWorldMatrix());
userInputMapper->update(deltaTime);
_keyboardMouseDevice.update();
@@ -2688,10 +2681,8 @@ void Application::update(float deltaTime) {
_physicsEngine.stepSimulation();
_entities.getTree()->unlock();
// AJT: FIXME due to a sensitive order of operations within MyAvatar,
// delay the head pose until after the physics step.
_headPosition = glm::vec3(getActiveDisplayPlugin()->getHeadPose()[3]);
_headOrientation = glm::quat_cast(getActiveDisplayPlugin()->getHeadPose());
// update the avatar with the current HMD pose
_myAvatar->setHMDSensorMatrix(getHMDSensorPose());
if (_physicsEngine.hasOutgoingChanges()) {
_entities.getTree()->lockForWrite();
@@ -2821,7 +2812,7 @@ void Application::setPalmData(Hand* hand, UserInputMapper::PoseValue pose, int i
// transform from sensor space, to world space, to avatar model space.
glm::mat4 poseMat = createMatFromQuatAndPos(pose.getRotation(), pose.getTranslation());
glm::mat4 sensorToWorldMat = _myAvatar->getSensorToWorldMat();
glm::mat4 sensorToWorldMat = _myAvatar->getSensorToWorldMatrix();
glm::mat4 modelMat = createMatFromQuatAndPos(_myAvatar->getOrientation(), _myAvatar->getPosition());
glm::mat4 objectPose = glm::inverse(modelMat) * sensorToWorldMat * poseMat;
@@ -5018,14 +5009,6 @@ void Application::initPlugins() {
void Application::shutdownPlugins() {
}
glm::vec3 Application::getHeadPosition() const {
return _headPosition;
}
glm::quat Application::getHeadOrientation() const {
return _headOrientation;
}
glm::uvec2 Application::getCanvasSize() const {
return getActiveDisplayPlugin()->getRecommendedUiSize();
}
@@ -5159,8 +5142,8 @@ GlWindow* Application::getVisibleWindow() {
mat4 Application::getEyeProjection(int eye) const {
if (isHMDMode()) {
return getActiveDisplayPlugin()->getProjection((Eye)eye, _viewFrustum.getProjection());
}
}
return _viewFrustum.getProjection();
}
@@ -5172,7 +5155,7 @@ mat4 Application::getEyePose(int eye) const {
return mat4();
}
mat4 Application::getHeadPose() const {
mat4 Application::getHMDSensorPose() const {
if (isHMDMode()) {
return getActiveDisplayPlugin()->getHeadPose();
}
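Taken together, the Application side reduces to the flow sketched below (condensed from the hunks above, not the full methods; the non-HMD fallback of getHMDSensorPose() is assumed to mirror getEyePose()):

void Application::update(float deltaTime) {
    // ... input, entity, and simulation work elided ...
    _physicsEngine.stepSimulation();

    // update the avatar with the current HMD pose
    _myAvatar->setHMDSensorMatrix(getHMDSensorPose());
    // everything after this point reads MyAvatar's cached copy
}

mat4 Application::getHMDSensorPose() const {
    if (isHMDMode()) {
        return getActiveDisplayPlugin()->getHeadPose();
    }
    return mat4(); // identity when not in an HMD
}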

Application.h

@@ -327,9 +327,7 @@ public:
// rendering of several elements depend on that
// TODO: carry that information on the Camera as a setting
bool isHMDMode() const;
glm::quat getHeadOrientation() const;
glm::vec3 getHeadPosition() const;
glm::mat4 getHeadPose() const;
glm::mat4 getHMDSensorPose() const;
glm::mat4 getEyePose(int eye) const;
glm::mat4 getEyeProjection(int eye) const;
@@ -670,9 +668,6 @@ private:
ApplicationOverlay _applicationOverlay;
ApplicationCompositor _compositor;
glm::vec3 _headPosition;
glm::quat _headOrientation;
int _oldHandMouseX[2];
int _oldHandMouseY[2];
bool _oldHandLeftClick[2];

Head.cpp

@@ -337,19 +337,20 @@ void Head::setCorrectedLookAtPosition(glm::vec3 correctedLookAtPosition) {
glm::quat Head::getCameraOrientation() const {
// NOTE: Head::getCameraOrientation() is not used for orienting the camera "view" while in Oculus mode, so
// you may wonder why this code is here. This method will be called while in Oculus mode to determine how
// to change the driving direction while in Oculus mode. It is used to support driving toward where your
// to change the driving direction while in Oculus mode. It is used to support driving toward where your
// head is looking. Note that in Oculus mode, your actual camera view and where your head is looking are not
// always the same.
MyAvatar* myAvatar = dynamic_cast<MyAvatar*>(_owningAvatar);
if (qApp->isHMDMode() && myAvatar) {
if (isRoomTracking) {
return glm::quat_cast(myAvatar->getSensorToWorldMat()) * qApp->getHeadOrientation();
if (qApp->isHMDMode()) {
MyAvatar* myAvatar = dynamic_cast<MyAvatar*>(_owningAvatar);
if (isRoomTracking && myAvatar) {
return glm::quat_cast(myAvatar->getSensorToWorldMatrix()) * myAvatar->getHMDSensorOrientation();
} else {
return getOrientation();
}
} else {
Avatar* owningAvatar = static_cast<Avatar*>(_owningAvatar);
return owningAvatar->getWorldAlignedOrientation() * glm::quat(glm::radians(glm::vec3(_basePitch, 0.0f, 0.0f)));
}
Avatar* owningAvatar = static_cast<Avatar*>(_owningAvatar);
return owningAvatar->getWorldAlignedOrientation() * glm::quat(glm::radians(glm::vec3(_basePitch, 0.0f, 0.0f)));
}
glm::quat Head::getEyeRotation(const glm::vec3& eyePosition) const {

MyAvatar.cpp

@@ -104,7 +104,10 @@ MyAvatar::MyAvatar() :
DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES),
_firstPersonSkeletonModel(this),
_prevShouldDrawHead(true),
_sensorToWorldMat()
_hmdSensorMatrix(),
_hmdSensorPosition(),
_hmdSensorOrientation(),
_sensorToWorldMatrix()
{
_firstPersonSkeletonModel.setIsFirstPerson(true);
@@ -246,6 +249,12 @@ void MyAvatar::simulate(float deltaTime) {
maybeUpdateBillboard();
}
void MyAvatar::setHMDSensorMatrix(const glm::mat4& hmdSensorMatrix) {
_hmdSensorMatrix = hmdSensorMatrix;
_hmdSensorPosition = extractTranslation(hmdSensorMatrix);
_hmdSensorOrientation = glm::quat_cast(hmdSensorMatrix);
}
// Update avatar head rotation with sensor data
void MyAvatar::updateFromTrackers(float deltaTime) {
glm::vec3 estimatedPosition, estimatedRotation;
@@ -257,7 +266,7 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
}
if (inHmd) {
estimatedPosition = qApp->getHeadPosition();
estimatedPosition = extractTranslation(getHMDSensorMatrix());
estimatedPosition.x *= -1.0f;
_trackedHeadPosition = estimatedPosition;
@@ -1291,12 +1300,13 @@ void MyAvatar::updateOrientation(float deltaTime) {
glm::quat twist = glm::quat(glm::radians(glm::vec3(0.0f, _bodyYawDelta, 0.0f) * deltaTime));
// AJT: FIXME move this into a method. also, why doesn't plain ole getOrientation, setOrientation work here?
// update sensor mat, so that rotation will take effect when room tracking.
glm::vec3 bodyPosition = calcBodyPositionFromSensors();
glm::quat bodyOrientation = calcBodyOrientationFromSensors();
glm::mat4 bodyMat = createMatFromQuatAndPos(bodyOrientation, bodyPosition);
glm::mat4 sensorOffset = bodyMat * glm::mat4_cast(twist) * glm::inverse(bodyMat);
_sensorToWorldMat = sensorOffset * _sensorToWorldMat;
_sensorToWorldMatrix = sensorOffset * _sensorToWorldMatrix;
if (!(qApp->isHMDMode() && isRoomTracking)) {
setOrientation(twist * getOrientation());
@@ -1313,7 +1323,7 @@ void MyAvatar::updateOrientation(float deltaTime) {
if (qApp->isHMDMode()) {
// these angles will be in radians
glm::quat orientation = glm::quat_cast(_sensorToWorldMat) * qApp->getHeadOrientation();
glm::quat orientation = glm::quat_cast(getSensorToWorldMatrix() * getHMDSensorMatrix());
glm::quat localOrientation = glm::inverse(bodyOrientation) * orientation;
// ... so they need to be converted to degrees before we do math...
glm::vec3 euler = glm::eulerAngles(localOrientation) * DEGREES_PER_RADIAN;
@@ -1427,7 +1437,7 @@ glm::vec3 MyAvatar::applyKeyboardMotor(float deltaTime, const glm::vec3& localVe
}
}
}
float boomChange = _driveKeys[BOOM_OUT] - _driveKeys[BOOM_IN];
_boomLength += 2.0f * _boomLength * boomChange + boomChange * boomChange;
_boomLength = glm::clamp<float>(_boomLength, ZOOM_MIN, ZOOM_MAX);
@@ -1590,7 +1600,7 @@ void MyAvatar::goToLocation(const glm::vec3& newPosition,
// Set the orientation of the sensor room, not the avatar itself.
glm::mat4 m;
m[3] = glm::vec4(newPosition, 1);
_sensorToWorldMat = m;
_sensorToWorldMatrix = m;
} else {
glm::vec3 shiftedPosition = newPosition;
if (hasOrientation) {
@@ -1698,8 +1708,8 @@ void MyAvatar::setPosition(const glm::vec3 position, bool overideReferential) {
// position when driven from the head.
glm::vec3 bodyPos = calcBodyPositionFromSensors();
glm::vec3 desiredPos = position;
glm::vec3 sensorPos(_sensorToWorldMat[3]);
_sensorToWorldMat[3] = glm::vec4(desiredPos - bodyPos + sensorPos, 1);
glm::vec3 sensorPos = extractTranslation(_sensorToWorldMatrix);
_sensorToWorldMatrix[3] = glm::vec4(desiredPos - bodyPos + sensorPos, 1);
setAvatarPosition(position);
}
@@ -1712,15 +1722,15 @@ void MyAvatar::setOrientation(const glm::quat& orientation, bool overideReferent
glm::quat bodyOrientation = calcBodyOrientationFromSensors();
glm::mat4 bodyMat = createMatFromQuatAndPos(bodyOrientation, bodyPos);
glm::mat4 desiredMat = createMatFromQuatAndPos(orientation, bodyPos);
_sensorToWorldMat = desiredMat * glm::inverse(bodyMat) * _sensorToWorldMat;
_sensorToWorldMatrix = desiredMat * glm::inverse(bodyMat) * _sensorToWorldMatrix;
setAvatarOrientation(orientation);
}
glm::vec3 MyAvatar::calcBodyPositionFromSensors() const {
// hmd is in sensor space.
const glm::vec3 hmdPosition = qApp->getHeadPosition();
const glm::quat hmdOrientation = qApp->getHeadOrientation();
const glm::vec3 hmdPosition = getHMDSensorPosition();
const glm::quat hmdOrientation = getHMDSensorOrientation();
const glm::quat hmdOrientationYawOnly = cancelOutRollAndPitch(hmdOrientation);
// In sensor space, figure out where the avatar body should be,
@@ -1734,13 +1744,13 @@ glm::vec3 MyAvatar::calcBodyPositionFromSensors() const {
glm::vec3 roomBodyPos = hmdPosition + eyeToNeck + neckToRoot;
// now convert from sensor space into world coordinates
return transformPoint(_sensorToWorldMat, roomBodyPos);
return transformPoint(_sensorToWorldMatrix, roomBodyPos);
}
glm::quat MyAvatar::calcBodyOrientationFromSensors() const {
const glm::quat hmdOrientation = qApp->getHeadOrientation();
const glm::quat hmdOrientation = getHMDSensorOrientation();
const glm::quat hmdOrientationYawOnly = cancelOutRollAndPitch(hmdOrientation);
// TODO: do a better calculation of bodyOrientation; for now just use HMD facing.
return glm::quat_cast(_sensorToWorldMat) * hmdOrientationYawOnly;
return glm::quat_cast(_sensorToWorldMatrix) * hmdOrientationYawOnly;
}
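Both calc helpers above rely on cancelOutRollAndPitch() to reduce the HMD orientation to a yaw-only facing before composing it with _sensorToWorldMatrix. A minimal sketch of one way such a helper can work, as an illustration only (this is an assumption, not necessarily hifi's implementation):

#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

glm::quat cancelOutRollAndPitchSketch(const glm::quat& q) {
    // Rotate the forward axis and project it onto the horizontal plane;
    // the direction that survives encodes only the yaw of q.
    glm::vec3 forward = q * glm::vec3(0.0f, 0.0f, -1.0f); // -Z is forward
    forward.y = 0.0f;
    if (glm::dot(forward, forward) < 1.0e-6f) {
        return glm::quat(1.0f, 0.0f, 0.0f, 0.0f); // looking straight up/down: yaw undefined
    }
    float yaw = std::atan2(-forward.x, -forward.z);
    return glm::angleAxis(yaw, glm::vec3(0.0f, 1.0f, 0.0f));
}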

MyAvatar.h

@@ -39,6 +39,17 @@ public:
void preRender(RenderArgs* renderArgs);
void updateFromTrackers(float deltaTime);
void setHMDSensorMatrix(const glm::mat4& hmdSensorMatrix);
const glm::mat4& getHMDSensorMatrix() const { return _hmdSensorMatrix; }
const glm::vec3& getHMDSensorPosition() const { return _hmdSensorPosition; }
const glm::quat& getHMDSensorOrientation() const { return _hmdSensorOrientation; }
glm::mat4 getSensorToWorldMatrix() const { return _sensorToWorldMatrix; }
// these are overridden, because they must update the sensor matrix
virtual void setPosition(const glm::vec3 position, bool overideReferential = false) override;
virtual void setOrientation(const glm::quat& orientation, bool overideReferential = false) override;
virtual void render(RenderArgs* renderArgs, const glm::vec3& cameraPosition, bool postLighting = false) override;
virtual void renderBody(RenderArgs* renderArgs, ViewFrustum* renderFrustum, bool postLighting, float glowLevel = 0.0f) override;
virtual bool shouldRenderHead(const RenderArgs* renderArgs) const override;
@@ -54,30 +65,30 @@ public:
Q_INVOKABLE glm::vec3 getDefaultEyePosition() const;
bool getShouldRenderLocally() const { return _shouldRender; }
float getRealWorldFieldOfView() { return _realWorldFieldOfView.get(); }
const QList<AnimationHandlePointer>& getAnimationHandles() const { return _animationHandles; }
AnimationHandlePointer addAnimationHandle();
void removeAnimationHandle(const AnimationHandlePointer& handle);
/// Allows scripts to run animations.
Q_INVOKABLE void startAnimation(const QString& url, float fps = 30.0f, float priority = 1.0f, bool loop = false,
bool hold = false, float firstFrame = 0.0f, float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList());
/// Stops an animation as identified by a URL.
Q_INVOKABLE void stopAnimation(const QString& url);
/// Starts an animation by its role, using the provided URL and parameters if the avatar doesn't have a custom
/// animation for the role.
Q_INVOKABLE void startAnimationByRole(const QString& role, const QString& url = QString(), float fps = 30.0f,
float priority = 1.0f, bool loop = false, bool hold = false, float firstFrame = 0.0f,
float lastFrame = FLT_MAX, const QStringList& maskedJoints = QStringList());
/// Stops an animation identified by its role.
Q_INVOKABLE void stopAnimationByRole(const QString& role);
Q_INVOKABLE AnimationDetails getAnimationDetailsByRole(const QString& role);
Q_INVOKABLE AnimationDetails getAnimationDetails(const QString& url);
// get/set avatar data
void saveData();
void loadData();
@@ -89,31 +100,31 @@ public:
void clearDriveKeys();
void setDriveKeys(int key, float val) { _driveKeys[key] = val; };
bool getDriveKeys(int key) { return _driveKeys[key] != 0.0f; };
void relayDriveKeysToCharacterController();
bool isMyAvatar() const { return true; }
bool isLookingAtLeftEye();
virtual int parseDataAtOffset(const QByteArray& packet, int offset);
static void sendKillAvatar();
Q_INVOKABLE glm::vec3 getTrackedHeadPosition() const { return _trackedHeadPosition; }
Q_INVOKABLE glm::vec3 getHeadPosition() const { return getHead()->getPosition(); }
Q_INVOKABLE float getHeadFinalYaw() const { return getHead()->getFinalYaw(); }
Q_INVOKABLE float getHeadFinalRoll() const { return getHead()->getFinalRoll(); }
Q_INVOKABLE float getHeadFinalPitch() const { return getHead()->getFinalPitch(); }
Q_INVOKABLE float getHeadDeltaPitch() const { return getHead()->getDeltaPitch(); }
Q_INVOKABLE glm::vec3 getEyePosition() const { return getHead()->getEyePosition(); }
Q_INVOKABLE glm::vec3 getTargetAvatarPosition() const { return _targetAvatarPosition; }
AvatarWeakPointer getLookAtTargetAvatar() const { return _lookAtTargetAvatar; }
void updateLookAtTargetAvatar();
void clearLookAtTargetAvatar();
virtual void setJointRotations(QVector<glm::quat> jointRotations);
virtual void setJointData(int index, const glm::quat& rotation);
virtual void clearJointData(int index);
@@ -140,7 +151,7 @@ public:
virtual glm::vec3 getSkeletonPosition() const;
void updateLocalAABox();
DynamicCharacterController* getCharacterController() { return &_characterController; }
void clearJointAnimationPriorities();
glm::vec3 getScriptedMotorVelocity() const { return _scriptedMotorVelocity; }
@@ -159,17 +170,17 @@ public:
virtual void attach(const QString& modelURL, const QString& jointName = QString(),
const glm::vec3& translation = glm::vec3(), const glm::quat& rotation = glm::quat(), float scale = 1.0f,
bool allowDuplicates = false, bool useSaved = true);
/// Renders a laser pointer for UI picking
void renderLaserPointers(gpu::Batch& batch);
glm::vec3 getLaserPointerTipPosition(const PalmData* palm);
const RecorderPointer getRecorder() const { return _recorder; }
const PlayerPointer getPlayer() const { return _player; }
float getBoomLength() const { return _boomLength; }
void setBoomLength(float boomLength) { _boomLength = boomLength; }
static const float ZOOM_MIN;
static const float ZOOM_MAX;
static const float ZOOM_DEFAULT;
@@ -178,7 +189,7 @@ public slots:
void increaseSize();
void decreaseSize();
void resetSize();
void goToLocation(const glm::vec3& newPosition,
bool hasOrientation = false, const glm::quat& newOrientation = glm::quat(),
bool shouldFaceLocation = false);
@@ -189,14 +200,14 @@ public slots:
void setThrust(glm::vec3 newThrust) { _thrust = newThrust; }
void updateMotionBehavior();
glm::vec3 getLeftPalmPosition();
glm::vec3 getRightPalmPosition();
void clearReferential();
bool setModelReferential(const QUuid& id);
bool setJointReferential(const QUuid& id, int jointIndex);
bool isRecording();
qint64 recorderElapsed();
void startRecording();
@@ -206,12 +217,6 @@ public slots:
virtual void rebuildSkeletonBody();
// these are overridden, because they must move the sensor mat, such that the avatar will be at the given location.
virtual void setPosition(const glm::vec3 position, bool overideReferential = false) override;
virtual void setOrientation(const glm::quat& orientation, bool overideReferential = false) override;
glm::mat4 getSensorToWorldMat() const { return _sensorToWorldMat; }
signals:
void transformChanged();
void newCollisionSoundURL(const QUrl& url);
@@ -239,7 +244,7 @@ private:
bool _wasPushing;
bool _isPushing;
bool _isBraking;
float _boomLength;
float _trapDuration; // seconds that avatar has been trapped by collisions
@@ -262,16 +267,16 @@ private:
float _oculusYawOffset;
QList<AnimationHandlePointer> _animationHandles;
bool _feetTouchFloor;
bool _isLookingAtLeftEye;
RecorderPointer _recorder;
glm::vec3 _trackedHeadPosition;
Setting::Handle<float> _realWorldFieldOfView;
// private methods
void updateOrientation(float deltaTime);
glm::vec3 applyKeyboardMotor(float deltaTime, const glm::vec3& velocity, bool isHovering);
@@ -285,7 +290,7 @@ private:
QUrl _fullAvatarURLFromPreferences;
QUrl _headURLFromPreferences;
QUrl _skeletonURLFromPreferences;
QString _headModelName;
QString _bodyModelName;
QString _fullAvatarModelName;
@@ -294,7 +299,13 @@ private:
SkeletonModel _firstPersonSkeletonModel;
bool _prevShouldDrawHead;
glm::mat4 _sensorToWorldMat;
// cache of the current HMD sensor position and orientation, in sensor space.
glm::mat4 _hmdSensorMatrix;
glm::vec3 _hmdSensorPosition;
glm::quat _hmdSensorOrientation;
// used to transform any sensor into world space, including the _hmdSensorMatrix or hand controllers.
glm::mat4 _sensorToWorldMatrix;
};
#endif // hifi_MyAvatar_h

ApplicationCompositor.cpp

@@ -350,7 +350,7 @@ void ApplicationCompositor::computeHmdPickRay(glm::vec2 cursorPos, glm::vec3& or
// Intersection UI overlay space
glm::vec3 worldSpaceDirection = overlayOrientation * overlaySpaceDirection;
glm::vec3 worldSpaceIntersection = (glm::normalize(worldSpaceDirection) * _oculusUIRadius) + overlayPosition;
glm::vec3 worldSpaceHeadPosition = (overlayOrientation * glm::vec3(qApp->getHeadPose()[3])) + overlayPosition;
glm::vec3 worldSpaceHeadPosition = (overlayOrientation * extractTranslation(qApp->getHMDSensorPose())) + overlayPosition;
// Intersection in world space
origin = worldSpaceHeadPosition;