refactor of myAvatar sensor matrix updating
Much simpler and more maintainable.

* updateFromHMDSensorMatrix is called at the start of Application::update(). It moves the avatar forward, taking head motion in the room into account.
* All other avatar manipulation calls setPosition and setOrientation normally.
* Then, at the end of Application::update(), updateSensorToWorldMatrix is called. This ensures that rendering will use the correct camera position w.r.t. the body.
parent fb62fda2d2
commit 442b701c9a

3 changed files with 43 additions and 63 deletions
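For orientation, here is the per-frame call order the commit message describes, condensed into a sketch; everything else Application::update() does is elided, and only the three calls shown come from this commit.

    void Application::update(float deltaTime) {
        // start of frame: consume the fresh HMD pose; this moves the avatar's
        // body in world space via ordinary setPosition()/setOrientation().
        _myAvatar->updateFromHMDSensorMatrix(getHMDSensorPose());

        // ... physics, scripts, and everything else may keep calling
        // setPosition()/setOrientation() on the avatar as usual ...

        // end of frame: re-derive the sensor-to-world matrix so the rendering
        // camera lines up with wherever the body ended up this frame.
        _myAvatar->updateSensorToWorldMatrix();
    }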
Application.cpp

@@ -2588,6 +2588,9 @@ void Application::update(float deltaTime) {
     updateLOD();
     updateMouseRay(); // check what's under the mouse and update the mouse voxel

+    // update the avatar with a fresh HMD pose
+    _myAvatar->updateFromHMDSensorMatrix(getHMDSensorPose());
+
     {
         PerformanceTimer perfTimer("devices");
         DeviceTracker::updateAll();
@@ -2691,7 +2694,6 @@ void Application::update(float deltaTime) {
         _entitySimulation.applyActionChanges();
         _entitySimulation.unlock();

-
         AvatarManager* avatarManager = DependencyManager::get<AvatarManager>().data();
         _physicsEngine.deleteObjects(avatarManager->getObjectsToDelete());
         _physicsEngine.addObjects(avatarManager->getObjectsToAdd());
@@ -2701,9 +2703,6 @@ void Application::update(float deltaTime) {
         _physicsEngine.stepSimulation();
         _entities.getTree()->unlock();

-        // update the avatar with the current HMD pose
-        _myAvatar->setHMDSensorMatrix(getHMDSensorPose());
-
         if (_physicsEngine.hasOutgoingChanges()) {
             _entities.getTree()->lockForWrite();
             _entitySimulation.lock();
@@ -2801,6 +2800,9 @@ void Application::update(float deltaTime) {
             QMetaObject::invokeMethod(DependencyManager::get<AudioClient>().data(), "sendDownstreamAudioStatsPacket", Qt::QueuedConnection);
         }
     }
+
+    // update sensorToWorldMatrix for rendering camera.
+    _myAvatar->updateSensorToWorldMatrix();
 }

 void Application::setPalmData(Hand* hand, UserInputMapper::PoseValue pose, int index) {
MyAvatar.cpp

@@ -107,7 +107,6 @@ MyAvatar::MyAvatar() :
     _hmdSensorOrientation(),
     _hmdSensorPosition(),
     _bodySensorMatrix(),
-    _inverseBodySensorMatrix(),
     _sensorToWorldMatrix(),
     _standingHMDSensorMode(false)
 {
@@ -251,13 +250,31 @@ void MyAvatar::simulate(float deltaTime) {
     maybeUpdateBillboard();
 }

-void MyAvatar::setHMDSensorMatrix(const glm::mat4& hmdSensorMatrix) {
-    _hmdSensorMatrix = hmdSensorMatrix;
-    _hmdSensorPosition = extractTranslation(hmdSensorMatrix);
-    _hmdSensorOrientation = glm::quat_cast(hmdSensorMatrix);
-    _bodySensorMatrix = deriveBodyFromHMDSensor();
-    _inverseBodySensorMatrix = glm::inverse(_bodySensorMatrix);
-}
+// best called at start of main loop just after we have a fresh hmd pose.
+// update internal body position from new hmd pose.
+void MyAvatar::updateFromHMDSensorMatrix(const glm::mat4& hmdSensorMatrix) {
+    // update the sensorMatrices based on the new hmd pose
+    _hmdSensorMatrix = hmdSensorMatrix;
+    _hmdSensorPosition = extractTranslation(hmdSensorMatrix);
+    _hmdSensorOrientation = glm::quat_cast(hmdSensorMatrix);
+    _bodySensorMatrix = deriveBodyFromHMDSensor();
+
+    // set the body position/orientation to reflect motion due to the head.
+    auto worldMat = _sensorToWorldMatrix * _bodySensorMatrix;
+    setPosition(extractTranslation(worldMat));
+    setOrientation(glm::quat_cast(worldMat));
+}
+
+// best called at end of main loop, just before rendering.
+// update sensor to world matrix from current body position and hmd sensor.
+// This is so the correct camera can be used for rendering.
+void MyAvatar::updateSensorToWorldMatrix() {
+    // update the sensor mat so that the body position will end up in the desired
+    // position when driven from the head.
+    glm::mat4 desiredMat = createMatFromQuatAndPos(getOrientation(), getPosition());
+    _sensorToWorldMatrix = desiredMat * glm::inverse(_bodySensorMatrix);
+}

 // Update avatar head rotation with sensor data
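The two new functions are deliberately inverses of each other: updateSensorToWorldMatrix() solves sensorToWorld = desiredWorldBody * inverse(bodySensor), and updateFromHMDSensorMatrix() then applies worldBody = sensorToWorld * bodySensor, so a pose set through setPosition()/setOrientation() is reproduced exactly at the start of the next frame. A minimal standalone check of that round trip (assumes GLM; all names below are local to the example, not from the codebase):

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>
    #include <cstdio>

    // build a rigid transform from a rotation and a translation
    static glm::mat4 makeMat(const glm::quat& q, const glm::vec3& p) {
        glm::mat4 m = glm::mat4_cast(q);
        m[3] = glm::vec4(p, 1.0f);
        return m;
    }

    int main() {
        // body pose in sensor (room) space, as derived from the HMD
        glm::mat4 bodySensor = makeMat(glm::quat(glm::vec3(0.0f, 0.7f, 0.0f)),
                                       glm::vec3(0.5f, 0.0f, -1.0f));
        // desired world pose of the body, e.g. set via setPosition()/setOrientation()
        glm::mat4 desired = makeMat(glm::quat(glm::vec3(0.0f, 1.2f, 0.0f)),
                                    glm::vec3(10.0f, 0.0f, 3.0f));

        // what updateSensorToWorldMatrix() computes at end of frame
        glm::mat4 sensorToWorld = desired * glm::inverse(bodySensor);
        // what updateFromHMDSensorMatrix() would then apply next frame
        glm::mat4 worldBody = sensorToWorld * bodySensor;

        glm::vec3 p(worldBody[3]);
        std::printf("world body position: %.1f %.1f %.1f\n", p.x, p.y, p.z); // 10.0 0.0 3.0
        return 0;
    }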
@@ -1330,18 +1347,7 @@ void MyAvatar::updateOrientation(float deltaTime) {
     getHead()->setBasePitch(getHead()->getBasePitch() + (_driveKeys[ROT_UP] - _driveKeys[ROT_DOWN]) * PITCH_SPEED * deltaTime);

     glm::quat twist = glm::quat(glm::radians(glm::vec3(0.0f, _bodyYawDelta, 0.0f) * deltaTime));
-
-    // AJT: FIXME move this into a method. also, why doesn't plain ole getOrientation, setOrientation work here?
-    // update sensor mat, so that rotation will take effect when room tracking.
-    glm::vec3 bodyPosition = getWorldBodyPosition();
-    glm::quat bodyOrientation = getWorldBodyOrientation();
-    glm::mat4 bodyMat = createMatFromQuatAndPos(bodyOrientation, bodyPosition);
-    glm::mat4 sensorOffset = bodyMat * glm::mat4_cast(twist) * glm::inverse(bodyMat);
-    _sensorToWorldMatrix = sensorOffset * _sensorToWorldMatrix;
-
-    if (!(qApp->isHMDMode() && getStandingHMDSensorMode())) {
-        setOrientation(twist * getOrientation());
-    }
+    setOrientation(twist * getOrientation());

     // decay body rotation momentum
     const float BODY_SPIN_FRICTION = 7.5f;
@@ -1353,9 +1359,11 @@ void MyAvatar::updateOrientation(float deltaTime) {
     if (fabs(_bodyYawDelta) < MINIMUM_ROTATION_RATE) { _bodyYawDelta = 0.0f; }

     if (qApp->isHMDMode()) {
-        // these angles will be in radians
         glm::quat orientation = glm::quat_cast(getSensorToWorldMatrix()) * getHMDSensorOrientation();
+        glm::quat bodyOrientation = getWorldBodyOrientation();
+        glm::quat localOrientation = glm::inverse(bodyOrientation) * orientation;
+
+        // these angles will be in radians
         // ... so they need to be converted to degrees before we do math...
         glm::vec3 euler = glm::eulerAngles(localOrientation) * DEGREES_PER_RADIAN;
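The hunk above re-expresses the HMD's world orientation in the body's local frame before reading the head's yaw/pitch/roll from it. A standalone sketch of that decomposition (assumes GLM; the free function and local constant are illustrative, not from the codebase, which has its own DEGREES_PER_RADIAN):

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // express a world-space HMD rotation relative to the body and return it
    // as degrees; glm::eulerAngles yields (pitch, yaw, roll) in radians.
    glm::vec3 headEulerDegrees(const glm::quat& worldHMD, const glm::quat& worldBody) {
        glm::quat localOrientation = glm::inverse(worldBody) * worldHMD;
        const float DEGREES_PER_RADIAN = 57.295779513f;
        return glm::eulerAngles(localOrientation) * DEGREES_PER_RADIAN;
    }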
@@ -1369,9 +1377,6 @@ void MyAvatar::updateOrientation(float deltaTime) {
         head->setBaseYaw(YAW(euler));
         head->setBasePitch(PITCH(euler));
         head->setBaseRoll(ROLL(euler));
-
-        // AJT: FIXME, I might be able to do just a setOrientation here right?
-        Avatar::setOrientation(getWorldBodyOrientation());
     }
 }
@@ -1524,16 +1529,11 @@ void MyAvatar::updatePosition(float deltaTime) {
         // update position ourselves
         applyPositionDelta(deltaTime * _targetVelocity);
         measureMotionDerivatives(deltaTime);
-        setPosition(getPosition()); // seems redundant, but we should do this to update the sensorMat.
     } // else physics will move avatar later

     // update _moving flag based on speed
     const float MOVING_SPEED_THRESHOLD = 0.01f;
     _moving = speed > MOVING_SPEED_THRESHOLD;
-
-    if (qApp->isHMDMode() && getStandingHMDSensorMode()) {
-        Avatar::setPosition(getWorldBodyPosition());
-    }
 }

 void MyAvatar::updateCollisionSound(const glm::vec3 &penetration, float deltaTime, float frequency) {
@@ -1730,32 +1730,12 @@ void MyAvatar::relayDriveKeysToCharacterController() {
     }
 }

-void MyAvatar::setPosition(const glm::vec3 position, bool overideReferential) {
-
-    // update the sensor mat so that the body position will end up in the desired
-    // position when driven from the head.
-    glm::mat4 desiredMat = createMatFromQuatAndPos(getOrientation(), position);
-    _sensorToWorldMatrix = desiredMat * _inverseBodySensorMatrix;
-
-    Avatar::setPosition(position);
-}
-
-void MyAvatar::setOrientation(const glm::quat& orientation, bool overideReferential) {
-
-    // update the sensor mat so that the body position will end up in the desired
-    // position when driven from the head.
-    glm::mat4 desiredMat = createMatFromQuatAndPos(orientation, getPosition());
-    _sensorToWorldMatrix = desiredMat * _inverseBodySensorMatrix;
-
-    Avatar::setOrientation(orientation);
-}
-
 glm::vec3 MyAvatar::getWorldBodyPosition() const {
     return transformPoint(_sensorToWorldMatrix, extractTranslation(_bodySensorMatrix));
 }

 glm::quat MyAvatar::getWorldBodyOrientation() const {
-    return glm::quat_cast(_sensorToWorldMatrix *_bodySensorMatrix);
+    return glm::quat_cast(_sensorToWorldMatrix * _bodySensorMatrix);
 }

 // derive avatar body position and orientation from the current HMD Sensor location.
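getWorldBodyPosition() leans on the engine's transformPoint()/extractTranslation() helpers; for a rigid transform it is equivalent to pushing the homogeneous point through the matrix directly, as in this hypothetical standalone version (assumes GLM; the function name is made up for illustration):

    #include <glm/glm.hpp>

    // world body position = sensorToWorld applied to the bodySensor translation
    glm::vec3 worldBodyPosition(const glm::mat4& sensorToWorld, const glm::mat4& bodySensor) {
        glm::vec4 p = sensorToWorld * glm::vec4(glm::vec3(bodySensor[3]), 1.0f);
        return glm::vec3(p);
    }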
MyAvatar.h

@@ -45,16 +45,19 @@ public:
     void preRender(RenderArgs* renderArgs);
     void updateFromTrackers(float deltaTime);

-    void setHMDSensorMatrix(const glm::mat4& hmdSensorMatrix);
     const glm::mat4& getHMDSensorMatrix() const { return _hmdSensorMatrix; }
     const glm::vec3& getHMDSensorPosition() const { return _hmdSensorPosition; }
     const glm::quat& getHMDSensorOrientation() const { return _hmdSensorOrientation; }

     glm::mat4 getSensorToWorldMatrix() const { return _sensorToWorldMatrix; }

-    // these are overriden, because they must update the sensor matrix
-    virtual void setPosition(const glm::vec3 position, bool overideReferential = false) override;
-    virtual void setOrientation(const glm::quat& orientation, bool overideReferential = false) override;
+    // best called at start of main loop just after we have a fresh hmd pose.
+    // update internal body position from new hmd pose.
+    void updateFromHMDSensorMatrix(const glm::mat4& hmdSensorMatrix);
+
+    // best called at end of main loop, just before rendering.
+    // update sensor to world matrix from current body position and hmd sensor.
+    // This is so the correct camera can be used for rendering.
+    void updateSensorToWorldMatrix();

     virtual void render(RenderArgs* renderArgs, const glm::vec3& cameraPosition, bool postLighting = false) override;
     virtual void renderBody(RenderArgs* renderArgs, ViewFrustum* renderFrustum, bool postLighting, float glowLevel = 0.0f) override;
@@ -238,10 +241,6 @@ signals:

private:

-    // these set the avatars position in world space without effecting the sensor location.
-    void setAvatarPosition(glm::vec3 pos);
-    void setAvatarOrientation(glm::quat quat);
-
     glm::vec3 getWorldBodyPosition() const;
     glm::quat getWorldBodyOrientation() const;
|
@ -327,7 +326,6 @@ private:
|
|||
// cache of the current body position and orientation of the avatar's body,
|
||||
// in sensor space.
|
||||
glm::mat4 _bodySensorMatrix;
|
||||
glm::mat4 _inverseBodySensorMatrix;
|
||||
|
||||
// used to transform any sensor into world space, including the _hmdSensorMat, or hand controllers.
|
||||
glm::mat4 _sensorToWorldMatrix;
|
||||
|
|
Loading…
Reference in a new issue