Merge branch 'master' of https://github.com/highfidelity/hifi into baseball
Commit 75f4ae97c6

6 changed files with 106 additions and 137 deletions
@@ -177,9 +177,8 @@ void MyAvatar::reset(bool andReload) {
     // Reset dynamic state.
     _wasPushing = _isPushing = _isBraking = _billboardValid = false;
-    _isFollowingHMD = false;
-    _hmdFollowVelocity = Vectors::ZERO;
-    _hmdFollowSpeed = 0.0f;
+    _followVelocity = Vectors::ZERO;
+    _followSpeed = 0.0f;
     _skeletonModel.reset();
     getHead()->reset();
     _targetVelocity = glm::vec3(0.0f);
@@ -352,52 +351,40 @@ void MyAvatar::updateFromHMDSensorMatrix(const glm::mat4& hmdSensorMatrix) {
 void MyAvatar::updateHMDFollowVelocity() {
     // compute offset to body's target position (in sensor-frame)
     auto sensorBodyMatrix = deriveBodyFromHMDSensor();
-    _hmdFollowOffset = extractTranslation(sensorBodyMatrix) - extractTranslation(_bodySensorMatrix);
-    glm::vec3 truncatedOffset = _hmdFollowOffset;
+    glm::vec3 offset = extractTranslation(sensorBodyMatrix) - extractTranslation(_bodySensorMatrix);
+    _followOffsetDistance = glm::length(offset);
+
+    const float FOLLOW_TIMESCALE = 0.5f;
+    const float FOLLOW_THRESHOLD_SPEED = 0.2f;
+    const float FOLLOW_MIN_DISTANCE = 0.01f;
+    const float FOLLOW_THRESHOLD_DISTANCE = 0.2f;
+    const float FOLLOW_MAX_IDLE_DISTANCE = 0.1f;
+
+    bool hmdIsAtRest = _hmdAtRestDetector.update(_hmdSensorPosition, _hmdSensorOrientation);
+
+    _followOffsetDistance = glm::length(offset);
+    if (_followOffsetDistance < FOLLOW_MIN_DISTANCE) {
+        // close enough
+        _followOffsetDistance = 0.0f;
+    } else {
+        bool avatarIsMoving = glm::length(_velocity - _followVelocity) > FOLLOW_THRESHOLD_SPEED;
+        bool shouldFollow = (hmdIsAtRest || avatarIsMoving) && _followOffsetDistance > FOLLOW_MAX_IDLE_DISTANCE;
+
+        glm::vec3 truncatedOffset = offset;
         if (truncatedOffset.y < 0.0f) {
-            // don't pull the body DOWN to match the target (allow animation system to squat)
             truncatedOffset.y = 0.0f;
         }
-    float truncatedOffsetDistance = glm::length(truncatedOffset);
-
-    bool isMoving;
-    if (_lastIsMoving) {
-        const float MOVE_EXIT_SPEED_THRESHOLD = 0.07f; // m/sec
-        isMoving = glm::length(_velocity) >= MOVE_EXIT_SPEED_THRESHOLD;
-    } else {
-        const float MOVE_ENTER_SPEED_THRESHOLD = 0.2f; // m/sec
-        isMoving = glm::length(_velocity) > MOVE_ENTER_SPEED_THRESHOLD;
-    }
-    bool justStartedMoving = (_lastIsMoving != isMoving) && isMoving;
-    _lastIsMoving = isMoving;
-    bool hmdIsAtRest = _hmdAtRestDetector.update(_hmdSensorPosition, _hmdSensorOrientation);
-    const float MIN_HMD_HIP_SHIFT = 0.05f;
-    if (justStartedMoving || (hmdIsAtRest && truncatedOffsetDistance > MIN_HMD_HIP_SHIFT)) {
-        _isFollowingHMD = true;
-    }
-
-    bool needNewFollowSpeed = (_isFollowingHMD && _hmdFollowSpeed == 0.0f);
-    if (!needNewFollowSpeed) {
-        // check to see if offset has exceeded its threshold
-        const float MAX_HMD_HIP_SHIFT = 0.2f;
-        if (truncatedOffsetDistance > MAX_HMD_HIP_SHIFT) {
-            _isFollowingHMD = true;
-            needNewFollowSpeed = true;
-        }
-    }
-    if (_isFollowingHMD) {
-        // only bother to rotate into world frame if we're following
-        glm::quat sensorToWorldRotation = extractRotation(_sensorToWorldMatrix);
-        _hmdFollowOffset = sensorToWorldRotation * _hmdFollowOffset;
-    }
-    if (needNewFollowSpeed) {
-        // compute new velocity that will be used to resolve offset of hips from body
-        const float FOLLOW_HMD_DURATION = 0.5f; // seconds
-        _hmdFollowVelocity = (_hmdFollowOffset / FOLLOW_HMD_DURATION);
-        _hmdFollowSpeed = glm::length(_hmdFollowVelocity);
-    } else if (_isFollowingHMD) {
-        // compute new velocity (but not new speed)
-        _hmdFollowVelocity = _hmdFollowSpeed * glm::normalize(_hmdFollowOffset);
-    }
+        float truncatedDistance = glm::length(truncatedOffset);
+        bool needsNewSpeed = truncatedDistance > FOLLOW_THRESHOLD_DISTANCE;
+        if (needsNewSpeed || (shouldFollow && _followSpeed == 0.0f)) {
+            // compute new speed
+            _followSpeed = _followOffsetDistance / FOLLOW_TIMESCALE;
+        }
+        if (_followSpeed > 0.0f) {
+            // to compute new velocity we must rotate offset into the world-frame
+            glm::quat sensorToWorldRotation = extractRotation(_sensorToWorldMatrix);
+            _followVelocity = _followSpeed * glm::normalize(sensorToWorldRotation * offset);
+        }
+    }
 }
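
Note (illustrative, not part of the commit): the new code collapses the old _isFollowingHMD / _lastIsMoving state machine into a single follow speed derived from the offset and a fixed timescale. With the constants above, a 0.3 m offset while the HMD is at rest clears both FOLLOW_MAX_IDLE_DISTANCE (0.1 m) and FOLLOW_THRESHOLD_DISTANCE (0.2 m), so a new speed is computed; worldOffset below is a hypothetical name for the offset rotated into the world frame:

    float followSpeed = 0.3f / 0.5f;                                      // offset / FOLLOW_TIMESCALE = 0.6 m/s
    glm::vec3 followVelocity = followSpeed * glm::normalize(worldOffset); // 0.6 m/s toward the body target
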
@@ -1295,11 +1282,11 @@ void MyAvatar::prepareForPhysicsSimulation() {
     _characterController.setAvatarPositionAndOrientation(getPosition(), getOrientation());
     if (qApp->isHMDMode()) {
         updateHMDFollowVelocity();
-    } else if (_isFollowingHMD) {
-        _isFollowingHMD = false;
-        _hmdFollowVelocity = Vectors::ZERO;
+    } else if (_followSpeed > 0.0f) {
+        _followVelocity = Vectors::ZERO;
+        _followSpeed = 0.0f;
     }
-    _characterController.setHMDVelocity(_hmdFollowVelocity);
+    _characterController.setFollowVelocity(_followVelocity);
 }

 void MyAvatar::harvestResultsFromPhysicsSimulation() {
@@ -1307,35 +1294,27 @@ void MyAvatar::harvestResultsFromPhysicsSimulation() {
     glm::quat orientation = getOrientation();
     _characterController.getAvatarPositionAndOrientation(position, orientation);
     nextAttitude(position, orientation);
-    if (_isFollowingHMD) {
-        setVelocity(_characterController.getLinearVelocity() + _hmdFollowVelocity);
-        glm::vec3 hmdShift = _characterController.getHMDShift();
-        adjustSensorTransform(hmdShift);
+    if (_followSpeed > 0.0f) {
+        adjustSensorTransform();
+        setVelocity(_characterController.getLinearVelocity() + _followVelocity);
     } else {
         setVelocity(_characterController.getLinearVelocity());
     }
 }

-void MyAvatar::adjustSensorTransform(glm::vec3 hmdShift) {
+void MyAvatar::adjustSensorTransform() {
     // compute blendFactor of latest hmdShift
     // which we'll use to blend the rotation part
-    float blendFactor = 1.0f;
-    float shiftLength = glm::length(hmdShift);
-    if (shiftLength > 1.0e-5f) {
-        float offsetLength = glm::length(_hmdFollowOffset);
-        if (offsetLength > shiftLength) {
-            blendFactor = shiftLength / offsetLength;
-        }
-    }
+    float linearDistance = _characterController.getFollowTime() * _followSpeed;
+    float blendFactor = linearDistance < _followOffsetDistance ? linearDistance / _followOffsetDistance : 1.0f;

     auto newBodySensorMatrix = deriveBodyFromHMDSensor();
     auto worldBodyMatrix = _sensorToWorldMatrix * newBodySensorMatrix;
     glm::quat finalBodyRotation = glm::normalize(glm::quat_cast(worldBodyMatrix));
     if (blendFactor >= 0.99f) {
         // the "adjustment" is more or less complete so stop following
-        _isFollowingHMD = false;
-        _hmdFollowSpeed = 0.0f;
-        _hmdFollowVelocity = Vectors::ZERO;
+        _followVelocity = Vectors::ZERO;
+        _followSpeed = 0.0f;
         // and slam the body's transform anyway to eliminate any slight errors
         glm::vec3 finalBodyPosition = extractTranslation(worldBodyMatrix);
         nextAttitude(finalBodyPosition, finalBodyRotation);
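
Note (illustrative, not part of the commit): blendFactor is now driven by how far the character controller has actually been carried by the follow velocity, rather than by the HMD shift returned from the controller. For example, after a single 16 ms physics step at _followSpeed = 0.6 m/s against a 0.3 m follow offset:

    float linearDistance = 0.016f * 0.6f;   // getFollowTime() * _followSpeed = 0.0096 m
    float blendFactor = 0.0096f / 0.3f;     // = 0.032, still following
    // once linearDistance reaches _followOffsetDistance, blendFactor hits 1.0 and the
    // blendFactor >= 0.99f branch clears _followVelocity/_followSpeed and slams the transform.
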
@@ -1515,6 +1494,9 @@ void MyAvatar::initAnimGraph() {
         QUrl::fromLocalFile(PathUtils::resourcesPath() + "meshes/defaultAvatar_full/avatar-animation.json") :
         _animGraphUrl);
     _rig->initAnimGraph(graphUrl, _skeletonModel.getGeometry()->getFBXGeometry());
+
+    _bodySensorMatrix = deriveBodyFromHMDSensor(); // Based on current cached HMD position/rotation..
+    updateSensorToWorldMatrix(); // Uses updated position/orientation and _bodySensorMatrix changes
 }

 void MyAvatar::destroyAnimGraph() {
@@ -1989,53 +1971,19 @@ glm::quat MyAvatar::getWorldBodyOrientation() const {
 // derive avatar body position and orientation from the current HMD Sensor location.
 // results are in sensor space
 glm::mat4 MyAvatar::deriveBodyFromHMDSensor() const {
-    // HMD is in sensor space.
-    const glm::vec3 hmdPosition = getHMDSensorPosition();
-    const glm::quat hmdOrientation = getHMDSensorOrientation();
-    const glm::quat hmdOrientationYawOnly = cancelOutRollAndPitch(hmdOrientation);
-
-    const glm::vec3 DEFAULT_RIGHT_EYE_POS(-0.3f, 1.6f, 0.0f);
-    const glm::vec3 DEFAULT_LEFT_EYE_POS(0.3f, 1.6f, 0.0f);
-    const glm::vec3 DEFAULT_NECK_POS(0.0f, 1.5f, 0.0f);
-    const glm::vec3 DEFAULT_HIPS_POS(0.0f, 1.0f, 0.0f);
-
-    vec3 localEyes, localNeck;
-    if (!_debugDrawSkeleton) {
-        const glm::quat rotY180 = glm::angleAxis((float)PI, glm::vec3(0.0f, 1.0f, 0.0f));
-        localEyes = rotY180 * (((DEFAULT_RIGHT_EYE_POS + DEFAULT_LEFT_EYE_POS) / 2.0f) - DEFAULT_HIPS_POS);
-        localNeck = rotY180 * (DEFAULT_NECK_POS - DEFAULT_HIPS_POS);
-    } else {
-        // TODO: At the moment MyAvatar does not have access to the rig, which has the skeleton, which has the bind poses.
-        // for now use the _debugDrawSkeleton, which is initialized with the same FBX model as the rig.
-
-        // TODO: cache these indices.
-        int rightEyeIndex = _debugDrawSkeleton->nameToJointIndex("RightEye");
-        int leftEyeIndex = _debugDrawSkeleton->nameToJointIndex("LeftEye");
-        int neckIndex = _debugDrawSkeleton->nameToJointIndex("Neck");
-        int hipsIndex = _debugDrawSkeleton->nameToJointIndex("Hips");
-
-        glm::vec3 absRightEyePos = rightEyeIndex != -1 ? _debugDrawSkeleton->getAbsoluteBindPose(rightEyeIndex).trans : DEFAULT_RIGHT_EYE_POS;
-        glm::vec3 absLeftEyePos = leftEyeIndex != -1 ? _debugDrawSkeleton->getAbsoluteBindPose(leftEyeIndex).trans : DEFAULT_LEFT_EYE_POS;
-        glm::vec3 absNeckPos = neckIndex != -1 ? _debugDrawSkeleton->getAbsoluteBindPose(neckIndex).trans : DEFAULT_NECK_POS;
-        glm::vec3 absHipsPos = neckIndex != -1 ? _debugDrawSkeleton->getAbsoluteBindPose(hipsIndex).trans : DEFAULT_HIPS_POS;
-
-        const glm::quat rotY180 = glm::angleAxis((float)PI, glm::vec3(0.0f, 1.0f, 0.0f));
-        localEyes = rotY180 * (((absRightEyePos + absLeftEyePos) / 2.0f) - absHipsPos);
-        localNeck = rotY180 * (absNeckPos - absHipsPos);
+    if (_rig) {
+        // orientation
+        const glm::quat hmdOrientation = getHMDSensorOrientation();
+        const glm::quat yaw = cancelOutRollAndPitch(hmdOrientation);
+        // position
+        // we flip about yAxis when going from "root" to "avatar" frame
+        // and we must also apply "yaw" to get into HMD frame
+        glm::quat rotY180 = glm::angleAxis((float)M_PI, glm::vec3(0.0f, 1.0f, 0.0f));
+        glm::vec3 eyesInAvatarFrame = rotY180 * yaw * _rig->getEyesInRootFrame();
+        glm::vec3 bodyPos = getHMDSensorPosition() - eyesInAvatarFrame;
+        return createMatFromQuatAndPos(yaw, bodyPos);
     }
-
-    // apply simplistic head/neck model
-    // figure out where the avatar body should be by applying offsets from the avatar's neck & head joints.
-
-    // eyeToNeck offset is relative full HMD orientation.
-    // while neckToRoot offset is only relative to HMDs yaw.
-    glm::vec3 eyeToNeck = hmdOrientation * (localNeck - localEyes);
-    glm::vec3 neckToRoot = hmdOrientationYawOnly * -localNeck;
-    glm::vec3 bodyPos = hmdPosition + eyeToNeck + neckToRoot;
-
-    // avatar facing is determined solely by hmd orientation.
-    return createMatFromQuatAndPos(hmdOrientationYawOnly, bodyPos);
+    return glm::mat4();
 }

 glm::vec3 MyAvatar::getPositionForAudio() {
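
Note (illustrative, not part of the commit): the new derivation places the body so that the rig's eye midpoint, flipped about the y-axis and rotated by the HMD's yaw, lands on the HMD sensor position. For a purely yawed HMD at sensor position (1.0, 1.7, 2.0) and an eye midpoint 0.6 m above the hips in the root frame (a hypothetical value for Rig::getEyesInRootFrame()):

    glm::vec3 eyesInRootFrame(0.0f, 0.6f, 0.0f);
    glm::vec3 eyesInAvatarFrame = rotY180 * yaw * eyesInRootFrame;       // still (0.0, 0.6, 0.0): y-axis rotations fix this vector
    glm::vec3 bodyPos = glm::vec3(1.0f, 1.7f, 2.0f) - eyesInAvatarFrame; // hips at (1.0, 1.1, 2.0), directly below the eyes
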
@@ -206,7 +206,7 @@ public:

     void prepareForPhysicsSimulation();
     void harvestResultsFromPhysicsSimulation();
-    void adjustSensorTransform(glm::vec3 hmdShift);
+    void adjustSensorTransform();

     const QString& getCollisionSoundURL() { return _collisionSoundURL; }
     void setCollisionSoundURL(const QString& url);
@@ -329,7 +329,7 @@ private:
     PalmData getActivePalmData(int palmIndex) const;

     // derive avatar body position and orientation from the current HMD Sensor location.
-    // results are in sensor space
+    // results are in HMD frame
     glm::mat4 deriveBodyFromHMDSensor() const;

     float _driveKeys[MAX_DRIVE_KEYS];
@@ -393,9 +393,10 @@ private:

     // used to transform any sensor into world space, including the _hmdSensorMat, or hand controllers.
     glm::mat4 _sensorToWorldMatrix;
-    glm::vec3 _hmdFollowOffset { Vectors::ZERO };
-    glm::vec3 _hmdFollowVelocity { Vectors::ZERO };
-    float _hmdFollowSpeed { 0.0f };
+    glm::vec3 _followVelocity { Vectors::ZERO };
+    float _followSpeed { 0.0f };
+    float _followOffsetDistance { 0.0f };

     bool _goToPending;
     glm::vec3 _goToPosition;
@@ -413,9 +414,6 @@ private:
     glm::vec3 _customListenPosition;
     glm::quat _customListenOrientation;

-    bool _isFollowingHMD { false };
-    float _followHMDAlpha { 0.0f };
-
     AtRestDetector _hmdAtRestDetector;
     bool _lastIsMoving { false };
 };
@@ -60,7 +60,7 @@ MyCharacterController::MyCharacterController(MyAvatar* avatar) {
     _floorDistance = MAX_FALL_HEIGHT;

     _walkVelocity.setValue(0.0f, 0.0f, 0.0f);
-    _hmdVelocity.setValue(0.0f, 0.0f, 0.0f);
+    _followVelocity.setValue(0.0f, 0.0f, 0.0f);
     _jumpSpeed = JUMP_SPEED;
     _isOnGround = false;
     _isJumping = false;
@@ -68,7 +68,7 @@ MyCharacterController::MyCharacterController(MyAvatar* avatar) {
     _isHovering = true;
     _isPushingUp = false;
     _jumpToHoverStart = 0;
-    _lastStepDuration = 0.0f;
+    _followTime = 0.0f;

     _pendingFlags = PENDING_FLAG_UPDATE_SHAPE;
     updateShapeIfNecessary();
@@ -161,16 +161,14 @@ void MyCharacterController::playerStep(btCollisionWorld* dynaWorld, btScalar dt) {
         }
     }

-    // Rather than add _hmdVelocity to the velocity of the RigidBody, we explicitly teleport
+    // Rather than add _followVelocity to the velocity of the RigidBody, we explicitly teleport
     // the RigidBody forward according to the formula: distance = rate * time
-    if (_hmdVelocity.length2() > 0.0f) {
+    if (_followVelocity.length2() > 0.0f) {
         btTransform bodyTransform = _rigidBody->getWorldTransform();
-        bodyTransform.setOrigin(bodyTransform.getOrigin() + dt * _hmdVelocity);
+        bodyTransform.setOrigin(bodyTransform.getOrigin() + dt * _followVelocity);
         _rigidBody->setWorldTransform(bodyTransform);
     }
-    // MyAvatar will ask us how far we stepped for HMD motion, which will depend on how
-    // much time has accumulated in _lastStepDuration.
-    _lastStepDuration += dt;
+    _followTime += dt;
 }

 void MyCharacterController::jump() {
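
Note (illustrative, not part of the commit): instead of folding the follow velocity into the rigid body's velocity, each substep teleports the body by dt * _followVelocity and accumulates dt into _followTime, which MyAvatar::adjustSensorTransform() later reads back through getFollowTime(). One 16 ms substep at a follow velocity of (0.6, 0, 0) m/s:

    btVector3 step = btScalar(0.016f) * btVector3(0.6f, 0.0f, 0.0f); // ~9.6 mm of hip translation this substep
    // _followTime += 0.016f;  ->  linearDistance = getFollowTime() * _followSpeed in adjustSensorTransform()
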
@@ -346,8 +344,8 @@ void MyCharacterController::setTargetVelocity(const glm::vec3& velocity) {
     _walkVelocity = glmToBullet(velocity);
 }

-void MyCharacterController::setHMDVelocity(const glm::vec3& velocity) {
-    _hmdVelocity = glmToBullet(velocity);
+void MyCharacterController::setFollowVelocity(const glm::vec3& velocity) {
+    _followVelocity = glmToBullet(velocity);
 }

 glm::vec3 MyCharacterController::getLinearVelocity() const {
@@ -400,7 +398,7 @@ void MyCharacterController::preSimulation() {
             }
         }
     }
-    _lastStepDuration = 0.0f;
+    _followTime = 0.0f;
 }

 void MyCharacterController::postSimulation() {
@@ -64,8 +64,8 @@ public:
     void getAvatarPositionAndOrientation(glm::vec3& position, glm::quat& rotation) const;

     void setTargetVelocity(const glm::vec3& velocity);
-    void setHMDVelocity(const glm::vec3& velocity);
-    glm::vec3 getHMDShift() const { return _lastStepDuration * bulletToGLM(_hmdVelocity); }
+    void setFollowVelocity(const glm::vec3& velocity);
+    float getFollowTime() const { return _followTime; }

     glm::vec3 getLinearVelocity() const;

@@ -75,7 +75,7 @@ protected:
 protected:
     btVector3 _currentUp;
     btVector3 _walkVelocity;
-    btVector3 _hmdVelocity;
+    btVector3 _followVelocity;
     btTransform _avatarBodyTransform;

     glm::vec3 _shapeLocalOffset;
@@ -93,7 +93,7 @@ protected:
     btScalar _gravity;

     btScalar _jumpSpeed;
-    btScalar _lastStepDuration;
+    btScalar _followTime;

     bool _enabled;
     bool _isOnGround;
@@ -407,6 +407,24 @@ void Rig::calcAnimAlpha(float speed, const std::vector<float>& referenceSpeeds, float* alphaOut) const {
     *alphaOut = alpha;
 }

+void Rig::computeEyesInRootFrame(const AnimPoseVec& poses) {
+    // TODO: use cached eye/hips indices for these calculations
+    int numPoses = poses.size();
+    int rightEyeIndex = _animSkeleton->nameToJointIndex(QString("RightEye"));
+    int leftEyeIndex = _animSkeleton->nameToJointIndex(QString("LeftEye"));
+    if (numPoses > rightEyeIndex && numPoses > leftEyeIndex
+        && rightEyeIndex > 0 && leftEyeIndex > 0) {
+        int hipsIndex = _animSkeleton->nameToJointIndex(QString("Hips"));
+        int headIndex = _animSkeleton->nameToJointIndex(QString("Head"));
+        if (hipsIndex >= 0 && headIndex > 0) {
+            glm::vec3 rightEye = _animSkeleton->getAbsolutePose(rightEyeIndex, poses).trans;
+            glm::vec3 leftEye = _animSkeleton->getAbsolutePose(leftEyeIndex, poses).trans;
+            glm::vec3 hips = _animSkeleton->getAbsolutePose(hipsIndex, poses).trans;
+            _eyesInRootFrame = 0.5f * (rightEye + leftEye) - hips;
+        }
+    }
+}
+
 // animation reference speeds.
 static const std::vector<float> FORWARD_SPEEDS = { 0.4f, 1.4f, 4.5f }; // m/s
 static const std::vector<float> BACKWARD_SPEEDS = { 0.6f, 1.45f }; // m/s
@@ -730,6 +748,7 @@ void Rig::updateAnimations(float deltaTime, glm::mat4 rootTransform) {
             setJointTranslation((int)i, true, poses[i].trans, PRIORITY);
         }

+        computeEyesInRootFrame(poses);
     } else {

         // First normalize the fades so that they sum to 1.0.
@@ -1124,14 +1143,14 @@ void Rig::updateLeanJoint(int index, float leanSideways, float leanForward, float torsoTwist) {

 static AnimPose avatarToBonePose(AnimPose pose, AnimSkeleton::ConstPointer skeleton) {
     AnimPose rootPose = skeleton->getAbsoluteBindPose(skeleton->nameToJointIndex("Hips"));
-    AnimPose rotY180(glm::vec3(1), glm::angleAxis((float)PI, glm::vec3(0.0f, 1.0f, 0.0f)), glm::vec3(0));
+    AnimPose rotY180(glm::vec3(1.0f), glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f)), glm::vec3(0));
     return rootPose * rotY180 * pose;
 }

 #ifdef DEBUG_RENDERING
 static AnimPose boneToAvatarPose(AnimPose pose, AnimSkeleton::ConstPointer skeleton) {
     AnimPose rootPose = skeleton->getAbsoluteBindPose(skeleton->nameToJointIndex("Hips"));
-    AnimPose rotY180(glm::vec3(1), glm::angleAxis((float)PI, glm::vec3(0.0f, 1.0f, 0.0f)), glm::vec3(0));
+    AnimPose rotY180(glm::vec3(1.0f), glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f)), glm::vec3(0));
     return (rootPose * rotY180).inverse() * pose;
 }
 #endif
@@ -1342,6 +1361,7 @@ void Rig::updateFromHandParameters(const HandParameters& params, float dt) {
 void Rig::makeAnimSkeleton(const FBXGeometry& fbxGeometry) {
     if (!_animSkeleton) {
         _animSkeleton = std::make_shared<AnimSkeleton>(fbxGeometry);
+        computeEyesInRootFrame(_animSkeleton->getRelativeBindPoses());
     }
 }

@@ -214,6 +214,8 @@ public:

     bool getModelOffset(glm::vec3& modelOffsetOut) const;

+    const glm::vec3& getEyesInRootFrame() const { return _eyesInRootFrame; }
+
 protected:
     void updateAnimationStateHandlers();

@@ -222,6 +224,8 @@ public:
     void updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm::quat& modelRotation, const glm::quat& worldHeadOrientation, const glm::vec3& lookAt, const glm::vec3& saccade);
     void calcAnimAlpha(float speed, const std::vector<float>& referenceSpeeds, float* alphaOut) const;

+    void computeEyesInRootFrame(const AnimPoseVec& poses);
+
     QVector<JointState> _jointStates;
     int _rootJointIndex = -1;

@@ -241,6 +245,7 @@ public:
     glm::vec3 _lastFront;
     glm::vec3 _lastPosition;
     glm::vec3 _lastVelocity;
+    glm::vec3 _eyesInRootFrame { Vectors::ZERO };

     std::shared_ptr<AnimNode> _animNode;
     std::shared_ptr<AnimSkeleton> _animSkeleton;