Merge pull request #5383 from ctrlaltdavid/20630

Improve looking-at-me indication

Commit e24395a226
5 changed files with 72 additions and 25 deletions
@@ -443,36 +443,57 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
             _skeletonModel.renderBoundingCollisionShapes(*renderArgs->_batch, 0.7f);
         }

+        // Stack indicator spheres
+        float indicatorOffset = 0.0f;
+        if (!_displayName.isEmpty() && _displayNameAlpha != 0.0f) {
+            const float DISPLAY_NAME_INDICATOR_OFFSET = 0.22f;
+            indicatorOffset = DISPLAY_NAME_INDICATOR_OFFSET;
+        }
+        const float INDICATOR_RADIUS = 0.03f;
+        const float INDICATOR_INDICATOR_OFFSET = 3.0f * INDICATOR_RADIUS;
+
         // If this is the avatar being looked at, render a little ball above their head
         if (_isLookAtTarget && Menu::getInstance()->isOptionChecked(MenuOption::RenderFocusIndicator)) {
-            const float INDICATOR_OFFSET = 0.22f;
-            const float INDICATOR_RADIUS = 0.03f;
             const glm::vec4 LOOK_AT_INDICATOR_COLOR = { 0.8f, 0.0f, 0.0f, 0.75f };
-            glm::vec3 position = glm::vec3(_position.x, getDisplayNamePosition().y + INDICATOR_OFFSET, _position.z);
+            glm::vec3 position = glm::vec3(_position.x, getDisplayNamePosition().y + indicatorOffset, _position.z);
             Transform transform;
             transform.setTranslation(position);
             batch.setModelTransform(transform);
             DependencyManager::get<DeferredLightingEffect>()->renderSolidSphere(batch, INDICATOR_RADIUS,
                 15, 15, LOOK_AT_INDICATOR_COLOR);
+            indicatorOffset += INDICATOR_INDICATOR_OFFSET;
         }

-        // If the avatar is looking at me, render an indication that they are
-        if (getHead()->getIsLookingAtMe() && Menu::getInstance()->isOptionChecked(MenuOption::ShowWhosLookingAtMe)) {
-            const glm::vec4 LOOKING_AT_ME_COLOR = { 0.8f, 0.65f, 0.0f, 0.1f };
-            glm::vec3 position = glm::vec3(_position.x, getDisplayNamePosition().y + indicatorOffset, _position.z);
-            Transform transform;
-            transform.setTranslation(position);
-            batch.setModelTransform(transform);
-            DependencyManager::get<DeferredLightingEffect>()->renderSolidSphere(batch, INDICATOR_RADIUS,
-                15, 15, LOOKING_AT_ME_COLOR);
-        }
+        // If the avatar is looking at me, indicate that they are
+        if (getHead()->isLookingAtMe() && Menu::getInstance()->isOptionChecked(MenuOption::ShowWhosLookingAtMe)) {
+            const glm::vec3 LOOKING_AT_ME_COLOR = { 1.0f, 1.0f, 1.0f };
+            const float LOOKING_AT_ME_ALPHA_START = 0.8f;
+            const float LOOKING_AT_ME_DURATION = 0.5f; // seconds
+            quint64 now = usecTimestampNow();
+            float alpha = LOOKING_AT_ME_ALPHA_START
+                * (1.0f - ((float)(now - getHead()->getLookingAtMeStarted()))
+                / (LOOKING_AT_ME_DURATION * (float)USECS_PER_SECOND));
+            if (alpha > 0.0f) {
+                QSharedPointer<NetworkGeometry> geometry = getHead()->getFaceModel().getGeometry();
+                if (geometry) {
+                    const float DEFAULT_EYE_DIAMETER = 0.048f; // Typical human eye
+                    const float RADIUS_INCREMENT = 0.005f;
+                    Transform transform;
+
+                    glm::vec3 position = getHead()->getLeftEyePosition();
+                    transform.setTranslation(position);
+                    batch.setModelTransform(transform);
+                    float eyeDiameter = geometry->getFBXGeometry().leftEyeSize;
+                    if (eyeDiameter == 0.0f) {
+                        eyeDiameter = DEFAULT_EYE_DIAMETER;
+                    }
+                    DependencyManager::get<DeferredLightingEffect>()->renderSolidSphere(batch,
+                        eyeDiameter * _scale / 2.0f + RADIUS_INCREMENT, 15, 15, glm::vec4(LOOKING_AT_ME_COLOR, alpha));
+
+                    position = getHead()->getRightEyePosition();
+                    transform.setTranslation(position);
+                    batch.setModelTransform(transform);
+                    eyeDiameter = geometry->getFBXGeometry().rightEyeSize;
+                    if (eyeDiameter == 0.0f) {
+                        eyeDiameter = DEFAULT_EYE_DIAMETER;
+                    }
+                    DependencyManager::get<DeferredLightingEffect>()->renderSolidSphere(batch,
+                        eyeDiameter * _scale / 2.0f + RADIUS_INCREMENT, 15, 15, glm::vec4(LOOKING_AT_ME_COLOR, alpha));
+                }
+            }
+        }

         // quick check before falling into the code below:
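The fade in the added block is a straight linear ramp from the moment the gaze began. A minimal standalone sketch of the same arithmetic, assuming microsecond timestamps (the engine's usecTimestampNow() and USECS_PER_SECOND are stubbed here):

    #include <algorithm>
    #include <cstdint>

    static const uint64_t USECS_PER_SECOND = 1000000;

    // Alpha starts at 0.8 and fades to 0.0 over 0.5 s of continuous gaze.
    float lookingAtMeAlpha(uint64_t nowUsecs, uint64_t gazeStartedUsecs) {
        const float LOOKING_AT_ME_ALPHA_START = 0.8f;
        const float LOOKING_AT_ME_DURATION = 0.5f; // seconds
        float elapsed = (float)(nowUsecs - gazeStartedUsecs);
        float alpha = LOOKING_AT_ME_ALPHA_START
            * (1.0f - elapsed / (LOOKING_AT_ME_DURATION * (float)USECS_PER_SECOND));
        return std::max(alpha, 0.0f); // the render path skips drawing once this reaches zero
    }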
@@ -55,6 +55,8 @@ Head::Head(Avatar* owningAvatar) :
     _deltaLeanForward(0.0f),
     _isCameraMoving(false),
     _isLookingAtMe(false),
+    _lookingAtMeStarted(0),
+    _wasLastLookingAtMe(0),
     _faceModel(this),
     _leftEyeLookAtID(DependencyManager::get<GeometryCache>()->allocateID()),
     _rightEyeLookAtID(DependencyManager::get<GeometryCache>()->allocateID())
@@ -316,7 +318,7 @@ glm::quat Head::getFinalOrientationInLocalFrame() const {
 }

 glm::vec3 Head::getCorrectedLookAtPosition() {
-    if (_isLookingAtMe) {
+    if (isLookingAtMe()) {
         return _correctedLookAtPosition;
     } else {
         return getLookAtPosition();
@@ -324,10 +326,21 @@ glm::vec3 Head::getCorrectedLookAtPosition() {
     }
 }

 void Head::setCorrectedLookAtPosition(glm::vec3 correctedLookAtPosition) {
+    if (!isLookingAtMe()) {
+        _lookingAtMeStarted = usecTimestampNow();
+    }
     _isLookingAtMe = true;
+    _wasLastLookingAtMe = usecTimestampNow();
     _correctedLookAtPosition = correctedLookAtPosition;
 }

+bool Head::isLookingAtMe() {
+    // Allow for outages such as may be encountered during avatar movement
+    quint64 now = usecTimestampNow();
+    const quint64 LOOKING_AT_ME_GAP_ALLOWED = 1000000; // microseconds
+    return _isLookingAtMe || (now - _wasLastLookingAtMe) < LOOKING_AT_ME_GAP_ALLOWED;
+}
+
 glm::quat Head::getCameraOrientation() const {
     // NOTE: Head::getCameraOrientation() is not used for orienting the camera "view" while in Oculus mode, so
     // you may wonder why this code is here. This method will be called while in Oculus mode to determine how
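isLookingAtMe() deliberately keeps reporting true for up to a second after the last sighting, so brief dropouts while an avatar moves do not restart the indicator. A compact illustration of that hysteresis under the same microsecond-timestamp convention (the class and names below are illustrative, not the engine's):

    #include <cstdint>

    class LookAtTracker {
    public:
        // Called whenever another avatar's corrected gaze lands on us.
        void recordSighting(uint64_t nowUsecs) {
            if (!isLookingAtMe(nowUsecs)) {
                _startedUsecs = nowUsecs; // a fresh gaze restarts the fade timer
            }
            _active = true;
            _lastSeenUsecs = nowUsecs;
        }
        void clear() { _active = false; } // mirrors clearCorrectedLookAtPosition()
        bool isLookingAtMe(uint64_t nowUsecs) const {
            const uint64_t GAP_ALLOWED_USECS = 1000000; // tolerate a 1 s outage
            return _active || (nowUsecs - _lastSeenUsecs) < GAP_ALLOWED_USECS;
        }
        uint64_t gazeStartedUsecs() const { return _startedUsecs; }
    private:
        bool _active = false;
        uint64_t _startedUsecs = 0;
        uint64_t _lastSeenUsecs = 0;
    };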
@@ -52,8 +52,9 @@ public:
     void setCorrectedLookAtPosition(glm::vec3 correctedLookAtPosition);
     glm::vec3 getCorrectedLookAtPosition();
     void clearCorrectedLookAtPosition() { _isLookingAtMe = false; }
-    bool getIsLookingAtMe() { return _isLookingAtMe; }
+    bool isLookingAtMe();
+    quint64 getLookingAtMeStarted() { return _lookingAtMeStarted; }

     float getScale() const { return _scale; }
     glm::vec3 getPosition() const { return _position; }
     const glm::vec3& getEyePosition() const { return _eyePosition; }
@@ -139,6 +140,8 @@ private:

     bool _isCameraMoving;
     bool _isLookingAtMe;
+    quint64 _lookingAtMeStarted;
+    quint64 _wasLastLookingAtMe;
     FaceModel _faceModel;

     glm::vec3 _correctedLookAtPosition;
@@ -2616,10 +2616,17 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping,
         buildModelMesh(extracted);
 # endif

+        if (extracted.mesh.isEye) {
+            if (maxJointIndex == geometry.leftEyeJointIndex) {
+                geometry.leftEyeSize = extracted.mesh.meshExtents.largestDimension() * offsetScale;
+            } else {
+                geometry.rightEyeSize = extracted.mesh.meshExtents.largestDimension() * offsetScale;
+            }
+        }
+
         geometry.meshes.append(extracted.mesh);
         int meshIndex = geometry.meshes.size() - 1;
         meshIDsToMeshIndices.insert(it.key(), meshIndex);
     }

     // now that all joints have been scanned, compute a collision shape for each joint
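The eye size recorded here is the largest dimension of the eye mesh's bounding extents, scaled into model units by offsetScale. A rough sketch of what that computation amounts to; the Extents struct below is a stand-in assuming an axis-aligned bounding box, and the engine's own Extents class is authoritative:

    #include <algorithm>
    #include <glm/glm.hpp>

    // Stand-in for the engine's Extents: an axis-aligned bounding box.
    struct Extents {
        glm::vec3 minimum;
        glm::vec3 maximum;
        // Longest edge of the box; for a roughly spherical eye mesh this
        // approximates the eye's diameter.
        float largestDimension() const {
            glm::vec3 size = maximum - minimum;
            return std::max(std::max(size.x, size.y), size.z);
        }
    };

    // As in the hunk above, scaled into model units:
    // geometry.leftEyeSize = extracted.mesh.meshExtents.largestDimension() * offsetScale;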
@@ -232,7 +232,10 @@ public:
     int rightHandJointIndex = -1;
     int leftToeJointIndex = -1;
     int rightToeJointIndex = -1;

+    float leftEyeSize = 0.0f; // Maximum mesh extents dimension
+    float rightEyeSize = 0.0f;
+
     QVector<int> humanIKJointIndices;

     glm::vec3 palmDirection;
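Both fields default to 0.0f, which the render path in Avatar::render treats as "no separately-skinned eye mesh found" and replaces with a typical human eye diameter. A small sketch of that fallback (the helper name here is mine, not the codebase's):

    // Returns a usable eye diameter in world units for the indicator sphere.
    float effectiveEyeDiameter(float eyeSizeFromFBX, float avatarScale) {
        const float DEFAULT_EYE_DIAMETER = 0.048f; // meters; typical human eye
        float diameter = (eyeSizeFromFBX == 0.0f) ? DEFAULT_EYE_DIAMETER : eyeSizeFromFBX;
        return diameter * avatarScale;
    }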