Sensor space entity support

You can do this by parenting an entity to an avatar's joint index -2.
The entity will then follow the avatar as it moves in the world, but
will not follow the avatar's body as the user moves around in sensor space.  Essentially, this
gives you the ability to place objects in the user's physical room.

WebTablets now use this feature and no longer jitter.
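For example, a script can pin a box one meter in front of where the user is currently standing (a minimal sketch of the new usage; the entity type, dimensions, and offset are illustrative):

    // Joint index -2 parents the entity to the avatar's sensor-to-world matrix.
    // The box stays fixed in the physical room while the user walks around in it,
    // but still follows the avatar through teleports and other world-space moves.
    var boxID = Entities.addEntity({
        type: "Box",
        position: Vec3.sum(MyAvatar.position,
                           Vec3.multiplyQbyV(MyAvatar.orientation, {x: 0, y: 0, z: -1})),
        dimensions: {x: 0.25, y: 0.25, z: 0.25},
        parentID: MyAvatar.sessionUUID,
        parentJointIndex: -2
    });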
Anthony J. Thibault 2016-08-18 17:04:56 -07:00
parent e7dd9c4478
commit c6ea64926c
8 changed files with 81 additions and 86 deletions

Avatar.cpp

@@ -59,6 +59,8 @@ const float DISPLAYNAME_ALPHA = 1.0f;
 const float DISPLAYNAME_BACKGROUND_ALPHA = 0.4f;
 const glm::vec3 HAND_TO_PALM_OFFSET(0.0f, 0.12f, 0.08f);
+const int SENSOR_TO_WORLD_MATRIX_INDEX = 65534;
 namespace render {
     template <> const ItemKey payloadGetKey(const AvatarSharedPointer& avatar) {
         return ItemKey::Builder::opaqueShape();
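A note on the value 65534: joint indices travel as unsigned 16-bit values, so the script-facing -1 wraps to 65535 (the familiar "no joint" sentinel that WebTablet.js below spelled NEGATIVE_ONE), and -2 wraps to 65534, which is presumably how this constant was chosen. A quick check of the wrap-around:

    print(-1 & 0xFFFF); // 65535, "no parent joint"
    print(-2 & 0xFFFF); // 65534, SENSOR_TO_WORLD_MATRIX_INDEX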
@@ -851,15 +853,33 @@ glm::vec3 Avatar::getDefaultJointTranslation(int index) const {
 }
 glm::quat Avatar::getAbsoluteJointRotationInObjectFrame(int index) const {
-    glm::quat rotation;
-    _skeletonModel->getAbsoluteJointRotationInRigFrame(index, rotation);
-    return Quaternions::Y_180 * rotation;
+    if (index == SENSOR_TO_WORLD_MATRIX_INDEX) {
+        glm::mat4 sensorToWorldMatrix = getSensorToWorldMatrix();
+        bool success;
+        Transform avatarTransform;
+        Transform::mult(avatarTransform, getParentTransform(success), getLocalTransform());
+        glm::mat4 invAvatarMat = avatarTransform.getInverseMatrix();
+        return glmExtractRotation(invAvatarMat * sensorToWorldMatrix);
+    } else {
+        glm::quat rotation;
+        _skeletonModel->getAbsoluteJointRotationInRigFrame(index, rotation);
+        return Quaternions::Y_180 * rotation;
+    }
 }
 glm::vec3 Avatar::getAbsoluteJointTranslationInObjectFrame(int index) const {
-    glm::vec3 translation;
-    _skeletonModel->getAbsoluteJointTranslationInRigFrame(index, translation);
-    return Quaternions::Y_180 * translation;
+    if (index == SENSOR_TO_WORLD_MATRIX_INDEX) {
+        glm::mat4 sensorToWorldMatrix = getSensorToWorldMatrix();
+        bool success;
+        Transform avatarTransform;
+        Transform::mult(avatarTransform, getParentTransform(success), getLocalTransform());
+        glm::mat4 invAvatarMat = avatarTransform.getInverseMatrix();
+        return extractTranslation(invAvatarMat * sensorToWorldMatrix);
+    } else {
+        glm::vec3 translation;
+        _skeletonModel->getAbsoluteJointTranslationInRigFrame(index, translation);
+        return Quaternions::Y_180 * translation;
+    }
 }
 int Avatar::getJointIndex(const QString& name) const {
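In other words, the fake joint's transform in the avatar's object frame is inverse(avatarWorldMat) * sensorToWorldMat, so when the parenting system multiplies the avatar's world transform back in, a child of joint 65534 ends up positioned relative to the physical room. The same algebra is visible from scripts via the Mat4 API (a sketch, assuming the avatar itself has no parent so its world transform is just its position and orientation):

    var avatarMat = Mat4.createFromRotAndTrans(MyAvatar.orientation, MyAvatar.position);
    // what the C++ above computes for joint index 65534:
    var jointInObjectFrame = Mat4.multiply(Mat4.inverse(avatarMat), MyAvatar.sensorToWorldMatrix);
    // multiplying the avatar transform back in recovers sensorToWorldMatrix
    // (up to floating-point error):
    var roundTrip = Mat4.multiply(avatarMat, jointInObjectFrame);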

MyAvatar.cpp

@@ -107,7 +107,6 @@ MyAvatar::MyAvatar(RigPointer rig) :
     _hmdSensorOrientation(),
     _hmdSensorPosition(),
     _bodySensorMatrix(),
-    _sensorToWorldMatrix(),
     _goToPending(false),
     _goToPosition(),
     _goToOrientation(),
@@ -511,13 +510,9 @@ void MyAvatar::simulate(float deltaTime) {
     updateAvatarEntities();
 }
-// thread-safe
-glm::mat4 MyAvatar::getSensorToWorldMatrix() const {
-    return _sensorToWorldMatrixCache.get();
-}
-// As far as I know no HMD system supports a play area of a kilometer in radius.
+// As far as I know no HMD system supports a play area of a kilometer in radius.
 static const float MAX_HMD_ORIGIN_DISTANCE = 1000.0f;
 // Pass a recent sample of the HMD to the avatar.
 // This can also update the avatar's position to follow the HMD
 // as it moves through the world.
@@ -526,7 +521,7 @@ void MyAvatar::updateFromHMDSensorMatrix(const glm::mat4& hmdSensorMatrix) {
     _hmdSensorMatrix = hmdSensorMatrix;
     auto newHmdSensorPosition = extractTranslation(hmdSensorMatrix);
-    if (newHmdSensorPosition != _hmdSensorPosition &&
+    if (newHmdSensorPosition != _hmdSensorPosition &&
         glm::length(newHmdSensorPosition) > MAX_HMD_ORIGIN_DISTANCE) {
         qWarning() << "Invalid HMD sensor position " << newHmdSensorPosition;
         // Ignore unreasonable HMD sensor data

MyAvatar.h

@@ -79,8 +79,6 @@ class MyAvatar : public Avatar {
     Q_PROPERTY(controller::Pose leftHandTipPose READ getLeftHandTipPose)
     Q_PROPERTY(controller::Pose rightHandTipPose READ getRightHandTipPose)
-    Q_PROPERTY(glm::mat4 sensorToWorldMatrix READ getSensorToWorldMatrix)
     Q_PROPERTY(float energy READ getEnergy WRITE setEnergy)
     Q_PROPERTY(bool hmdLeanRecenterEnabled READ getHMDLeanRecenterEnabled WRITE setHMDLeanRecenterEnabled)
@@ -110,9 +108,6 @@ public:
     const glm::quat& getHMDSensorOrientation() const { return _hmdSensorOrientation; }
     const glm::vec2& getHMDSensorFacingMovingAverage() const { return _hmdSensorFacingMovingAverage; }
-    // thread safe
-    Q_INVOKABLE glm::mat4 getSensorToWorldMatrix() const;
     Q_INVOKABLE void setOrientationVar(const QVariant& newOrientationVar);
     Q_INVOKABLE QVariant getOrientationVar() const;
@@ -415,6 +410,10 @@ private:
     bool _useSnapTurn { true };
     bool _clearOverlayWhenMoving { true };
+    // working copy of sensorToWorldMatrix.
+    // See AvatarData for thread-safe _sensorToWorldMatrixCache, used for outward facing access
+    glm::mat4 _sensorToWorldMatrix;
     // cache of the current HMD sensor position and orientation
     // in sensor space.
     glm::mat4 _hmdSensorMatrix;
@@ -427,10 +426,6 @@ private:
     // in sensor space.
     glm::mat4 _bodySensorMatrix;
-    // used to transform any sensor into world space, including the _hmdSensorMat, or hand controllers.
-    glm::mat4 _sensorToWorldMatrix;
-    ThreadSafeValueCache<glm::mat4> _sensorToWorldMatrixCache { glm::mat4() };
     struct FollowHelper {
         FollowHelper();

AvatarData.cpp

@@ -53,15 +53,18 @@ namespace AvatarDataPacket {
     // NOTE: AvatarDataPackets start with a uint16_t sequence number that is not reflected in the Header structure.
     PACKED_BEGIN struct Header {
-        float position[3];            // skeletal model's position
-        float globalPosition[3];      // avatar's position
-        uint16_t localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the thing it's attached to
-        uint16_t scale;               // (compressed) 'ratio' encoding uses sign bit as flag.
-        float lookAtPosition[3];      // world space position that eyes are focusing on.
-        float audioLoudness;          // current loudness of microphone
+        float position[3];            // skeletal model's position
+        float globalPosition[3];      // avatar's position
+        uint16_t localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the thing it's attached to
+        uint16_t scale;               // (compressed) 'ratio' encoding uses sign bit as flag.
+        float lookAtPosition[3];      // world space position that eyes are focusing on.
+        float audioLoudness;          // current loudness of microphone
+        uint8_t sensorToWorldQuat[6]; // 6 byte compressed quaternion part of sensor to world matrix
+        uint16_t sensorToWorldScale;  // uniform scale of sensor to world matrix
+        float sensorToWorldTrans[3];  // fourth column of sensor to world matrix
         uint8_t flags;
     } PACKED_END;
-    const size_t HEADER_SIZE = 49;
+    const size_t HEADER_SIZE = 69;
     // only present if HAS_REFERENTIAL flag is set in header.flags
     PACKED_BEGIN struct ParentInfo {
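The three new fields add 6 + 2 + 12 = 20 bytes, matching the HEADER_SIZE bump from 49 to 69. The uniform scale travels as a signed two-byte fixed-point value with 10 fractional bits (SENSOR_TO_WORLD_SCALE_RADIX below); a sketch of that encoding, with hypothetical helper names rather than the engine's:

    // radix 10 => resolution 1/1024, range roughly [-32, 32) in an int16
    function packFixed(x, radix) { return Math.round(x * (1 << radix)); }
    function unpackFixed(n, radix) { return n / (1 << radix); }
    unpackFixed(packFixed(1.5, 10), 10); // 1.5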
@@ -93,6 +96,9 @@
 */
 }
+static const int TRANSLATION_COMPRESSION_RADIX = 12;
+static const int SENSOR_TO_WORLD_SCALE_RADIX = 10;
 #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0)
 AvatarData::AvatarData() :
@@ -210,6 +216,14 @@ QByteArray AvatarData::toByteArray(bool cullSmallChanges, bool sendAll) {
     header->lookAtPosition[2] = _headData->_lookAtPosition.z;
     header->audioLoudness = _headData->_audioLoudness;
+    glm::mat4 sensorToWorldMatrix = getSensorToWorldMatrix();
+    packOrientationQuatToSixBytes(header->sensorToWorldQuat, glmExtractRotation(sensorToWorldMatrix));
+    glm::vec3 scale = extractScale(sensorToWorldMatrix);
+    packFloatScalarToSignedTwoByteFixed((uint8_t*)&header->sensorToWorldScale, scale.x, SENSOR_TO_WORLD_SCALE_RADIX);
+    // GLM is column-major: the translation lives in column 3
+    header->sensorToWorldTrans[0] = sensorToWorldMatrix[3][0];
+    header->sensorToWorldTrans[1] = sensorToWorldMatrix[3][1];
+    header->sensorToWorldTrans[2] = sensorToWorldMatrix[3][2];
     setSemiNibbleAt(header->flags, KEY_STATE_START_BIT, _keyState);
     // hand state
     bool isFingerPointing = _handState & IS_FINGER_POINTING_FLAG;
@@ -346,8 +360,6 @@ QByteArray AvatarData::toByteArray(bool cullSmallChanges, bool sendAll) {
         *destinationBuffer++ = validity;
     }
-    const int TRANSLATION_COMPRESSION_RADIX = 12;
     validityBit = 0;
     validity = *validityPosition++;
     for (int i = 0; i < _jointData.size(); i++) {
@@ -500,6 +512,14 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
     }
     _headData->_audioLoudness = audioLoudness;
+    glm::quat sensorToWorldQuat;
+    unpackOrientationQuatFromSixBytes(header->sensorToWorldQuat, sensorToWorldQuat);
+    float sensorToWorldScale;
+    unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&header->sensorToWorldScale, &sensorToWorldScale, SENSOR_TO_WORLD_SCALE_RADIX);
+    glm::vec3 sensorToWorldTrans(header->sensorToWorldTrans[0], header->sensorToWorldTrans[1], header->sensorToWorldTrans[2]);
+    glm::mat4 sensorToWorldMatrix = createMatFromScaleQuatAndPos(glm::vec3(sensorToWorldScale), sensorToWorldQuat, sensorToWorldTrans);
+    _sensorToWorldMatrixCache.set(sensorToWorldMatrix);
     { // bitFlags and face data
         uint8_t bitItems = header->flags;
@@ -616,7 +636,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
     // each joint translation component is stored in 6 bytes.
     const int COMPRESSED_TRANSLATION_SIZE = 6;
     PACKET_READ_CHECK(JointTranslation, numValidJointTranslations * COMPRESSED_TRANSLATION_SIZE);
-    const int TRANSLATION_COMPRESSION_RADIX = 12;
     for (int i = 0; i < numJoints; i++) {
         JointData& data = _jointData[i];
@@ -1718,6 +1737,11 @@ AvatarEntityIDs AvatarData::getAndClearRecentlyDetachedIDs() {
     return result;
 }
+// thread-safe
+glm::mat4 AvatarData::getSensorToWorldMatrix() const {
+    return _sensorToWorldMatrixCache.get();
+}
 QScriptValue RayToAvatarIntersectionResultToScriptValue(QScriptEngine* engine, const RayToAvatarIntersectionResult& value) {
     QScriptValue obj = engine->newObject();
     obj.setProperty("intersects", value.intersects);

AvatarData.h

@@ -54,6 +54,7 @@ typedef unsigned long long quint64;
 #include <SpatiallyNestable.h>
 #include <NumericalConstants.h>
 #include <Packed.h>
+#include <ThreadSafeValueCache.h>
 #include "AABox.h"
 #include "HeadData.h"
@@ -171,6 +172,8 @@ class AvatarData : public QObject, public SpatiallyNestable {
     Q_PROPERTY(QUuid sessionUUID READ getSessionUUID)
+    Q_PROPERTY(glm::mat4 sensorToWorldMatrix READ getSensorToWorldMatrix)
 public:
     static const QString FRAME_NAME;
@@ -351,6 +354,9 @@
     void setAvatarEntityDataChanged(bool value) { _avatarEntityDataChanged = value; }
     AvatarEntityIDs getAndClearRecentlyDetachedIDs();
+    // thread safe
+    Q_INVOKABLE glm::mat4 getSensorToWorldMatrix() const;
 public slots:
     void sendAvatarDataPacket();
     void sendIdentityPacket();
@@ -425,6 +431,9 @@
     bool _avatarEntityDataLocallyEdited { false };
     bool _avatarEntityDataChanged { false };
+    // used to transform any sensor into world space, including the _hmdSensorMat, or hand controllers.
+    ThreadSafeValueCache<glm::mat4> _sensorToWorldMatrixCache { glm::mat4() };
 private:
     friend void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar);
     static QUrl _defaultFullAvatarModelUrl;
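Moving the cache and its accessor down from MyAvatar to AvatarData means every avatar carries a sensor-to-world matrix, not just your own: parseDataFromBuffer above fills the cache for remote avatars, which Avatar::getAbsoluteJointRotationInObjectFrame needs when someone else's entity is parented to joint 65534. Script-side access is unchanged, and thread-safe through the cache:

    var sensorToWorld = MyAvatar.sensorToWorldMatrix; // reads the ThreadSafeValueCache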

PacketHeaders.cpp

@@ -52,7 +52,7 @@ PacketVersion versionForPacketType(PacketType packetType) {
     case PacketType::AvatarData:
     case PacketType::BulkAvatarData:
     case PacketType::KillAvatar:
-        return static_cast<PacketVersion>(AvatarMixerPacketVersion::AbsoluteSixByteRotations);
+        return static_cast<PacketVersion>(AvatarMixerPacketVersion::SensorToWorldMat);
     case PacketType::ICEServerHeartbeat:
         return 18; // ICE Server Heartbeat signing
     case PacketType::AssetGetInfo:

PacketHeaders.h

@@ -192,7 +192,8 @@ enum class AvatarMixerPacketVersion : PacketVersion {
     TranslationSupport = 17,
     SoftAttachmentSupport,
     AvatarEntities,
-    AbsoluteSixByteRotations
+    AbsoluteSixByteRotations,
+    SensorToWorldMat
 };
 enum class DomainConnectRequestVersion : PacketVersion {

WebTablet.js

@@ -8,8 +8,6 @@
 // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
-var NEGATIVE_ONE = 65535;
 var RAD_TO_DEG = 180 / Math.PI;
 var X_AXIS = {x: 1, y: 0, z: 0};
 var Y_AXIS = {x: 0, y: 1, z: 0};
@@ -61,7 +59,7 @@ WebTablet = function (url) {
         }),
         dimensions: {x: WIDTH, y: HEIGHT, z: DEPTH},
         parentID: MyAvatar.sessionUUID,
-        parentJointIndex: NEGATIVE_ONE
+        parentJointIndex: -2
     });
     var WEB_ENTITY_REDUCTION_FACTOR = {x: 0.78, y: 0.85};
@@ -82,61 +80,14 @@ WebTablet = function (url) {
         shapeType: "box",
         dpi: 45,
         parentID: this.tabletEntityID,
-        parentJointIndex: NEGATIVE_ONE
+        parentJointIndex: -1
     });
-    this.state = "idle";
-    // compute the room/sensor matrix of the entity.
-    var invRoomMat = Mat4.inverse(MyAvatar.sensorToWorldMatrix);
-    var entityWorldMat = Mat4.createFromRotAndTrans(tabletEntityRotation, tabletEntityPosition);
-    this.entityRoomMat = Mat4.multiply(invRoomMat, entityWorldMat);
-    var _this = this;
-    this.updateFunc = function (dt) {
-        _this.update(dt);
-    };
-    Script.update.connect(this.updateFunc);
 };
 WebTablet.prototype.destroy = function () {
     Entities.deleteEntity(this.webEntityID);
     Entities.deleteEntity(this.tabletEntityID);
-    Script.update.disconnect(this.updateFunc);
 };
-WebTablet.prototype.update = function (dt) {
-    var props = Entities.getEntityProperties(this.tabletEntityID, ["position", "rotation", "parentID", "parentJointIndex"]);
-    var entityWorldMat;
-    if (this.state === "idle") {
-        if (props.parentID !== MyAvatar.sessionUUID || props.parentJointIndex !== NEGATIVE_ONE) {
-            this.state = "held";
-            return;
-        }
-        // convert the sensor/room matrix of the entity into world space, using the current sensorToWorldMatrix
-        var roomMat = MyAvatar.sensorToWorldMatrix;
-        entityWorldMat = Mat4.multiply(roomMat, this.entityRoomMat);
-        // slam the world space position and orientation
-        Entities.editEntity(this.tabletEntityID, {
-            position: Mat4.extractTranslation(entityWorldMat),
-            rotation: Mat4.extractRotation(entityWorldMat)
-        });
-    } else if (this.state === "held") {
-        if (props.parentID === MyAvatar.sessionUUID && props.parentJointIndex === NEGATIVE_ONE) {
-            // re-compute the room/sensor matrix for the avatar now that it has been released.
-            var invRoomMat = Mat4.inverse(MyAvatar.sensorToWorldMatrix);
-            entityWorldMat = Mat4.createFromRotAndTrans(props.rotation, props.position);
-            this.entityRoomMat = Mat4.multiply(invRoomMat, entityWorldMat);
-            this.state = "idle";
-            return;
-        }
-    }
-};
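With the tablet parented to joint -2, the per-frame Script.update "slam" loop and the idle/held state machine above become unnecessary; the parenting system keeps the tablet glued to the room, which is why the jitter is gone. If a grab script re-parents the tablet while it is held, restoring the room lock on release is a one-line edit (a sketch, assuming a release callback of your own naming):

    function onTabletReleased(tabletEntityID) {
        Entities.editEntity(tabletEntityID, {
            parentID: MyAvatar.sessionUUID,
            parentJointIndex: -2   // hand it back to the sensor-to-world "joint"
        });
    }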