Mirror of https://github.com/overte-org/overte.git

Commit a722ad46f6: Merge branch 'master' into particleSorting
46 changed files with 1037 additions and 431 deletions
@@ -291,7 +291,7 @@ function MyController(hand) {
return this.triggerValue < TRIGGER_OFF_VALUE;
};

- this.triggerSqueezed = function() {
+ this.triggerSqueezed = function() {
var triggerValue = this.rawTriggerValue;
return triggerValue > TRIGGER_ON_VALUE;
};

@@ -352,14 +352,14 @@ function MyController(hand) {

var intersection = Entities.findRayIntersection(pickRayBacked, true);

- if (intersection.intersects && intersection.properties.locked === 0) {
+ if (intersection.intersects) {
// the ray is intersecting something we can move.
var intersectionDistance = Vec3.distance(pickRay.origin, intersection.intersection);
this.grabbedEntity = intersection.entityID;

//this code will disabled the beam for the opposite hand of the one that grabbed it if the entity says so
var grabbableData = getEntityCustomData(GRABBABLE_DATA_KEY, intersection.entityID, DEFAULT_GRABBABLE_DATA);
- if (grabbableData["turnOffOppositeBeam"] === true) {
+ if (grabbableData["turnOffOppositeBeam"]) {
if (this.hand === RIGHT_HAND) {
disabledHand = LEFT_HAND;
} else {

@@ -369,7 +369,7 @@ function MyController(hand) {
disabledHand = 'none';
}

- if (grabbableData.grabbable === false) {
+ if (typeof grabbableData.grabbable !== 'undefined' && !grabbableData.grabbable) {
this.grabbedEntity = null;
continue;
}

@@ -379,10 +379,9 @@ function MyController(hand) {
}
if (intersectionDistance <= NEAR_PICK_MAX_DISTANCE) {
// the hand is very close to the intersected object. go into close-grabbing mode.
var grabbableData = getEntityCustomData(GRABBABLE_DATA_KEY, this.grabbedEntity, DEFAULT_GRABBABLE_DATA);
if (grabbableData.wantsTrigger) {
this.setState(STATE_NEAR_GRABBING_NON_COLLIDING);
- } else {
+ } else if (!intersection.properties.locked) {
this.setState(STATE_NEAR_GRABBING);
}
} else {

@@ -391,7 +390,8 @@ function MyController(hand) {
this.grabbedEntity = null;
} else {
// the hand is far from the intersected object. go into distance-holding mode
- if (intersection.properties.collisionsWillMove === 1) {
+ if (intersection.properties.collisionsWillMove
+ && !intersection.properties.locked) {
this.setState(STATE_DISTANCE_HOLDING);
} else {
this.setState(STATE_FAR_GRABBING_NON_COLLIDING);

@@ -409,7 +409,7 @@ function MyController(hand) {
for (i = 0; i < nearbyEntities.length; i++) {
var grabbableDataForCandidate =
getEntityCustomData(GRABBABLE_DATA_KEY, nearbyEntities[i], DEFAULT_GRABBABLE_DATA);
- if (grabbableDataForCandidate.grabbable === false) {
+ if (!grabbableDataForCandidate.grabbable) {
continue;
}
var propsForCandidate =

@@ -427,7 +427,7 @@ function MyController(hand) {
}
if (grabbableData.wantsTrigger) {
this.setState(STATE_NEAR_GRABBING_NON_COLLIDING);
- } else if (props.locked === 0) {
+ } else if (!props.locked) {
this.setState(STATE_NEAR_GRABBING);
}
}

@@ -446,7 +446,7 @@ function MyController(hand) {
this.currentObjectPosition = grabbedProperties.position;
this.currentObjectRotation = grabbedProperties.rotation;
this.currentObjectTime = now;
- this.handPreviousPosition = handControllerPosition;
+ this.handRelativePreviousPosition = Vec3.subtract(handControllerPosition, MyAvatar.position);
this.handPreviousRotation = handRotation;

this.actionID = NULL_ACTION_ID;

@@ -503,7 +503,7 @@ function MyController(hand) {

// How far did the avatar turn this timestep?
// Note: The following code is too long because we need a Quat.quatBetween() function
- // that returns the minimum quaternion between two quaternions.
+ // that returns the minimum quaternion between two quaternions.
var currentOrientation = MyAvatar.orientation;
if (Quat.dot(currentOrientation, this.currentAvatarOrientation) < 0.0) {
var negativeCurrentOrientation = {

@@ -523,11 +523,10 @@ function MyController(hand) {
this.currentAvatarOrientation = currentOrientation;

// how far did hand move this timestep?
- var handMoved = Vec3.subtract(handControllerPosition, this.handPreviousPosition);
- this.handPreviousPosition = handControllerPosition;
+ var handMoved = Vec3.subtract(handToAvatar, this.handRelativePreviousPosition);
+ this.handRelativePreviousPosition = handToAvatar;

// magnify the hand movement but not the change from avatar movement & rotation
- handMoved = Vec3.subtract(handMoved, avatarDeltaPosition);
handMoved = Vec3.subtract(handMoved, handMovementFromTurning);
var superHandMoved = Vec3.multiply(handMoved, radius);

@@ -570,7 +569,7 @@ function MyController(hand) {
var grabbableData = getEntityCustomData(GRABBABLE_DATA_KEY, this.grabbedEntity, DEFAULT_GRABBABLE_DATA);

var turnOffOtherHand = grabbableData["turnOffOtherHand"];
- if (turnOffOtherHand === true) {
+ if (turnOffOtherHand) {
//don't activate the second hand grab because the script is handling the second hand logic
return;
}

@@ -783,11 +782,11 @@ function MyController(hand) {
// we haven't been touched before, but either right or left is touching us now
_this.allTouchedIDs[id] = true;
_this.startTouch(id);
- } else if ((leftIsTouching || rightIsTouching) && _this.allTouchedIDs[id] === true) {
+ } else if ((leftIsTouching || rightIsTouching) && _this.allTouchedIDs[id]) {
// we have been touched before and are still being touched
// continue touch
_this.continueTouch(id);
- } else if (_this.allTouchedIDs[id] === true) {
+ } else if (_this.allTouchedIDs[id]) {
delete _this.allTouchedIDs[id];
_this.stopTouch(id);
examples/controllers/rightClickExample.js (new file, 10 lines)
@@ -0,0 +1,10 @@
var MAPPING_NAME = "com.highfidelity.rightClickExample";
var mapping = Controller.newMapping(MAPPING_NAME);
mapping.from(Controller.Hardware.Keyboard.RightMouseClicked).to(function (value) {
print("Keyboard.RightMouseClicked");
});
Controller.enableMapping(MAPPING_NAME);

Script.scriptEnding.connect(function () {
Controller.disableMapping(MAPPING_NAME);
});
(deleted file, 68 lines removed)
@@ -1,68 +0,0 @@
// createBoxes.js
// part of bubblewand
//
// Created by James B. Pollack @imgntn -- 09/07/2015
// Copyright 2015 High Fidelity, Inc.
//
// Loads a wand model and attaches the bubble wand behavior.
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html


Script.include("https://raw.githubusercontent.com/highfidelity/hifi/master/examples/utilities.js");
Script.include("https://raw.githubusercontent.com/highfidelity/hifi/master/examples/libraries/utils.js");

var bubbleModel = 'http://hifi-public.s3.amazonaws.com/james/bubblewand/models/bubble/bubble.fbx?' + randInt(0, 10000);;
//var scriptURL'http://hifi-public.s3.amazonaws.com/james/bubblewand/scripts/wand.js?'+randInt(0,10000);

//for local testing
//var scriptURL = "http://localhost:8080/scripts/setRecurringTimeout.js?" + randInt(0, 10000);


var scriptURL='http://hifi-public.s3.amazonaws.com/james/debug/timeouts/setRecurringTimeout.js?'+ randInt(0, 10000);
//create the wand in front of the avatar

var boxes=[];
var TEST_ENTITY_NAME = "TimerScript";


var TOTAL_ENTITIES = 100;
for (var i = 0; i < TOTAL_ENTITIES; i++) {
var box = Entities.addEntity({
type: "Box",
name: TEST_ENTITY_NAME,
position: {
x: randInt(0, 100) - 50 + MyAvatar.position.x,
y: randInt(0, 100) - 50 + MyAvatar.position.x,
z: randInt(0, 100) - 50 + MyAvatar.position.x,
},
dimensions: {
x: 1,
y: 1,
z: 1,
},
color: {
red: 255,
green: 0,
blue: 0,
},
//must be enabled to be grabbable in the physics engine
collisionsWillMove: true,
shapeType: 'box',
lifetime:60,
script: scriptURL
});
boxes.push(box)
}


function cleanup() {
while (boxes.length > 0) {
Entities.deleteEntity(boxes.pop());
}
}


Script.scriptEnding.connect(cleanup);
@@ -2,7 +2,7 @@ set(TARGET_NAME interface)
project(${TARGET_NAME})

# set a default root dir for each of our optional externals if it was not passed
set(OPTIONAL_EXTERNALS "LeapMotion" "RtMidi" "RSSDK" "iViewHMD")
set(OPTIONAL_EXTERNALS "LeapMotion" "RtMidi" "RSSDK")

if(WIN32)
list(APPEND OPTIONAL_EXTERNALS "3DConnexionClient")
@@ -13,17 +13,12 @@
{ "from": "Hydra.RB", "to": "Standard.RB" },
{ "from": "Hydra.RS", "to": "Standard.RS" },

{ "from": "Hydra.L0", "to": "Standard.Back" },
{ "from": "Hydra.L1", "to": "Standard.DL" },
{ "from": "Hydra.L2", "to": "Standard.DD" },
{ "from": "Hydra.L3", "to": "Standard.DR" },
{ "from": "Hydra.L4", "to": "Standard.DU" },
{ "from": [ "Hydra.L3", "Hydra.L4" ], "to": "Standard.LeftPrimaryThumb" },
{ "from": [ "Hydra.L1", "Hydra.L2" ], "to": "Standard.LeftSecondaryThumb" },

{ "from": [ "Hydra.R3", "Hydra.R4" ], "to": "Standard.RightPrimaryThumb" },
{ "from": [ "Hydra.R1", "Hydra.R2" ], "to": "Standard.RightSecondaryThumb" },

{ "from": "Hydra.R0", "to": "Standard.Start" },
{ "from": "Hydra.R1", "to": "Standard.X" },
{ "from": "Hydra.R2", "to": "Standard.A" },
{ "from": "Hydra.R3", "to": "Standard.B" },
{ "from": "Hydra.R4", "to": "Standard.Y" },

{ "from": "Hydra.LeftHand", "to": "Standard.LeftHand" },
{ "from": "Hydra.RightHand", "to": "Standard.RightHand" }
@@ -2,9 +2,8 @@
"name": "Standard to Action",
"channels": [
{ "from": "Standard.LY", "to": "Actions.TranslateZ" },
{ "from": "Standard.LX", "to": "Actions.TranslateX" },

{ "from": "Standard.RX",
{ "from": "Standard.LX",
"when": [ "Application.InHMD", "Application.ComfortMode" ],
"to": "Actions.StepYaw",
"filters":

@@ -14,8 +13,9 @@
]
},

{ "from": "Standard.LX", "to": "Actions.Yaw" },

{ "from": "Standard.RX", "to": "Actions.Yaw" },
{ "from": "Standard.RX", "to": "Actions.TranslateX" },
{ "from": "Standard.RY", "filters": "invert", "to": "Actions.TranslateY" },
@@ -1,15 +1,15 @@
{
"name": "Vive to Standard",
"channels": [
- { "from": "Vive.LY", "filters": [ "invert", { "type": "deadZone", "min": 0.7 } ], "to": "Standard.LY" },
- { "from": "Vive.LX", "filters": { "type": "deadZone", "min": 0.7 }, "to": "Standard.LX" },
+ { "from": "Vive.LY", "when": "Vive.LS", "filters": "invert", "to": "Standard.LY" },
+ { "from": "Vive.LX", "when": "Vive.LS", "to": "Standard.LX" },

{ "from": "Vive.LT", "to": "Standard.LT" },
{ "from": "Vive.LB", "to": "Standard.LB" },
{ "from": "Vive.LS", "to": "Standard.LS" },

- { "from": "Vive.RY", "filters": "invert", "to": "Standard.RY" },
- { "from": "Vive.RX", "to": "Standard.RX" },
+ { "from": "Vive.RY", "when": "Vive.RS", "filters": "invert", "to": "Standard.RY" },
+ { "from": "Vive.RX", "when": "Vive.RS", "to": "Standard.RX" },

{ "from": "Vive.RT", "to": "Standard.RT" },
{ "from": "Vive.RB", "to": "Standard.RB" },
@@ -523,29 +523,89 @@
},
{
"id": "walkFwd",
- "type": "clip",
+ "type": "blendLinearMove",
"data": {
- "url": "https://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/walk_fwd.fbx",
- "startFrame": 0.0,
- "endFrame": 35.0,
- "timeScale": 1.0,
- "loopFlag": true,
- "timeScaleVar": "walkTimeScale"
+ "alpha": 0.0,
+ "desiredSpeed": 1.4,
+ "characteristicSpeeds": [0.5, 1.4, 4.5],
+ "alphaVar": "moveForwardAlpha",
+ "desiredSpeedVar": "moveForwardSpeed"
},
- "children": []
+ "children": [
+ {
+ "id": "walkFwdShort",
+ "type": "clip",
+ "data": {
+ "url": "https://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/walk_short_fwd.fbx",
+ "startFrame": 0.0,
+ "endFrame": 39.0,
+ "timeScale": 1.0,
+ "loopFlag": true
+ },
+ "children": []
+ },
+ {
+ "id": "walkFwdNormal",
+ "type": "clip",
+ "data": {
+ "url": "https://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/walk_fwd.fbx",
+ "startFrame": 0.0,
+ "endFrame": 35.0,
+ "timeScale": 1.0,
+ "loopFlag": true
+ },
+ "children": []
+ },
+ {
+ "id": "walkFwdRun",
+ "type": "clip",
+ "data": {
+ "url": "https://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/run_fwd.fbx",
+ "startFrame": 0.0,
+ "endFrame": 21.0,
+ "timeScale": 1.0,
+ "loopFlag": true
+ },
+ "children": []
+ }
+ ]
},
{
"id": "walkBwd",
- "type": "clip",
+ "type": "blendLinearMove",
"data": {
- "url": "https://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/walk_bwd.fbx",
- "startFrame": 0.0,
- "endFrame": 37.0,
- "timeScale": 1.0,
- "loopFlag": true,
- "timeScaleVar": "walkTimeScale"
+ "alpha": 0.0,
+ "desiredSpeed": 1.4,
+ "characteristicSpeeds": [0.6, 1.45],
+ "alphaVar": "moveBackwardAlpha",
+ "desiredSpeedVar": "moveBackwardSpeed"
},
- "children": []
+ "children": [
+ {
+ "id": "walkBwdShort",
+ "type": "clip",
+ "data": {
+ "url": "https://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/walk_short_bwd.fbx",
+ "startFrame": 0.0,
+ "endFrame": 38.0,
+ "timeScale": 1.0,
+ "loopFlag": true
+ },
+ "children": []
+ },
+ {
+ "id": "walkBwdNormal",
+ "type": "clip",
+ "data": {
+ "url": "https://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/walk_bwd.fbx",
+ "startFrame": 0.0,
+ "endFrame": 36.0,
+ "timeScale": 1.0,
+ "loopFlag": true
+ },
+ "children": []
+ }
+ ]
},
{
"id": "turnLeft",
@@ -573,27 +633,77 @@
},
{
"id": "strafeLeft",
- "type": "clip",
+ "type": "blendLinearMove",
"data": {
- "url": "http://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/side_step_left.fbx",
- "startFrame": 0.0,
- "endFrame": 31.0,
- "timeScale": 1.0,
- "loopFlag": true
+ "alpha": 0.0,
+ "desiredSpeed": 1.4,
+ "characteristicSpeeds": [0.2, 0.65],
+ "alphaVar": "moveLateralAlpha",
+ "desiredSpeedVar": "moveLateralSpeed"
},
- "children": []
+ "children": [
+ {
+ "id": "strafeLeftShort",
+ "type": "clip",
+ "data": {
+ "url": "https://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/side_step_short_left.fbx",
+ "startFrame": 0.0,
+ "endFrame": 28.0,
+ "timeScale": 1.0,
+ "loopFlag": true
+ },
+ "children": []
+ },
+ {
+ "id": "strafeLeftNormal",
+ "type": "clip",
+ "data": {
+ "url": "http://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/side_step_left.fbx",
+ "startFrame": 0.0,
+ "endFrame": 30.0,
+ "timeScale": 1.0,
+ "loopFlag": true
+ },
+ "children": []
+ }
+ ]
},
{
"id": "strafeRight",
- "type": "clip",
+ "type": "blendLinearMove",
"data": {
- "url": "http://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/side_step_right.fbx",
- "startFrame": 0.0,
- "endFrame": 31.0,
- "timeScale": 1.0,
- "loopFlag": true
+ "alpha": 0.0,
+ "desiredSpeed": 1.4,
+ "characteristicSpeeds": [0.2, 0.65],
+ "alphaVar": "moveLateralAlpha",
+ "desiredSpeedVar": "moveLateralSpeed"
},
- "children": []
+ "children": [
+ {
+ "id": "strafeRightShort",
+ "type": "clip",
+ "data": {
+ "url": "http://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/side_step_short_right.fbx",
+ "startFrame": 0.0,
+ "endFrame": 28.0,
+ "timeScale": 1.0,
+ "loopFlag": true
+ },
+ "children": []
+ },
+ {
+ "id": "strafeRightNormal",
+ "type": "clip",
+ "data": {
+ "url": "http://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/side_step_right.fbx",
+ "startFrame": 0.0,
+ "endFrame": 30.0,
+ "timeScale": 1.0,
+ "loopFlag": true
+ },
+ "children": []
+ }
+ ]
}
]
}
@@ -73,6 +73,7 @@ namespace render {
avatarPtr->setDisplayingLookatTarget(renderLookAtTarget);

if (avatarPtr->isInitialized() && args) {
+ PROFILE_RANGE_BATCH(*args->_batch, "renderAvatarPayload");
avatarPtr->render(args, qApp->getCamera()->getPosition());
}
}

@@ -334,6 +335,7 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
}

auto& batch = *renderArgs->_batch;
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__);

if (glm::distance(DependencyManager::get<AvatarManager>()->getMyAvatar()->getPosition(), _position) < 10.0f) {
auto geometryCache = DependencyManager::get<GeometryCache>();

@@ -360,6 +362,7 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
}

if (havePosition && haveRotation) {
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__":leftHandPointer");
Transform pointerTransform;
pointerTransform.setTranslation(position);
pointerTransform.setRotation(rotation);

@@ -383,6 +386,7 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
}

if (havePosition && haveRotation) {
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__":rightHandPointer");
Transform pointerTransform;
pointerTransform.setTranslation(position);
pointerTransform.setRotation(rotation);

@@ -455,6 +459,7 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {

bool renderBounding = Menu::getInstance()->isOptionChecked(MenuOption::RenderBoundingCollisionShapes);
if (renderBounding && shouldRenderHead(renderArgs) && _skeletonModel.isRenderable()) {
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__":skeletonBoundingCollisionShapes");
_skeletonModel.renderBoundingCollisionShapes(*renderArgs->_batch, 0.7f);
}

@@ -464,6 +469,7 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
static const float INDICATOR_RADIUS = 0.03f;
static const glm::vec4 LOOK_AT_INDICATOR_COLOR = { 0.8f, 0.0f, 0.0f, 0.75f };
glm::vec3 position = glm::vec3(_position.x, getDisplayNamePosition().y + INDICATOR_OFFSET, _position.z);
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__":renderFocusIndicator");
Transform transform;
transform.setTranslation(position);
transform.postScale(INDICATOR_RADIUS);

@@ -472,6 +478,7 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {

// If the avatar is looking at me, indicate that they are
if (getHead()->isLookingAtMe() && Menu::getInstance()->isOptionChecked(MenuOption::ShowWhosLookingAtMe)) {
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__":renderLookingAtMe");
const glm::vec3 LOOKING_AT_ME_COLOR = { 1.0f, 1.0f, 1.0f };
const float LOOKING_AT_ME_ALPHA_START = 0.8f;
const float LOOKING_AT_ME_DURATION = 0.5f; // seconds

@@ -517,6 +524,7 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
const float MIN_VOICE_SPHERE_DISTANCE = 12.0f;
if (Menu::getInstance()->isOptionChecked(MenuOption::BlueSpeechSphere)
&& distanceToTarget > MIN_VOICE_SPHERE_DISTANCE) {
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__":renderVoiceSphere");

// render voice intensity sphere for avatars that are farther away
const float MAX_SPHERE_ANGLE = 10.0f * RADIANS_PER_DEGREE;

@@ -653,6 +661,9 @@ void Avatar::updateJointMappings() {
}

void Avatar::renderBillboard(RenderArgs* renderArgs) {
+ // FIXME disabling the billboard because it doesn't appear to work reliably
+ // the billboard is ending up with a random texture and position.
+ return;
if (_billboard.isEmpty()) {
return;
}

@@ -684,6 +695,7 @@ void Avatar::renderBillboard(RenderArgs* renderArgs) {
glm::vec2 texCoordBottomRight(1.0f, 1.0f);

gpu::Batch& batch = *renderArgs->_batch;
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__);
batch.setResourceTexture(0, _billboardTexture->getGPUTexture());
DependencyManager::get<DeferredLightingEffect>()->bindSimpleProgram(batch, true);
DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight,

@@ -766,6 +778,8 @@ Transform Avatar::calculateDisplayNameTransform(const ViewFrustum& frustum, cons
}

void Avatar::renderDisplayName(gpu::Batch& batch, const ViewFrustum& frustum, const glm::vec3& textPosition) const {
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__);

bool shouldShowReceiveStats = DependencyManager::get<AvatarManager>()->shouldShowReceiveStats() && !isMyAvatar();

// If we have nothing to draw, or it's totally transparent, or it's too close or behind the camera, return

@@ -816,17 +830,24 @@ void Avatar::renderDisplayName(gpu::Batch& batch, const ViewFrustum& frustum, co
// Test on extent above insures abs(height) > 0.0f
textTransform.postScale(1.0f / height);
batch.setModelTransform(textTransform);

- DependencyManager::get<DeferredLightingEffect>()->bindSimpleProgram(batch, false, true, true, true);
- DependencyManager::get<GeometryCache>()->renderBevelCornersRect(batch, left, bottom, width, height,
- bevelDistance, backgroundColor);
+ {
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__":renderBevelCornersRect");
+ DependencyManager::get<DeferredLightingEffect>()->bindSimpleProgram(batch, false, true, true, true);
+ DependencyManager::get<GeometryCache>()->renderBevelCornersRect(batch, left, bottom, width, height,
+ bevelDistance, backgroundColor);
+ }

// Render actual name
QByteArray nameUTF8 = renderedDisplayName.toLocal8Bit();

// Render text slightly in front to avoid z-fighting
textTransform.postTranslate(glm::vec3(0.0f, 0.0f, SLIGHTLY_IN_FRONT * renderer->getFontSize()));
batch.setModelTransform(textTransform);
- renderer->draw(batch, text_x, -text_y, nameUTF8.data(), textColor);
+ {
+ PROFILE_RANGE_BATCH(batch, __FUNCTION__":renderText");
+ renderer->draw(batch, text_x, -text_y, nameUTF8.data(), textColor);
+ }
}
}

@@ -1089,6 +1110,7 @@ void Avatar::renderJointConnectingCone(gpu::Batch& batch, glm::vec3 position1, g
points << p1a << p1b << p2a << p1b << p2a << p2b;
}

+ PROFILE_RANGE_BATCH(batch, __FUNCTION__);
// TODO: this is really inefficient constantly recreating these vertices buffers. It would be
// better if the avatars cached these buffers for each of the joints they are rendering
geometryCache->updateVertices(_jointConesID, points, color);
@@ -286,10 +286,10 @@ void AvatarManager::handleOutgoingChanges(const VectorOfMotionStates& motionStat

void AvatarManager::handleCollisionEvents(const CollisionEvents& collisionEvents) {
for (Collision collision : collisionEvents) {
- // TODO: Current physics uses null idA or idB for non-entities. The plan is to handle MOTIONSTATE_TYPE_AVATAR,
- // and then MOTIONSTATE_TYPE_MYAVATAR. As it is, this code only covers the case of my avatar (in which case one
- // if the ids will be null), and the behavior for other avatars is not specified. This has to be fleshed
- // out as soon as we use the new motionstates.
+ // TODO: The plan is to handle MOTIONSTATE_TYPE_AVATAR, and then MOTIONSTATE_TYPE_MYAVATAR. As it is, other
+ // people's avatars will have an id that doesn't match any entities, and one's own avatar will have
+ // an id of null. Thus this code handles any collision in which one of the participating objects is
+ // my avatar. (Other user machines will make a similar analysis and inject sound for their collisions.)
if (collision.idA.isNull() || collision.idB.isNull()) {
MyAvatar* myAvatar = getMyAvatar();
const QString& collisionSoundURL = myAvatar->getCollisionSoundURL();

@@ -299,9 +299,7 @@ void AvatarManager::handleCollisionEvents(const CollisionEvents& collisionEvents
const bool isSound = (collision.type == CONTACT_EVENT_TYPE_START) && (velocityChange > MIN_AVATAR_COLLISION_ACCELERATION);

if (!isSound) {
- // TODO: When the new motion states are used, we'll probably break from the whole loop as soon as we hit our own avatar
- // (regardless of isSound), because other users should inject for their own avatars.
- continue;
+ return; // No sense iterating for others. We only have one avatar.
}
// Your avatar sound is personal to you, so let's say the "mass" part of the kinetic energy is already accounted for.
const float energy = velocityChange * velocityChange;

@@ -314,7 +312,7 @@ void AvatarManager::handleCollisionEvents(const CollisionEvents& collisionEvents

AudioInjector::playSound(collisionSoundURL, energyFactorOfFull, AVATAR_STRETCH_FACTOR, myAvatar->getPosition());
- myAvatar->collisionWithEntity(collision);
}
+ return; }
}
}
}
@@ -18,6 +18,7 @@

AvatarMotionState::AvatarMotionState(Avatar* avatar, btCollisionShape* shape) : ObjectMotionState(shape), _avatar(avatar) {
+ assert(_avatar);
_type = MOTIONSTATE_TYPE_AVATAR;
if (_shape) {
_mass = 100.0f; // HACK
}
@@ -121,6 +121,8 @@ MyAvatar::MyAvatar(RigPointer rig) :
connect(DependencyManager::get<AddressManager>().data(), &AddressManager::locationChangeRequired,
this, &MyAvatar::goToLocation);
_characterController.setEnabled(true);
+
+ _bodySensorMatrix = deriveBodyFromHMDSensor();
}

MyAvatar::~MyAvatar() {

@@ -345,23 +347,6 @@ void MyAvatar::updateFromHMDSensorMatrix(const glm::mat4& hmdSensorMatrix) {
}

void MyAvatar::updateHMDFollowVelocity() {
- bool isMoving;
- if (_lastIsMoving) {
- const float MOVE_EXIT_SPEED_THRESHOLD = 0.07f; // m/sec
- isMoving = glm::length(_velocity) >= MOVE_EXIT_SPEED_THRESHOLD;
- } else {
- const float MOVE_ENTER_SPEED_THRESHOLD = 0.2f; // m/sec
- isMoving = glm::length(_velocity) > MOVE_ENTER_SPEED_THRESHOLD;
- }
-
- bool justStartedMoving = (_lastIsMoving != isMoving) && isMoving;
- _lastIsMoving = isMoving;
-
- bool hmdIsAtRest = _hmdAtRestDetector.update(_hmdSensorPosition, _hmdSensorOrientation);
- if (hmdIsAtRest || justStartedMoving) {
- _isFollowingHMD = true;
- }
-
// compute offset to body's target position (in sensor-frame)
auto sensorBodyMatrix = deriveBodyFromHMDSensor();
_hmdFollowOffset = extractTranslation(sensorBodyMatrix) - extractTranslation(_bodySensorMatrix);

@@ -370,13 +355,29 @@ void MyAvatar::updateHMDFollowVelocity() {
// don't pull the body DOWN to match the target (allow animation system to squat)
truncatedOffset.y = 0.0f;
}
+ float truncatedOffsetDistance = glm::length(truncatedOffset);
+
+ bool isMoving;
+ if (_lastIsMoving) {
+ const float MOVE_EXIT_SPEED_THRESHOLD = 0.07f; // m/sec
+ isMoving = glm::length(_velocity) >= MOVE_EXIT_SPEED_THRESHOLD;
+ } else {
+ const float MOVE_ENTER_SPEED_THRESHOLD = 0.2f; // m/sec
+ isMoving = glm::length(_velocity) > MOVE_ENTER_SPEED_THRESHOLD;
+ }
+ bool justStartedMoving = (_lastIsMoving != isMoving) && isMoving;
+ _lastIsMoving = isMoving;
+ bool hmdIsAtRest = _hmdAtRestDetector.update(_hmdSensorPosition, _hmdSensorOrientation);
+ const float MIN_HMD_HIP_SHIFT = 0.05f;
+ if (justStartedMoving || (hmdIsAtRest && truncatedOffsetDistance > MIN_HMD_HIP_SHIFT)) {
+ _isFollowingHMD = true;
+ }

bool needNewFollowSpeed = (_isFollowingHMD && _hmdFollowSpeed == 0.0f);
if (!needNewFollowSpeed) {
// check to see if offset has exceeded its threshold
- float distance = glm::length(truncatedOffset);
const float MAX_HMD_HIP_SHIFT = 0.2f;
- if (distance > MAX_HMD_HIP_SHIFT) {
+ if (truncatedOffsetDistance > MAX_HMD_HIP_SHIFT) {
_isFollowingHMD = true;
needNewFollowSpeed = true;
}
@@ -118,18 +118,6 @@ void EyeTracker::init() {
qCWarning(interfaceapp) << "Eye Tracker: Already initialized";
return;
}
-
- #ifdef HAVE_IVIEWHMD
- int result = smi_setCallback(eyeTrackerCallback);
- if (result != SMI_RET_SUCCESS) {
- qCWarning(interfaceapp) << "Eye Tracker: Error setting callback:" << smiReturnValueToString(result);
- QMessageBox::warning(nullptr, "Eye Tracker Error", smiReturnValueToString(result));
- } else {
- _isInitialized = true;
- }
-
- connect(&_startStreamingWatcher, SIGNAL(finished()), this, SLOT(onStreamStarted()));
- #endif
}

#ifdef HAVE_IVIEWHMD

@@ -140,6 +128,10 @@ int EyeTracker::startStreaming(bool simulate) {

#ifdef HAVE_IVIEWHMD
void EyeTracker::onStreamStarted() {
+ if (!_isInitialized) {
+ return;
+ }
+
int result = _startStreamingWatcher.result();
_isStreaming = (result == SMI_RET_SUCCESS);

@@ -171,6 +163,20 @@ void EyeTracker::onStreamStarted() {
#endif

void EyeTracker::setEnabled(bool enabled, bool simulate) {
+ if (enabled && !_isInitialized) {
+ #ifdef HAVE_IVIEWHMD
+ int result = smi_setCallback(eyeTrackerCallback);
+ if (result != SMI_RET_SUCCESS) {
+ qCWarning(interfaceapp) << "Eye Tracker: Error setting callback:" << smiReturnValueToString(result);
+ QMessageBox::warning(nullptr, "Eye Tracker Error", smiReturnValueToString(result));
+ } else {
+ _isInitialized = true;
+ }
+
+ connect(&_startStreamingWatcher, SIGNAL(finished()), this, SLOT(onStreamStarted()));
+ #endif
+ }
+
if (!_isInitialized) {
return;
}
@@ -12,6 +12,7 @@
#include "GLMHelpers.h"
#include "AnimationLogging.h"
#include "AnimUtil.h"
#include "AnimClip.h"

AnimBlendLinear::AnimBlendLinear(const QString& id, float alpha) :
AnimNode(AnimNode::Type::BlendLinear, id),

@@ -34,24 +35,13 @@ const AnimPoseVec& AnimBlendLinear::evaluate(const AnimVariantMap& animVars, flo
} else if (_children.size() == 1) {
_poses = _children[0]->evaluate(animVars, dt, triggersOut);
} else {

float clampedAlpha = glm::clamp(_alpha, 0.0f, (float)(_children.size() - 1));
size_t prevPoseIndex = glm::floor(clampedAlpha);
size_t nextPoseIndex = glm::ceil(clampedAlpha);
float alpha = glm::fract(clampedAlpha);
- if (prevPoseIndex == nextPoseIndex) {
- // this can happen if alpha is on an integer boundary
- _poses = _children[prevPoseIndex]->evaluate(animVars, dt, triggersOut);
- } else {
- // need to eval and blend between two children.
- auto prevPoses = _children[prevPoseIndex]->evaluate(animVars, dt, triggersOut);
- auto nextPoses = _children[nextPoseIndex]->evaluate(animVars, dt, triggersOut);
-
- if (prevPoses.size() > 0 && prevPoses.size() == nextPoses.size()) {
- _poses.resize(prevPoses.size());
-
- ::blend(_poses.size(), &prevPoses[0], &nextPoses[0], alpha, &_poses[0]);
- }
- }
+ evaluateAndBlendChildren(animVars, triggersOut, alpha, prevPoseIndex, nextPoseIndex, dt);
}
return _poses;
}

@@ -60,3 +50,21 @@ const AnimPoseVec& AnimBlendLinear::evaluate(const AnimVariantMap& animVars, flo
const AnimPoseVec& AnimBlendLinear::getPosesInternal() const {
return _poses;
}
+
+ void AnimBlendLinear::evaluateAndBlendChildren(const AnimVariantMap& animVars, Triggers& triggersOut, float alpha,
+ size_t prevPoseIndex, size_t nextPoseIndex, float dt) {
+ if (prevPoseIndex == nextPoseIndex) {
+ // this can happen if alpha is on an integer boundary
+ _poses = _children[prevPoseIndex]->evaluate(animVars, dt, triggersOut);
+ } else {
+ // need to eval and blend between two children.
+ auto prevPoses = _children[prevPoseIndex]->evaluate(animVars, dt, triggersOut);
+ auto nextPoses = _children[nextPoseIndex]->evaluate(animVars, dt, triggersOut);
+
+ if (prevPoses.size() > 0 && prevPoses.size() == nextPoses.size()) {
+ _poses.resize(prevPoses.size());
+
+ ::blend(_poses.size(), &prevPoses[0], &nextPoses[0], alpha, &_poses[0]);
+ }
+ }
+ }
@@ -38,6 +38,9 @@
// for AnimDebugDraw rendering
virtual const AnimPoseVec& getPosesInternal() const override;

+ void evaluateAndBlendChildren(const AnimVariantMap& animVars, Triggers& triggersOut, float alpha,
+ size_t prevPoseIndex, size_t nextPoseIndex, float dt);
+
AnimPoseVec _poses;

float _alpha;
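The refactor above does not change what AnimBlendLinear computes; restated, with n children and alpha the blend parameter, the code picks the two bracketing children and a blend fraction as follows, then blends the two evaluated pose vectors joint by joint via ::blend():

```latex
\hat{\alpha} = \operatorname{clamp}(\alpha,\, 0,\, n-1), \qquad
i = \lfloor \hat{\alpha} \rfloor, \quad
j = \lceil \hat{\alpha} \rceil, \quad
t = \operatorname{fract}(\hat{\alpha}),
\qquad
\text{pose}_k = \operatorname{blend}\!\big(\text{pose}_k^{(i)},\, \text{pose}_k^{(j)},\, t\big)
```

When alpha lands exactly on an integer, i equals j and the single child is evaluated directly, which is the integer-boundary branch in evaluateAndBlendChildren().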
libraries/animation/src/AnimBlendLinearMove.cpp (new file, 126 lines)
@@ -0,0 +1,126 @@
//
// AnimBlendLinearMove.cpp
//
// Created by Anthony J. Thibault on 10/22/15.
// Copyright (c) 2015 High Fidelity, Inc. All rights reserved.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "AnimBlendLinearMove.h"
#include <GLMHelpers.h>
#include "AnimationLogging.h"
#include "AnimUtil.h"
#include "AnimClip.h"

AnimBlendLinearMove::AnimBlendLinearMove(const QString& id, float alpha, float desiredSpeed, const std::vector<float>& characteristicSpeeds) :
AnimNode(AnimNode::Type::BlendLinearMove, id),
_alpha(alpha),
_desiredSpeed(desiredSpeed),
_characteristicSpeeds(characteristicSpeeds) {

}

AnimBlendLinearMove::~AnimBlendLinearMove() {

}

const AnimPoseVec& AnimBlendLinearMove::evaluate(const AnimVariantMap& animVars, float dt, Triggers& triggersOut) {

assert(_children.size() == _characteristicSpeeds.size());

_alpha = animVars.lookup(_alphaVar, _alpha);
_desiredSpeed = animVars.lookup(_desiredSpeedVar, _desiredSpeed);

if (_children.size() == 0) {
for (auto&& pose : _poses) {
pose = AnimPose::identity;
}
} else if (_children.size() == 1) {
const float alpha = 0.0f;
const int prevPoseIndex = 0;
const int nextPoseIndex = 0;
float prevDeltaTime, nextDeltaTime;
setFrameAndPhase(dt, alpha, prevPoseIndex, nextPoseIndex, &prevDeltaTime, &nextDeltaTime, triggersOut);
evaluateAndBlendChildren(animVars, triggersOut, alpha, prevPoseIndex, nextPoseIndex, prevDeltaTime, nextDeltaTime);
} else {

float clampedAlpha = glm::clamp(_alpha, 0.0f, (float)(_children.size() - 1));
size_t prevPoseIndex = glm::floor(clampedAlpha);
size_t nextPoseIndex = glm::ceil(clampedAlpha);
float alpha = glm::fract(clampedAlpha);
float prevDeltaTime, nextDeltaTime;
setFrameAndPhase(dt, alpha, prevPoseIndex, nextPoseIndex, &prevDeltaTime, &nextDeltaTime, triggersOut);
evaluateAndBlendChildren(animVars, triggersOut, alpha, prevPoseIndex, nextPoseIndex, prevDeltaTime, nextDeltaTime);
}
return _poses;
}

// for AnimDebugDraw rendering
const AnimPoseVec& AnimBlendLinearMove::getPosesInternal() const {
return _poses;
}

void AnimBlendLinearMove::evaluateAndBlendChildren(const AnimVariantMap& animVars, Triggers& triggersOut, float alpha,
size_t prevPoseIndex, size_t nextPoseIndex,
float prevDeltaTime, float nextDeltaTime) {
if (prevPoseIndex == nextPoseIndex) {
// this can happen if alpha is on an integer boundary
_poses = _children[prevPoseIndex]->evaluate(animVars, prevDeltaTime, triggersOut);
} else {
// need to eval and blend between two children.
auto prevPoses = _children[prevPoseIndex]->evaluate(animVars, prevDeltaTime, triggersOut);
auto nextPoses = _children[nextPoseIndex]->evaluate(animVars, nextDeltaTime, triggersOut);

if (prevPoses.size() > 0 && prevPoses.size() == nextPoses.size()) {
_poses.resize(prevPoses.size());

::blend(_poses.size(), &prevPoses[0], &nextPoses[0], alpha, &_poses[0]);
}
}
}

void AnimBlendLinearMove::setFrameAndPhase(float dt, float alpha, int prevPoseIndex, int nextPoseIndex,
float* prevDeltaTimeOut, float* nextDeltaTimeOut, Triggers& triggersOut) {

const float FRAMES_PER_SECOND = 30.0f;
auto prevClipNode = std::dynamic_pointer_cast<AnimClip>(_children[prevPoseIndex]);
assert(prevClipNode);
auto nextClipNode = std::dynamic_pointer_cast<AnimClip>(_children[nextPoseIndex]);
assert(nextClipNode);

float v0 = _characteristicSpeeds[prevPoseIndex];
float n0 = (prevClipNode->getEndFrame() - prevClipNode->getStartFrame()) + 1.0f;
float v1 = _characteristicSpeeds[nextPoseIndex];
float n1 = (nextClipNode->getEndFrame() - nextClipNode->getStartFrame()) + 1.0f;

// rate of change in phase space, necessary to achive desired speed.
float omega = (_desiredSpeed * FRAMES_PER_SECOND) / ((1.0f - alpha) * v0 * n0 + alpha * v1 * n1);

float f0 = prevClipNode->getStartFrame() + _phase * n0;
prevClipNode->setCurrentFrame(f0);

float f1 = nextClipNode->getStartFrame() + _phase * n1;
nextClipNode->setCurrentFrame(f1);

// integrate phase forward in time.
_phase += omega * dt;

// detect loop trigger events
if (_phase >= 1.0f) {
triggersOut.push_back(_id + "Loop");
_phase = glm::fract(_phase);
}

*prevDeltaTimeOut = omega * dt * (n0 / FRAMES_PER_SECOND);
*nextDeltaTimeOut = omega * dt * (n1 / FRAMES_PER_SECOND);
}

void AnimBlendLinearMove::setCurrentFrameInternal(float frame) {
assert(_children.size() > 0);
auto clipNode = std::dynamic_pointer_cast<AnimClip>(_children.front());
assert(clipNode);
const float NUM_FRAMES = (clipNode->getEndFrame() - clipNode->getStartFrame()) + 1.0f;
_phase = fmodf(frame, NUM_FRAMES);
}
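A note on the phase-rate computation in setFrameAndPhase() above, restating what the code does: with v0, v1 the characteristic speeds of the two bracketing clips, n0, n1 their frame counts, FPS = 30, and alpha the blend fraction, the phase advances at

```latex
\omega \;=\; \frac{v_{\text{desired}} \cdot \text{FPS}}{(1-\alpha)\,v_0 n_0 + \alpha\,v_1 n_1},
\qquad
\phi \leftarrow \phi + \omega\,dt,
\qquad
\Delta t_{\text{prev}} = \omega\,dt\,\frac{n_0}{\text{FPS}}, \quad
\Delta t_{\text{next}} = \omega\,dt\,\frac{n_1}{\text{FPS}}.
```

At alpha = 0 this reduces to a plain time scale of v_desired / v0 on the prev clip, so a clip whose characteristic speed matches the desired speed plays back at its authored rate, while faster or slower locomotion speeds the clip up or slows it down proportionally.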
libraries/animation/src/AnimBlendLinearMove.h (new file, 77 lines)
@@ -0,0 +1,77 @@
//
// AnimBlendLinearMove.h
//
// Created by Anthony J. Thibault on 10/22/15.
// Copyright (c) 2015 High Fidelity, Inc. All rights reserved.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_AnimBlendLinearMove_h
#define hifi_AnimBlendLinearMove_h

#include "AnimNode.h"

// Synced linear blend between two AnimNodes, where the playback speed of
// the animation is timeScaled to match movement speed.
//
// Each child animation is associated with a chracteristic speed.
// This defines the speed of that animation when played at the normal playback rate, 30 frames per second.
//
// The user also specifies a desired speed. This desired speed is used to timescale
// the animation to achive the desired movement velocity.
//
// Blending is determined by the alpha parameter.
// If the number of children is 2, then the alpha parameters should be between
// 0 and 1. The first animation will have a (1 - alpha) factor, and the second
// will have factor of alpha.
//
// This node supports more then 2 children. In this case the alpha should be
// between 0 and n - 1. This alpha can be used to linearly interpolate between
// the closest two children poses. This can be used to sweep through a series
// of animation poses.

class AnimBlendLinearMove : public AnimNode {
public:
friend class AnimTests;

AnimBlendLinearMove(const QString& id, float alpha, float desiredSpeed, const std::vector<float>& characteristicSpeeds);
virtual ~AnimBlendLinearMove() override;

virtual const AnimPoseVec& evaluate(const AnimVariantMap& animVars, float dt, Triggers& triggersOut) override;

void setAlphaVar(const QString& alphaVar) { _alphaVar = alphaVar; }
void setDesiredSpeedVar(const QString& desiredSpeedVar) { _desiredSpeedVar = desiredSpeedVar; }

protected:
// for AnimDebugDraw rendering
virtual const AnimPoseVec& getPosesInternal() const override;

void evaluateAndBlendChildren(const AnimVariantMap& animVars, Triggers& triggersOut, float alpha,
size_t prevPoseIndex, size_t nextPoseIndex,
float prevDeltaTime, float nextDeltaTime);

void setFrameAndPhase(float dt, float alpha, int prevPoseIndex, int nextPoseIndex,
float* prevDeltaTimeOut, float* nextDeltaTimeOut, Triggers& triggersOut);

virtual void setCurrentFrameInternal(float frame) override;

AnimPoseVec _poses;

float _alpha;
float _desiredSpeed;

float _phase = 0.0f;

QString _alphaVar;
QString _desiredSpeedVar;

std::vector<float> _characteristicSpeeds;

// no copies
AnimBlendLinearMove(const AnimBlendLinearMove&) = delete;
AnimBlendLinearMove& operator=(const AnimBlendLinearMove&) = delete;
};

#endif // hifi_AnimBlendLinearMove_h
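The header comment above defines alpha as an index between 0 and n - 1 over the children, but this diff does not show how the rig produces the "moveForwardAlpha" / "moveLateralAlpha" values wired up in avatar-animation.json. As a minimal sketch only, under the assumption that alpha is meant to be a piecewise-linear index into the sorted characteristicSpeeds array, a caller could derive it from a measured speed like this (speedToAlpha and its parameters are hypothetical names, not code from this commit):

```cpp
#include <vector>

// Hypothetical helper, not part of this commit: map a measured speed onto the
// alpha range expected by AnimBlendLinearMove, assuming characteristicSpeeds
// is sorted ascending and alpha = i + t means "blend child i and child i + 1
// with fraction t".
static float speedToAlpha(float speed, const std::vector<float>& characteristicSpeeds) {
    if (characteristicSpeeds.size() < 2) {
        return 0.0f;
    }
    if (speed <= characteristicSpeeds.front()) {
        return 0.0f; // slower than the slowest child: use it unblended
    }
    if (speed >= characteristicSpeeds.back()) {
        return (float)(characteristicSpeeds.size() - 1); // faster than the fastest child
    }
    for (std::size_t i = 0; i + 1 < characteristicSpeeds.size(); i++) {
        float lo = characteristicSpeeds[i];
        float hi = characteristicSpeeds[i + 1];
        if (speed <= hi) {
            // interpolate between the bracketing children
            return (float)i + (speed - lo) / (hi - lo);
        }
    }
    return (float)(characteristicSpeeds.size() - 1);
}
```

For example, with the walkFwd speeds [0.5, 1.4, 4.5] from the JSON above, a measured speed of 1.4 m/s would give alpha = 1.0, selecting the normal walk clip on its own.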
@@ -35,7 +35,9 @@ const AnimPoseVec& AnimClip::evaluate(const AnimVariantMap& animVars, float dt,
_endFrame = animVars.lookup(_endFrameVar, _endFrame);
_timeScale = animVars.lookup(_timeScaleVar, _timeScale);
_loopFlag = animVars.lookup(_loopFlagVar, _loopFlag);
- _frame = accumulateTime(animVars.lookup(_frameVar, _frame), dt, triggersOut);
+ float frame = animVars.lookup(_frameVar, _frame);
+
+ _frame = ::accumulateTime(_startFrame, _endFrame, _timeScale, frame, dt, _loopFlag, _id, triggersOut);

// poll network anim to see if it's finished loading yet.
if (_networkAnim && _networkAnim->isLoaded() && _skeleton) {

@@ -45,16 +47,17 @@ const AnimPoseVec& AnimClip::evaluate(const AnimVariantMap& animVars, float dt,
}

if (_anim.size()) {
- int frameCount = _anim.size();
-
int prevIndex = (int)glm::floor(_frame);
- int nextIndex = (int)glm::ceil(_frame);
- if (_loopFlag && nextIndex >= frameCount) {
- nextIndex = 0;
+ int nextIndex;
+ if (_loopFlag && _frame >= _endFrame) {
+ nextIndex = (int)glm::ceil(_startFrame);
+ } else {
+ nextIndex = (int)glm::ceil(_frame);
}

+ // It can be quite possible for the user to set _startFrame and _endFrame to
+ // values before or past valid ranges. We clamp the frames here.
+ int frameCount = _anim.size();
prevIndex = std::min(std::max(0, prevIndex), frameCount - 1);
nextIndex = std::min(std::max(0, nextIndex), frameCount - 1);

@@ -78,39 +81,7 @@ void AnimClip::setCurrentFrameInternal(float frame) {
// because dt is 0, we should not encounter any triggers
const float dt = 0.0f;
Triggers triggers;
- _frame = accumulateTime(frame * _timeScale, dt, triggers);
- }
-
- float AnimClip::accumulateTime(float frame, float dt, Triggers& triggersOut) const {
- const float startFrame = std::min(_startFrame, _endFrame);
- if (startFrame == _endFrame) {
- // when startFrame >= endFrame
- frame = _endFrame;
- } else if (_timeScale > 0.0f) {
- // accumulate time, keeping track of loops and end of animation events.
- const float FRAMES_PER_SECOND = 30.0f;
- float framesRemaining = (dt * _timeScale) * FRAMES_PER_SECOND;
- while (framesRemaining > 0.0f) {
- float framesTillEnd = _endFrame - _frame;
- if (framesRemaining >= framesTillEnd) {
- if (_loopFlag) {
- // anim loop
- triggersOut.push_back(_id + "OnLoop");
- framesRemaining -= framesTillEnd;
- frame = startFrame;
- } else {
- // anim end
- triggersOut.push_back(_id + "OnDone");
- frame = _endFrame;
- framesRemaining = 0.0f;
- }
- } else {
- frame += framesRemaining;
- framesRemaining = 0.0f;
- }
- }
- }
- return frame;
+ _frame = ::accumulateTime(_startFrame, _endFrame, _timeScale, frame, dt, _loopFlag, _id, triggers);
}

void AnimClip::copyFromNetworkAnim() {
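The hunks above replace AnimClip's private accumulateTime() member with a shared free function, called as ::accumulateTime(_startFrame, _endFrame, _timeScale, frame, dt, _loopFlag, _id, triggersOut). Its implementation is not part of this diff (AnimUtil.h is included above, which is presumably where it lives, and AnimBlendLinearMove relies on the same frame bookkeeping). The following is only a sketch reconstructed from the removed member, with the member state passed in explicitly; the Triggers container is assumed to be a vector of QString as used by these nodes:

```cpp
#include <algorithm>
#include <vector>
#include <QString>

// Sketch, not the code from this commit: advance an animation frame counter by
// dt seconds at 30 fps, honoring timeScale and looping, and emit "<id>OnLoop" /
// "<id>OnDone" triggers when the end of the clip is crossed.
float accumulateTimeSketch(float startFrame, float endFrame, float timeScale,
                           float frame, float dt, bool loopFlag,
                           const QString& id, std::vector<QString>& triggersOut) {
    const float FRAMES_PER_SECOND = 30.0f;
    const float start = std::min(startFrame, endFrame);
    if (start == endFrame) {
        // degenerate range (startFrame >= endFrame): pin to the end frame
        return endFrame;
    }
    if (timeScale <= 0.0f) {
        return frame;
    }
    float framesRemaining = (dt * timeScale) * FRAMES_PER_SECOND;
    while (framesRemaining > 0.0f) {
        float framesTillEnd = endFrame - frame;
        if (framesRemaining >= framesTillEnd) {
            if (loopFlag) {
                triggersOut.push_back(id + "OnLoop");
                framesRemaining -= framesTillEnd;
                frame = start;
            } else {
                triggersOut.push_back(id + "OnDone");
                frame = endFrame;
                framesRemaining = 0.0f;
            }
        } else {
            frame += framesRemaining;
            framesRemaining = 0.0f;
        }
    }
    return frame;
}
```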
@@ -36,12 +36,17 @@
void setLoopFlagVar(const QString& loopFlagVar) { _loopFlagVar = loopFlagVar; }
void setFrameVar(const QString& frameVar) { _frameVar = frameVar; }

+ float getStartFrame() const { return _startFrame; }
+ float getEndFrame() const { return _endFrame; }
+
+ void setTimeScale(float timeScale) { _timeScale = timeScale; }
+ float getTimeScale() const { return _timeScale; }
+
protected:
void loadURL(const QString& url);

virtual void setCurrentFrameInternal(float frame) override;

- float accumulateTime(float frame, float dt, Triggers& triggersOut) const;
void copyFromNetworkAnim();

// for AnimDebugDraw rendering
@@ -38,6 +38,7 @@
enum class Type {
Clip = 0,
BlendLinear,
+ BlendLinearMove,
Overlay,
StateMachine,
Manipulator,

@@ -75,10 +76,10 @@
return evaluate(animVars, dt, triggersOut);
}

- protected:
-
void setCurrentFrame(float frame);

+ protected:
+
virtual void setCurrentFrameInternal(float frame) {}
virtual void setSkeletonInternal(AnimSkeleton::ConstPointer skeleton) { _skeleton = skeleton; }
|
|||
#include "AnimNode.h"
|
||||
#include "AnimClip.h"
|
||||
#include "AnimBlendLinear.h"
|
||||
#include "AnimBlendLinearMove.h"
|
||||
#include "AnimationLogging.h"
|
||||
#include "AnimOverlay.h"
|
||||
#include "AnimNodeLoader.h"
|
||||
|
@ -29,6 +30,7 @@ using NodeProcessFunc = bool (*)(AnimNode::Pointer node, const QJsonObject& json
|
|||
// factory functions
|
||||
static AnimNode::Pointer loadClipNode(const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl);
|
||||
static AnimNode::Pointer loadBlendLinearNode(const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl);
|
||||
static AnimNode::Pointer loadBlendLinearMoveNode(const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl);
|
||||
static AnimNode::Pointer loadOverlayNode(const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl);
|
||||
static AnimNode::Pointer loadStateMachineNode(const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl);
|
||||
static AnimNode::Pointer loadManipulatorNode(const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl);
|
||||
|
@ -36,17 +38,14 @@ static AnimNode::Pointer loadInverseKinematicsNode(const QJsonObject& jsonObj, c
|
|||
|
||||
// called after children have been loaded
|
||||
// returns node on success, nullptr on failure.
|
||||
static bool processClipNode(AnimNode::Pointer node, const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl) { return true; }
|
||||
static bool processBlendLinearNode(AnimNode::Pointer node, const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl) { return true; }
|
||||
static bool processOverlayNode(AnimNode::Pointer node, const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl) { return true; }
|
||||
static bool processDoNothing(AnimNode::Pointer node, const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl) { return true; }
|
||||
bool processStateMachineNode(AnimNode::Pointer node, const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl);
|
||||
static bool processManipulatorNode(AnimNode::Pointer node, const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl) { return true; }
|
||||
static bool processInverseKinematicsNode(AnimNode::Pointer node, const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl) { return true; }
|
||||
|
||||
static const char* animNodeTypeToString(AnimNode::Type type) {
|
||||
switch (type) {
|
||||
case AnimNode::Type::Clip: return "clip";
|
||||
case AnimNode::Type::BlendLinear: return "blendLinear";
|
||||
case AnimNode::Type::BlendLinearMove: return "blendLinearMove";
|
||||
case AnimNode::Type::Overlay: return "overlay";
|
||||
case AnimNode::Type::StateMachine: return "stateMachine";
|
||||
case AnimNode::Type::Manipulator: return "manipulator";
|
||||
|
@ -60,6 +59,7 @@ static NodeLoaderFunc animNodeTypeToLoaderFunc(AnimNode::Type type) {
|
|||
switch (type) {
|
||||
case AnimNode::Type::Clip: return loadClipNode;
|
||||
case AnimNode::Type::BlendLinear: return loadBlendLinearNode;
|
||||
case AnimNode::Type::BlendLinearMove: return loadBlendLinearMoveNode;
|
||||
case AnimNode::Type::Overlay: return loadOverlayNode;
|
||||
case AnimNode::Type::StateMachine: return loadStateMachineNode;
|
||||
case AnimNode::Type::Manipulator: return loadManipulatorNode;
|
||||
|
@ -71,12 +71,13 @@ static NodeLoaderFunc animNodeTypeToLoaderFunc(AnimNode::Type type) {
|
|||
|
||||
static NodeProcessFunc animNodeTypeToProcessFunc(AnimNode::Type type) {
|
||||
switch (type) {
|
||||
case AnimNode::Type::Clip: return processClipNode;
|
||||
case AnimNode::Type::BlendLinear: return processBlendLinearNode;
|
||||
case AnimNode::Type::Overlay: return processOverlayNode;
|
||||
case AnimNode::Type::Clip: return processDoNothing;
|
||||
case AnimNode::Type::BlendLinear: return processDoNothing;
|
||||
case AnimNode::Type::BlendLinearMove: return processDoNothing;
|
||||
case AnimNode::Type::Overlay: return processDoNothing;
|
||||
case AnimNode::Type::StateMachine: return processStateMachineNode;
|
||||
case AnimNode::Type::Manipulator: return processManipulatorNode;
|
||||
case AnimNode::Type::InverseKinematics: return processInverseKinematicsNode;
|
||||
case AnimNode::Type::Manipulator: return processDoNothing;
|
||||
case AnimNode::Type::InverseKinematics: return processDoNothing;
|
||||
case AnimNode::Type::NumTypes: return nullptr;
|
||||
};
|
||||
return nullptr;
|
||||
|
@ -160,6 +161,9 @@ static AnimNode::Pointer loadNode(const QJsonObject& jsonObj, const QUrl& jsonUr
|
|||
|
||||
assert((int)type >= 0 && type < AnimNode::Type::NumTypes);
|
||||
auto node = (animNodeTypeToLoaderFunc(type))(dataObj, id, jsonUrl);
|
||||
if (!node) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto childrenValue = jsonObj.value("children");
|
||||
if (!childrenValue.isArray()) {
|
||||
|
@ -233,6 +237,45 @@ static AnimNode::Pointer loadBlendLinearNode(const QJsonObject& jsonObj, const Q
|
|||
return node;
|
||||
}
|
||||
|
||||
static AnimNode::Pointer loadBlendLinearMoveNode(const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl) {
|
||||
|
||||
READ_FLOAT(alpha, jsonObj, id, jsonUrl, nullptr);
|
||||
READ_FLOAT(desiredSpeed, jsonObj, id, jsonUrl, nullptr);
|
||||
|
||||
std::vector<float> characteristicSpeeds;
|
||||
auto speedsValue = jsonObj.value("characteristicSpeeds");
|
||||
if (!speedsValue.isArray()) {
|
||||
qCCritical(animation) << "AnimNodeLoader, bad array \"characteristicSpeeds\" in blendLinearMove node, id =" << id << ", url =" << jsonUrl.toDisplayString();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto speedsArray = speedsValue.toArray();
|
||||
for (const auto& speedValue : speedsArray) {
|
||||
if (!speedValue.isDouble()) {
|
||||
qCCritical(animation) << "AnimNodeLoader, bad number in \"characteristicSpeeds\", id =" << id << ", url =" << jsonUrl.toDisplayString();
|
||||
return nullptr;
|
||||
}
|
||||
float speedVal = (float)speedValue.toDouble();
|
||||
characteristicSpeeds.push_back(speedVal);
|
||||
};
|
||||
|
||||
READ_OPTIONAL_STRING(alphaVar, jsonObj);
|
||||
READ_OPTIONAL_STRING(desiredSpeedVar, jsonObj);
|
||||
|
||||
auto node = std::make_shared<AnimBlendLinearMove>(id, alpha, desiredSpeed, characteristicSpeeds);
|
||||
|
||||
if (!alphaVar.isEmpty()) {
|
||||
node->setAlphaVar(alphaVar);
|
||||
}
|
||||
|
||||
if (!desiredSpeedVar.isEmpty()) {
|
||||
node->setDesiredSpeedVar(desiredSpeedVar);
|
||||
}
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
|
||||
static const char* boneSetStrings[AnimOverlay::NumBoneSets] = {
|
||||
"fullBody",
|
||||
"upperBody",
|
||||
|
|
56
libraries/animation/src/AnimPose.cpp
Normal file
56
libraries/animation/src/AnimPose.cpp
Normal file
|
@ -0,0 +1,56 @@
//
// AnimPose.cpp
//
// Created by Anthony J. Thibault on 10/14/15.
// Copyright (c) 2015 High Fidelity, Inc. All rights reserved.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "AnimPose.h"
#include "GLMHelpers.h"

const AnimPose AnimPose::identity = AnimPose(glm::vec3(1.0f),
                                             glm::quat(),
                                             glm::vec3(0.0f));

AnimPose::AnimPose(const glm::mat4& mat) {
    scale = extractScale(mat);
    rot = glm::normalize(glm::quat_cast(mat));
    trans = extractTranslation(mat);
}

glm::vec3 AnimPose::operator*(const glm::vec3& rhs) const {
    return trans + (rot * (scale * rhs));
}

glm::vec3 AnimPose::xformPoint(const glm::vec3& rhs) const {
    return *this * rhs;
}

// really slow
glm::vec3 AnimPose::xformVector(const glm::vec3& rhs) const {
    glm::vec3 xAxis = rot * glm::vec3(scale.x, 0.0f, 0.0f);
    glm::vec3 yAxis = rot * glm::vec3(0.0f, scale.y, 0.0f);
    glm::vec3 zAxis = rot * glm::vec3(0.0f, 0.0f, scale.z);
    glm::mat3 mat(xAxis, yAxis, zAxis);
    glm::mat3 transInvMat = glm::inverse(glm::transpose(mat));
    return transInvMat * rhs;
}

AnimPose AnimPose::operator*(const AnimPose& rhs) const {
    return AnimPose(static_cast<glm::mat4>(*this) * static_cast<glm::mat4>(rhs));
}

AnimPose AnimPose::inverse() const {
    return AnimPose(glm::inverse(static_cast<glm::mat4>(*this)));
}

AnimPose::operator glm::mat4() const {
    glm::vec3 xAxis = rot * glm::vec3(scale.x, 0.0f, 0.0f);
    glm::vec3 yAxis = rot * glm::vec3(0.0f, scale.y, 0.0f);
    glm::vec3 zAxis = rot * glm::vec3(0.0f, 0.0f, scale.z);
    return glm::mat4(glm::vec4(xAxis, 0.0f), glm::vec4(yAxis, 0.0f),
                     glm::vec4(zAxis, 0.0f), glm::vec4(trans, 1.0f));
}
47  libraries/animation/src/AnimPose.h  Normal file
@ -0,0 +1,47 @@
//
// AnimPose.h
//
// Created by Anthony J. Thibault on 10/14/15.
// Copyright (c) 2015 High Fidelity, Inc. All rights reserved.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_AnimPose
#define hifi_AnimPose

#include <QtGlobal>
#include <QDebug>
#include <vector>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

struct AnimPose {
    AnimPose() {}
    explicit AnimPose(const glm::mat4& mat);
    AnimPose(const glm::vec3& scaleIn, const glm::quat& rotIn, const glm::vec3& transIn) : scale(scaleIn), rot(rotIn), trans(transIn) {}
    static const AnimPose identity;

    glm::vec3 xformPoint(const glm::vec3& rhs) const;
    glm::vec3 xformVector(const glm::vec3& rhs) const;  // really slow

    glm::vec3 operator*(const glm::vec3& rhs) const; // same as xformPoint
    AnimPose operator*(const AnimPose& rhs) const;

    AnimPose inverse() const;
    operator glm::mat4() const;

    glm::vec3 scale;
    glm::quat rot;
    glm::vec3 trans;
};

inline QDebug operator<<(QDebug debug, const AnimPose& pose) {
    debug << "AnimPose, trans = (" << pose.trans.x << pose.trans.y << pose.trans.z << "), rot = (" << pose.rot.x << pose.rot.y << pose.rot.z << pose.rot.w << "), scale = (" << pose.scale.x << pose.scale.y << pose.scale.z << ")";
    return debug;
}

using AnimPoseVec = std::vector<AnimPose>;

#endif
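A small usage sketch of the AnimPose API introduced above (the poses and values are arbitrary): it composes two poses, transforms a point and a direction, and round-trips through inverse().

#include <glm/gtc/constants.hpp>
#include "AnimPose.h"

void animPoseExample() {
    // parent: uniform scale, 90 degree yaw, raised one meter
    AnimPose parent(glm::vec3(1.0f),
                    glm::angleAxis(glm::half_pi<float>(), glm::vec3(0.0f, 1.0f, 0.0f)),
                    glm::vec3(0.0f, 1.0f, 0.0f));
    // child: non-uniform scale, no rotation, offset along x
    AnimPose child(glm::vec3(2.0f, 1.0f, 1.0f), glm::quat(), glm::vec3(1.0f, 0.0f, 0.0f));

    AnimPose combined = parent * child;                     // same as multiplying the mat4 forms
    glm::vec3 p = combined.xformPoint(glm::vec3(0.0f));     // translation of the combined pose
    glm::vec3 d = combined.xformVector(glm::vec3(1.0f, 0.0f, 0.0f)); // direction, ignores translation

    AnimPose roundTrip = combined.inverse() * combined;     // approximately AnimPose::identity
    (void)p; (void)d; (void)roundTrip;
}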
@ -16,50 +16,6 @@
|
|||
|
||||
#include "AnimationLogging.h"
|
||||
|
||||
const AnimPose AnimPose::identity = AnimPose(glm::vec3(1.0f),
|
||||
glm::quat(),
|
||||
glm::vec3(0.0f));
|
||||
|
||||
AnimPose::AnimPose(const glm::mat4& mat) {
|
||||
scale = extractScale(mat);
|
||||
rot = glm::normalize(glm::quat_cast(mat));
|
||||
trans = extractTranslation(mat);
|
||||
}
|
||||
|
||||
glm::vec3 AnimPose::operator*(const glm::vec3& rhs) const {
|
||||
return trans + (rot * (scale * rhs));
|
||||
}
|
||||
|
||||
glm::vec3 AnimPose::xformPoint(const glm::vec3& rhs) const {
|
||||
return *this * rhs;
|
||||
}
|
||||
|
||||
// really slow
|
||||
glm::vec3 AnimPose::xformVector(const glm::vec3& rhs) const {
|
||||
glm::vec3 xAxis = rot * glm::vec3(scale.x, 0.0f, 0.0f);
|
||||
glm::vec3 yAxis = rot * glm::vec3(0.0f, scale.y, 0.0f);
|
||||
glm::vec3 zAxis = rot * glm::vec3(0.0f, 0.0f, scale.z);
|
||||
glm::mat3 mat(xAxis, yAxis, zAxis);
|
||||
glm::mat3 transInvMat = glm::inverse(glm::transpose(mat));
|
||||
return transInvMat * rhs;
|
||||
}
|
||||
|
||||
AnimPose AnimPose::operator*(const AnimPose& rhs) const {
|
||||
return AnimPose(static_cast<glm::mat4>(*this) * static_cast<glm::mat4>(rhs));
|
||||
}
|
||||
|
||||
AnimPose AnimPose::inverse() const {
|
||||
return AnimPose(glm::inverse(static_cast<glm::mat4>(*this)));
|
||||
}
|
||||
|
||||
AnimPose::operator glm::mat4() const {
|
||||
glm::vec3 xAxis = rot * glm::vec3(scale.x, 0.0f, 0.0f);
|
||||
glm::vec3 yAxis = rot * glm::vec3(0.0f, scale.y, 0.0f);
|
||||
glm::vec3 zAxis = rot * glm::vec3(0.0f, 0.0f, scale.z);
|
||||
return glm::mat4(glm::vec4(xAxis, 0.0f), glm::vec4(yAxis, 0.0f),
|
||||
glm::vec4(zAxis, 0.0f), glm::vec4(trans, 1.0f));
|
||||
}
|
||||
|
||||
AnimSkeleton::AnimSkeleton(const FBXGeometry& fbxGeometry) {
|
||||
// convert to std::vector of joints
|
||||
std::vector<FBXJoint> joints;
|
||||
|
|
|
@ -16,33 +16,7 @@
|
|||
#include <glm/gtc/quaternion.hpp>
|
||||
|
||||
#include <FBXReader.h>
|
||||
|
||||
struct AnimPose {
|
||||
AnimPose() {}
|
||||
explicit AnimPose(const glm::mat4& mat);
|
||||
AnimPose(const glm::vec3& scaleIn, const glm::quat& rotIn, const glm::vec3& transIn) : scale(scaleIn), rot(rotIn), trans(transIn) {}
|
||||
static const AnimPose identity;
|
||||
|
||||
glm::vec3 xformPoint(const glm::vec3& rhs) const;
|
||||
glm::vec3 xformVector(const glm::vec3& rhs) const; // really slow
|
||||
|
||||
glm::vec3 operator*(const glm::vec3& rhs) const; // same as xformPoint
|
||||
AnimPose operator*(const AnimPose& rhs) const;
|
||||
|
||||
AnimPose inverse() const;
|
||||
operator glm::mat4() const;
|
||||
|
||||
glm::vec3 scale;
|
||||
glm::quat rot;
|
||||
glm::vec3 trans;
|
||||
};
|
||||
|
||||
inline QDebug operator<<(QDebug debug, const AnimPose& pose) {
|
||||
debug << "AnimPose, trans = (" << pose.trans.x << pose.trans.y << pose.trans.z << "), rot = (" << pose.rot.x << pose.rot.y << pose.rot.z << pose.rot.w << "), scale = (" << pose.scale.x << pose.scale.y << pose.scale.z << ")";
|
||||
return debug;
|
||||
}
|
||||
|
||||
using AnimPoseVec = std::vector<AnimPose>;
|
||||
#include "AnimPose.h"
|
||||
|
||||
class AnimSkeleton {
|
||||
public:
|
||||
|
|
|
@ -11,6 +11,9 @@
#include "AnimUtil.h"
#include "GLMHelpers.h"

// TODO: use restrict keyword
// TODO: excellent candidate for simd vectorization.

void blend(size_t numPoses, const AnimPose* a, const AnimPose* b, float alpha, AnimPose* result) {
    for (size_t i = 0; i < numPoses; i++) {
        const AnimPose& aPose = a[i];
@ -20,3 +23,42 @@ void blend(size_t numPoses, const AnimPose* a, const AnimPose* b, float alpha, A
        result[i].trans = lerp(aPose.trans, bPose.trans, alpha);
    }
}

float accumulateTime(float startFrame, float endFrame, float timeScale, float currentFrame, float dt, bool loopFlag,
                     const QString& id, AnimNode::Triggers& triggersOut) {

    float frame = currentFrame;
    const float clampedStartFrame = std::min(startFrame, endFrame);
    if (fabsf(clampedStartFrame - endFrame) < 1.0f) {
        frame = endFrame;
    } else if (timeScale > 0.0f) {
        // accumulate time, keeping track of loops and end of animation events.
        const float FRAMES_PER_SECOND = 30.0f;
        float framesRemaining = (dt * timeScale) * FRAMES_PER_SECOND;
        while (framesRemaining > 0.0f) {
            float framesTillEnd = endFrame - frame;
            // when looping, add one frame between start and end.
            if (loopFlag) {
                framesTillEnd += 1.0f;
            }
            if (framesRemaining >= framesTillEnd) {
                if (loopFlag) {
                    // anim loop
                    triggersOut.push_back(id + "OnLoop");
                    framesRemaining -= framesTillEnd;
                    frame = clampedStartFrame;
                } else {
                    // anim end
                    triggersOut.push_back(id + "OnDone");
                    frame = endFrame;
                    framesRemaining = 0.0f;
                }
            } else {
                frame += framesRemaining;
                framesRemaining = 0.0f;
            }
        }
    }
    return frame;
}
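A brief worked example of accumulateTime near the end of a looping clip (the clip id and frame numbers are chosen for illustration; this mirrors the looping cases exercised by the tests later in this commit):

#include "AnimUtil.h"

void accumulateTimeExample() {
    AnimNode::Triggers triggers;
    const float dt = 1.0f / 30.0f;   // one frame of wall-clock time at the 30 fps frame rate above
    float frame = 9.5f;

    // loopFlag adds one extra frame between end (10) and start (0), so this call just advances to 10.5
    frame = accumulateTime(0.0f, 10.0f, 1.0f, frame, dt, true, "idle", triggers);
    // frame == 10.5f, triggers still empty

    // the next call crosses the loop boundary: "idleOnLoop" is pushed and the frame wraps to 0.5
    frame = accumulateTime(0.0f, 10.0f, 1.0f, frame, dt, true, "idle", triggers);
}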
@ -13,12 +13,12 @@

#include "AnimNode.h"

// TODO: use restrict keyword
// TODO: excellent candidate for simd vectorization.

// this is where the magic happens
void blend(size_t numPoses, const AnimPose* a, const AnimPose* b, float alpha, AnimPose* result);

float accumulateTime(float startFrame, float endFrame, float timeScale, float currentFrame, float dt, bool loopFlag,
                     const QString& id, AnimNode::Triggers& triggersOut);

#endif
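Since blend() is the workhorse the anim blend nodes call, here is a tiny usage sketch (the poses and the 0.5 alpha are arbitrary; it assumes AnimUtil.h is on the include path):

#include "AnimUtil.h"

void blendExample() {
    // two equally sized pose vectors; only the translations differ here
    AnimPoseVec a(2, AnimPose::identity);
    AnimPoseVec b(2, AnimPose(glm::vec3(1.0f), glm::quat(), glm::vec3(0.0f, 2.0f, 0.0f)));
    AnimPoseVec out(2);

    // halfway blend: each output translation ends up at y == 1.0f
    blend(a.size(), a.data(), b.data(), 0.5f, out.data());
}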
@ -381,6 +381,33 @@ glm::mat4 Rig::getJointTransform(int jointIndex) const {
    return _jointStates[jointIndex].getTransform();
}

void Rig::calcAnimAlpha(float speed, const std::vector<float>& referenceSpeeds, float* alphaOut) const {

    assert(referenceSpeeds.size() > 0);

    // calculate alpha from linear combination of referenceSpeeds.
    float alpha = 0.0f;
    if (speed <= referenceSpeeds.front()) {
        alpha = 0.0f;
    } else if (speed > referenceSpeeds.back()) {
        alpha = (float)(referenceSpeeds.size() - 1);
    } else {
        for (size_t i = 0; i < referenceSpeeds.size() - 1; i++) {
            if (referenceSpeeds[i] < speed && speed < referenceSpeeds[i + 1]) {
                alpha = (float)i + ((speed - referenceSpeeds[i]) / (referenceSpeeds[i + 1] - referenceSpeeds[i]));
                break;
            }
        }
    }

    *alphaOut = alpha;
}

// animation reference speeds.
static const std::vector<float> FORWARD_SPEEDS = { 0.4f, 1.4f, 4.5f }; // m/s
static const std::vector<float> BACKWARD_SPEEDS = { 0.6f, 1.45f }; // m/s
static const std::vector<float> LATERAL_SPEEDS = { 0.2f, 0.65f }; // m/s

void Rig::computeMotionAnimationState(float deltaTime, const glm::vec3& worldPosition, const glm::vec3& worldVelocity, const glm::quat& worldRotation) {

    glm::vec3 front = worldRotation * IDENTITY_FRONT;
@ -389,8 +416,16 @@ void Rig::computeMotionAnimationState(float deltaTime, const glm::vec3& worldPos
    // but some modes (e.g., hmd standing) update position without updating velocity.
    // It's very hard to debug hmd standing. (Look down at yourself, or have a second person observe. HMD third person is a bit undefined...)
    // So, let's create our own workingVelocity from the worldPosition...
    glm::vec3 workingVelocity = _lastVelocity;
    glm::vec3 positionDelta = worldPosition - _lastPosition;
    glm::vec3 workingVelocity = positionDelta / deltaTime;

    // Don't trust position delta if deltaTime is 'small'.
    // NOTE: This is mostly just a work around for an issue in oculus 0.7 runtime, where
    // Application::idle() is being called more frequently and with smaller dt's then expected.
    const float SMALL_DELTA_TIME = 0.006f; // 6 ms
    if (deltaTime > SMALL_DELTA_TIME) {
        workingVelocity = positionDelta / deltaTime;
    }

#if !WANT_DEBUG
    // But for smoothest (non-hmd standing) results, go ahead and use velocity:
@ -399,19 +434,43 @@ void Rig::computeMotionAnimationState(float deltaTime, const glm::vec3& worldPos
    }
#endif

    if (deltaTime > SMALL_DELTA_TIME) {
        _lastVelocity = workingVelocity;
    }

    if (_enableAnimGraph) {

        glm::vec3 localVel = glm::inverse(worldRotation) * workingVelocity;

        float forwardSpeed = glm::dot(localVel, IDENTITY_FRONT);
        float lateralSpeed = glm::dot(localVel, IDENTITY_RIGHT);
        float turningSpeed = glm::orientedAngle(front, _lastFront, IDENTITY_UP) / deltaTime;

        // filter speeds using a simple moving average.
        _averageForwardSpeed.updateAverage(forwardSpeed);
        _averageLateralSpeed.updateAverage(lateralSpeed);

        // sine wave LFO var for testing.
        static float t = 0.0f;
        _animVars.set("sine", static_cast<float>(0.5 * sin(t) + 0.5));
        _animVars.set("sine", 2.0f * static_cast<float>(0.5 * sin(t) + 0.5));

        const float ANIM_WALK_SPEED = 1.4f; // m/s
        _animVars.set("walkTimeScale", glm::clamp(0.5f, 2.0f, glm::length(localVel) / ANIM_WALK_SPEED));
        float moveForwardAlpha = 0.0f;
        float moveBackwardAlpha = 0.0f;
        float moveLateralAlpha = 0.0f;

        // calcuate the animation alpha and timeScale values based on current speeds and animation reference speeds.
        calcAnimAlpha(_averageForwardSpeed.getAverage(), FORWARD_SPEEDS, &moveForwardAlpha);
        calcAnimAlpha(-_averageForwardSpeed.getAverage(), BACKWARD_SPEEDS, &moveBackwardAlpha);
        calcAnimAlpha(fabsf(_averageLateralSpeed.getAverage()), LATERAL_SPEEDS, &moveLateralAlpha);

        _animVars.set("moveForwardSpeed", _averageForwardSpeed.getAverage());
        _animVars.set("moveForwardAlpha", moveForwardAlpha);

        _animVars.set("moveBackwardSpeed", -_averageForwardSpeed.getAverage());
        _animVars.set("moveBackwardAlpha", moveBackwardAlpha);

        _animVars.set("moveLateralSpeed", fabsf(_averageLateralSpeed.getAverage()));
        _animVars.set("moveLateralAlpha", moveLateralAlpha);

        const float MOVE_ENTER_SPEED_THRESHOLD = 0.2f; // m/sec
        const float MOVE_EXIT_SPEED_THRESHOLD = 0.07f; // m/sec
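A quick sanity check of the numbers above: with FORWARD_SPEEDS = { 0.4f, 1.4f, 4.5f }, an averaged forward speed of 1.0 m/s lands between the first two reference speeds, so calcAnimAlpha yields 0 + (1.0 - 0.4) / (1.4 - 0.4) = 0.6; speeds at or below 0.4 m/s clamp to 0 and speeds above 4.5 m/s clamp to 2. The snippet below just reproduces that arithmetic for illustration.

#include <vector>
#include <cassert>

float exampleForwardAlpha() {
    const std::vector<float> FORWARD_SPEEDS = { 0.4f, 1.4f, 4.5f }; // m/s, as above
    const float speed = 1.0f; // averaged forward speed, m/s
    // 0.4 < 1.0 < 1.4, so start from index 0 and add the fraction covered toward the next reference speed
    float alpha = 0.0f + (speed - FORWARD_SPEEDS[0]) / (FORWARD_SPEEDS[1] - FORWARD_SPEEDS[0]);
    assert(alpha > 0.59f && alpha < 0.61f); // 0.6
    return alpha;
}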
@ -44,6 +44,7 @@

#include "AnimNode.h"
#include "AnimNodeLoader.h"
#include "SimpleMovingAverage.h"

class AnimationHandle;
typedef std::shared_ptr<AnimationHandle> AnimationHandlePointer;
@ -219,6 +220,7 @@ public:
    void updateLeanJoint(int index, float leanSideways, float leanForward, float torsoTwist);
    void updateNeckJoint(int index, const HeadParameters& params);
    void updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm::quat& modelRotation, const glm::quat& worldHeadOrientation, const glm::vec3& lookAt, const glm::vec3& saccade);
    void calcAnimAlpha(float speed, const std::vector<float>& referenceSpeeds, float* alphaOut) const;

    QVector<JointState> _jointStates;
    int _rootJointIndex = -1;
@ -238,6 +240,7 @@ public:
    bool _enableAnimGraph = false;
    glm::vec3 _lastFront;
    glm::vec3 _lastPosition;
    glm::vec3 _lastVelocity;

    std::shared_ptr<AnimNode> _animNode;
    std::shared_ptr<AnimSkeleton> _animSkeleton;
@ -254,6 +257,9 @@ public:
    float _leftHandOverlayAlpha = 0.0f;
    float _rightHandOverlayAlpha = 0.0f;

    SimpleMovingAverage _averageForwardSpeed{ 10 };
    SimpleMovingAverage _averageLateralSpeed{ 10 };

private:
    QMap<int, StateHandler> _stateHandlers;
    int _nextStateHandlerId {0};
@ -55,6 +55,9 @@ Input::NamedVector StandardController::getAvailableInputs() const {
|
|||
makePair(LS, "LS"),
|
||||
makePair(RS, "RS"),
|
||||
|
||||
makePair(LS_TOUCH, "LSTouch"),
|
||||
makePair(RS_TOUCH, "RSTouch"),
|
||||
|
||||
// Center buttons
|
||||
makePair(START, "Start"),
|
||||
makePair(BACK, "Back"),
|
||||
|
@ -69,26 +72,41 @@ Input::NamedVector StandardController::getAvailableInputs() const {
|
|||
makePair(LT, "LT"),
|
||||
makePair(RT, "RT"),
|
||||
|
||||
|
||||
// Finger abstractions
|
||||
makePair(LEFT_PRIMARY_THUMB, "LeftPrimaryThumb"),
|
||||
makePair(LEFT_SECONDARY_THUMB, "LeftSecondaryThumb"),
|
||||
makePair(LEFT_THUMB_UP, "LeftThumbUp"),
|
||||
makePair(RIGHT_PRIMARY_THUMB, "RightPrimaryThumb"),
|
||||
makePair(RIGHT_SECONDARY_THUMB, "RightSecondaryThumb"),
|
||||
makePair(RIGHT_THUMB_UP, "RightThumbUp"),
|
||||
|
||||
makePair(LEFT_PRIMARY_THUMB_TOUCH, "LeftPrimaryThumbTouch"),
|
||||
makePair(LEFT_SECONDARY_THUMB_TOUCH, "LeftSecondaryThumbTouch"),
|
||||
makePair(RIGHT_PRIMARY_THUMB_TOUCH, "RightPrimaryThumbTouch"),
|
||||
makePair(RIGHT_SECONDARY_THUMB_TOUCH, "RightSecondaryThumbTouch"),
|
||||
|
||||
makePair(LEFT_INDEX_POINT, "LeftIndexPoint"),
|
||||
makePair(RIGHT_INDEX_POINT, "RightIndexPoint"),
|
||||
|
||||
makePair(LEFT_PRIMARY_INDEX, "LeftPrimaryIndex"),
|
||||
makePair(LEFT_SECONDARY_INDEX, "LeftSecondaryIndex"),
|
||||
makePair(RIGHT_PRIMARY_INDEX, "RightPrimaryIndex"),
|
||||
makePair(RIGHT_SECONDARY_INDEX, "RightSecondaryIndex"),
|
||||
|
||||
makePair(LEFT_PRIMARY_INDEX_TOUCH, "LeftPrimaryIndexTouch"),
|
||||
makePair(LEFT_SECONDARY_INDEX_TOUCH, "LeftSecondaryIndexTouch"),
|
||||
makePair(RIGHT_PRIMARY_INDEX_TOUCH, "RightPrimaryIndexTouch"),
|
||||
makePair(RIGHT_SECONDARY_INDEX_TOUCH, "RightSecondaryIndexTouch"),
|
||||
|
||||
makePair(LEFT_GRIP, "LeftGrip"),
|
||||
makePair(LEFT_GRIP_TOUCH, "LeftGripTouch"),
|
||||
makePair(RIGHT_GRIP, "RightGrip"),
|
||||
makePair(RIGHT_GRIP_TOUCH, "RightGripTouch"),
|
||||
|
||||
// Poses
|
||||
makePair(LEFT_HAND, "LeftHand"),
|
||||
makePair(RIGHT_HAND, "RightHand"),
|
||||
|
||||
|
||||
// Aliases, PlayStation style names
|
||||
makePair(LB, "L1"),
|
||||
makePair(RB, "R1"),
|
||||
|
|
|
@ -39,16 +39,33 @@ namespace controller {
|
|||
// These don't map to SDL types
|
||||
LEFT_PRIMARY_THUMB,
|
||||
LEFT_SECONDARY_THUMB,
|
||||
LEFT_PRIMARY_THUMB_TOUCH,
|
||||
LEFT_SECONDARY_THUMB_TOUCH,
|
||||
LS_TOUCH,
|
||||
LEFT_THUMB_UP,
|
||||
|
||||
RIGHT_PRIMARY_THUMB,
|
||||
RIGHT_SECONDARY_THUMB,
|
||||
RIGHT_PRIMARY_THUMB_TOUCH,
|
||||
RIGHT_SECONDARY_THUMB_TOUCH,
|
||||
RS_TOUCH,
|
||||
RIGHT_THUMB_UP,
|
||||
|
||||
LEFT_PRIMARY_INDEX,
|
||||
LEFT_SECONDARY_INDEX,
|
||||
LEFT_PRIMARY_INDEX_TOUCH,
|
||||
LEFT_SECONDARY_INDEX_TOUCH,
|
||||
LEFT_INDEX_POINT,
|
||||
RIGHT_PRIMARY_INDEX,
|
||||
RIGHT_SECONDARY_INDEX,
|
||||
RIGHT_PRIMARY_INDEX_TOUCH,
|
||||
RIGHT_SECONDARY_INDEX_TOUCH,
|
||||
RIGHT_INDEX_POINT,
|
||||
|
||||
LEFT_GRIP,
|
||||
LEFT_GRIP_TOUCH,
|
||||
RIGHT_GRIP,
|
||||
RIGHT_GRIP_TOUCH,
|
||||
|
||||
NUM_STANDARD_BUTTONS
|
||||
};
|
||||
|
|
|
@ -26,6 +26,7 @@
#include "Logging.h"

#include "impl/conditionals/AndConditional.h"
#include "impl/conditionals/NotConditional.h"
#include "impl/conditionals/EndpointConditional.h"
#include "impl/conditionals/ScriptConditional.h"

@ -676,7 +677,7 @@ Mapping::Pointer UserInputMapper::newMapping(const QString& mappingName) {

void UserInputMapper::enableMapping(const QString& mappingName, bool enable) {
    Locker locker(_lock);
    qCDebug(controllers) << "Attempting to enable mapping " << mappingName;
    qCDebug(controllers) << "Attempting to " << (enable ? "enable" : "disable") << " mapping " << mappingName;
    auto iterator = _mappingsByName.find(mappingName);
    if (_mappingsByName.end() == iterator) {
        qCWarning(controllers) << "Request to enable / disable unknown mapping " << mappingName;
@ -826,13 +827,31 @@ Conditional::Pointer UserInputMapper::parseConditional(const QJsonValue& value)
        return std::make_shared<AndConditional>(children);
    } else if (value.isString()) {
        // Support "when" : "GamePad.RB"
        auto input = findDeviceInput(value.toString());
        auto conditionalToken = value.toString();

        // Detect for modifier case (Not...)
        QString conditionalModifier;
        const QString JSON_CONDITIONAL_MODIFIER_NOT("!");
        if (conditionalToken.startsWith(JSON_CONDITIONAL_MODIFIER_NOT)) {
            conditionalModifier = JSON_CONDITIONAL_MODIFIER_NOT;
            conditionalToken = conditionalToken.right(conditionalToken.size() - conditionalModifier.size());
        }

        auto input = findDeviceInput(conditionalToken);
        auto endpoint = endpointFor(input);
        if (!endpoint) {
            return Conditional::Pointer();
        }
        auto conditional = std::make_shared<EndpointConditional>(endpoint);

        return std::make_shared<EndpointConditional>(endpoint);
        if (!conditionalModifier.isEmpty()) {
            if (conditionalModifier == JSON_CONDITIONAL_MODIFIER_NOT) {
                return std::make_shared<NotConditional>(conditional);
            }
        }

        // Default and conditional behavior
        return conditional;
    }

    return Conditional::parse(value);
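To illustrate what the negation support enables, here is a hypothetical mapping route; the "!" prefix matches JSON_CONDITIONAL_MODIFIER_NOT above, but the from/when/to values are invented for the example and are not part of this commit.

// Hypothetical route: only forward RX to yaw while RB is NOT held.
static const char* EXAMPLE_NEGATED_WHEN_ROUTE = R"JSON(
{
    "from": "Standard.RX",
    "when": "!Standard.RB",
    "to": "Actions.Yaw"
}
)JSON";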
@ -6,10 +6,16 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

// NOTE: we don't need to include this header unless/until we add additional symbols.
// By removing this header we prevent these warnings on windows:
//
// warning LNK4221: This object file does not define any previously undefined public symbols,
// so it will not be used by any link operation that consumes this library
//
//#include "NotConditional.h"

#include "NotConditional.h"

using namespace controller;

bool NotConditional::satisfied() {
    if (_operand) {
        return (!_operand->satisfied());
    } else {
        return false;
    }
}
@ -12,5 +12,19 @@

#include "../Conditional.h"

namespace controller {

    class NotConditional : public Conditional {
    public:
        using Pointer = std::shared_ptr<NotConditional>;

        NotConditional(Conditional::Pointer operand) : _operand(operand) { }

        virtual bool satisfied() override;

    private:
        Conditional::Pointer _operand;
    };
}

#endif
@ -152,7 +152,6 @@ public slots:
    Q_INVOKABLE glm::vec3 localCoordsToVoxelCoords(const QUuid& entityID, glm::vec3 localCoords);

signals:
    void entityCollisionWithEntity(const EntityItemID& idA, const EntityItemID& idB, const Collision& collision);
    void collisionWithEntity(const EntityItemID& idA, const EntityItemID& idB, const Collision& collision);

    void canAdjustLocksChanged(bool canAdjustLocks);
@ -22,6 +22,14 @@ ProfileRange::ProfileRange(const char *name) {
ProfileRange::~ProfileRange() {
    nvtxRangePop();
}

ProfileRangeBatch::ProfileRangeBatch(gpu::Batch& batch, const char *name) : _batch(batch) {
    _batch.pushProfileRange(name);
}

ProfileRangeBatch::~ProfileRangeBatch() {
    _batch.popProfileRange();
}
#endif

#define ADD_COMMAND(call) _commands.push_back(COMMAND_##call); _commandOffsets.push_back(_params.size());
@ -391,3 +399,17 @@ QDebug& operator<<(QDebug& debug, const Batch::CacheState& cacheState) {

    return debug;
}

// Debugging
void Batch::pushProfileRange(const char* name) {
#if defined(NSIGHT_FOUND)
    ADD_COMMAND(pushProfileRange);
    _params.push_back(_profileRanges.cache(name));
#endif
}

void Batch::popProfileRange() {
#if defined(NSIGHT_FOUND)
    ADD_COMMAND(popProfileRange);
#endif
}
@ -229,6 +229,10 @@
    // Reset the stage caches and states
    void resetStages();

    // Debugging
    void pushProfileRange(const char* name);
    void popProfileRange();

    // TODO: As long as we have gl calls explicitely issued from interface
    // code, we need to be able to record and batch these calls. THe long
    // term strategy is to get rid of any GL calls in favor of the HIFI GPU API
@ -324,6 +328,9 @@

        COMMAND_glColor4f,

        COMMAND_pushProfileRange,
        COMMAND_popProfileRange,

        NUM_COMMANDS,
    };
    typedef std::vector<Command> Commands;
@ -389,6 +396,7 @@
    typedef Cache<PipelinePointer>::Vector PipelineCaches;
    typedef Cache<FramebufferPointer>::Vector FramebufferCaches;
    typedef Cache<QueryPointer>::Vector QueryCaches;
    typedef Cache<std::string>::Vector ProfileRangeCaches;
    typedef Cache<std::function<void()>>::Vector LambdaCache;

    // Cache Data in a byte array if too big to fit in Param
@ -416,6 +424,7 @@
    FramebufferCaches _framebuffers;
    QueryCaches _queries;
    LambdaCache _lambdas;
    ProfileRangeCaches _profileRanges;

    NamedBatchDataMap _namedData;

@ -429,6 +438,25 @@ protected:

}

#if defined(NSIGHT_FOUND)

class ProfileRangeBatch {
public:
    ProfileRangeBatch(gpu::Batch& batch, const char *name);
    ~ProfileRangeBatch();

private:
    gpu::Batch& _batch;
};

#define PROFILE_RANGE_BATCH(batch, name) ProfileRangeBatch profileRangeThis(batch, name);

#else

#define PROFILE_RANGE_BATCH(batch, name)

#endif

QDebug& operator<<(QDebug& debug, const gpu::Batch::CacheState& cacheState);

#endif
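A minimal usage sketch for the new scoped batch ranges (the function name and label are invented; the macro comes from the block above): PROFILE_RANGE_BATCH declares a ProfileRangeBatch on the stack, so pushProfileRange is recorded immediately and popProfileRange when the scope ends, and it compiles away entirely when NSIGHT_FOUND is not defined.

// assumes the gpu Batch header above is included
void renderSomething(gpu::Batch& batch) {
    PROFILE_RANGE_BATCH(batch, "renderSomething"); // push now, pop automatically at end of scope
    // ... pipeline setup and draw calls recorded into the batch would go here ...
}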
@ -15,6 +15,11 @@
|
|||
#include <list>
|
||||
#include <glm/gtc/type_ptr.hpp>
|
||||
|
||||
#if defined(NSIGHT_FOUND)
|
||||
#include "nvToolsExt.h"
|
||||
#endif
|
||||
|
||||
|
||||
using namespace gpu;
|
||||
|
||||
GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
|
||||
|
@ -69,6 +74,9 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
|
|||
(&::gpu::GLBackend::do_glUniformMatrix4fv),
|
||||
|
||||
(&::gpu::GLBackend::do_glColor4f),
|
||||
|
||||
(&::gpu::GLBackend::do_pushProfileRange),
|
||||
(&::gpu::GLBackend::do_popProfileRange),
|
||||
};
|
||||
|
||||
void GLBackend::init() {
|
||||
|
@ -710,3 +718,17 @@ void GLBackend::do_glColor4f(Batch& batch, uint32 paramOffset) {
|
|||
}
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
|
||||
void GLBackend::do_pushProfileRange(Batch& batch, uint32 paramOffset) {
|
||||
#if defined(NSIGHT_FOUND)
|
||||
auto name = batch._profileRanges.get(batch._params[paramOffset]._uint);
|
||||
nvtxRangePush(name.c_str());
|
||||
#endif
|
||||
}
|
||||
|
||||
void GLBackend::do_popProfileRange(Batch& batch, uint32 paramOffset) {
|
||||
#if defined(NSIGHT_FOUND)
|
||||
nvtxRangePop();
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -479,6 +479,9 @@ protected:
|
|||
|
||||
void do_glColor4f(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void do_pushProfileRange(Batch& batch, uint32 paramOffset);
|
||||
void do_popProfileRange(Batch& batch, uint32 paramOffset);
|
||||
|
||||
typedef void (GLBackend::*CommandCall)(Batch&, uint32);
|
||||
static CommandCall _commandCalls[Batch::NUM_COMMANDS];
|
||||
};
|
||||
|
|
|
@ -360,16 +360,16 @@ const CollisionEvents& PhysicsEngine::getCollisionEvents() {
            glm::vec3 velocityChange = (motionStateA ? motionStateA->getObjectLinearVelocityChange() : glm::vec3(0.0f)) +
                (motionStateB ? motionStateB->getObjectLinearVelocityChange() : glm::vec3(0.0f));

            if (motionStateA && motionStateA->getType() == MOTIONSTATE_TYPE_ENTITY) {
            if (motionStateA) {
                QUuid idA = motionStateA->getObjectID();
                QUuid idB;
                if (motionStateB && motionStateB->getType() == MOTIONSTATE_TYPE_ENTITY) {
                if (motionStateB) {
                    idB = motionStateB->getObjectID();
                }
                glm::vec3 position = bulletToGLM(contact.getPositionWorldOnB()) + _originOffset;
                glm::vec3 penetration = bulletToGLM(contact.distance * contact.normalWorldOnB);
                _collisionEvents.push_back(Collision(type, idA, idB, position, penetration, velocityChange));
            } else if (motionStateB && motionStateB->getType() == MOTIONSTATE_TYPE_ENTITY) {
            } else if (motionStateB) {
                QUuid idB = motionStateB->getObjectID();
                glm::vec3 position = bulletToGLM(contact.getPositionWorldOnA()) + _originOffset;
                // NOTE: we're flipping the order of A and B (so that the first objectID is never NULL)
@ -84,7 +84,7 @@ private:
    const glm::vec3& TWO() { return Vectors::TWO; }
    const glm::vec3& HALF() { return Vectors::HALF; }
    const glm::vec3& RIGHT() { return Vectors::RIGHT; }
    const glm::vec3& UP() { return Vectors::UNIT_X; }
    const glm::vec3& UP() { return Vectors::UP; }
    const glm::vec3& FRONT() { return Vectors::FRONT; }
};
@ -183,6 +183,11 @@ T toNormalizedDeviceScale(const T& value, const T& size) {
#define PITCH(euler) euler.x
#define ROLL(euler) euler.z

// float - linear interpolate
inline float lerp(float x, float y, float a) {
    return x * (1.0f - a) + (y * a);
}

// vec2 lerp - linear interpolate
template<typename T, glm::precision P>
glm::detail::tvec2<T, P> lerp(const glm::detail::tvec2<T, P>& x, const glm::detail::tvec2<T, P>& y, T a) {
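For the scalar overload added here, lerp(x, y, a) returns x at a = 0, y at a = 1, and the straight-line blend in between; for instance lerp(2.0f, 6.0f, 0.25f) = 2 * 0.75 + 6 * 0.25 = 3. A two-line check, assuming GLMHelpers.h is included:

float oneQuarter = lerp(2.0f, 6.0f, 0.25f); // == 3.0f
float halfway = lerp(0.0f, 1.0f, 0.5f);     // == 0.5f, handy for fades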
@ -13,6 +13,7 @@
|
|||
#include "AnimBlendLinear.h"
|
||||
#include "AnimationLogging.h"
|
||||
#include "AnimVariant.h"
|
||||
#include "AnimUtil.h"
|
||||
|
||||
#include <../QTestExtensions.h>
|
||||
|
||||
|
@ -30,8 +31,8 @@ void AnimTests::cleanupTestCase() {
|
|||
}
|
||||
|
||||
void AnimTests::testClipInternalState() {
|
||||
std::string id = "my anim clip";
|
||||
std::string url = "https://hifi-public.s3.amazonaws.com/ozan/support/FightClubBotTest1/Animations/standard_idle.fbx";
|
||||
QString id = "my anim clip";
|
||||
QString url = "https://hifi-public.s3.amazonaws.com/ozan/support/FightClubBotTest1/Animations/standard_idle.fbx";
|
||||
float startFrame = 2.0f;
|
||||
float endFrame = 20.0f;
|
||||
float timeScale = 1.1f;
|
||||
|
@ -55,8 +56,8 @@ static float framesToSec(float secs) {
|
|||
}
|
||||
|
||||
void AnimTests::testClipEvaulate() {
|
||||
std::string id = "myClipNode";
|
||||
std::string url = "https://hifi-public.s3.amazonaws.com/ozan/support/FightClubBotTest1/Animations/standard_idle.fbx";
|
||||
QString id = "myClipNode";
|
||||
QString url = "https://hifi-public.s3.amazonaws.com/ozan/support/FightClubBotTest1/Animations/standard_idle.fbx";
|
||||
float startFrame = 2.0f;
|
||||
float endFrame = 22.0f;
|
||||
float timeScale = 1.0f;
|
||||
|
@ -73,8 +74,8 @@ void AnimTests::testClipEvaulate() {
|
|||
|
||||
// does it loop?
|
||||
triggers.clear();
|
||||
clip.evaluate(vars, framesToSec(11.0f), triggers);
|
||||
QCOMPARE_WITH_ABS_ERROR(clip._frame, 3.0f, EPSILON);
|
||||
clip.evaluate(vars, framesToSec(12.0f), triggers);
|
||||
QCOMPARE_WITH_ABS_ERROR(clip._frame, 3.0f, EPSILON); // Note: frame 3 and not 4, because extra frame between start and end.
|
||||
|
||||
// did we receive a loop trigger?
|
||||
QVERIFY(std::find(triggers.begin(), triggers.end(), "myClipNodeOnLoop") != triggers.end());
|
||||
|
@ -90,8 +91,8 @@ void AnimTests::testClipEvaulate() {
|
|||
}
|
||||
|
||||
void AnimTests::testClipEvaulateWithVars() {
|
||||
std::string id = "myClipNode";
|
||||
std::string url = "https://hifi-public.s3.amazonaws.com/ozan/support/FightClubBotTest1/Animations/standard_idle.fbx";
|
||||
QString id = "myClipNode";
|
||||
QString url = "https://hifi-public.s3.amazonaws.com/ozan/support/FightClubBotTest1/Animations/standard_idle.fbx";
|
||||
float startFrame = 2.0f;
|
||||
float endFrame = 22.0f;
|
||||
float timeScale = 1.0f;
|
||||
|
@ -126,9 +127,9 @@ void AnimTests::testClipEvaulateWithVars() {
|
|||
}
|
||||
|
||||
void AnimTests::testLoader() {
|
||||
auto url = QUrl("https://gist.githubusercontent.com/hyperlogic/857129fe04567cbe670f/raw/8ba57a8f0a76f88b39a11f77f8d9df04af9cec95/test.json");
|
||||
auto url = QUrl("https://gist.githubusercontent.com/hyperlogic/857129fe04567cbe670f/raw/0c54500f480fd7314a5aeb147c45a8a707edcc2e/test.json");
|
||||
// NOTE: This will warn about missing "test01.fbx", "test02.fbx", etc. if the resource loading code doesn't handle relative pathnames!
|
||||
// However, the test will proceed.
|
||||
// However, the test will proceed.
|
||||
AnimNodeLoader loader(url);
|
||||
|
||||
const int timeout = 1000;
|
||||
|
@ -238,3 +239,87 @@ void AnimTests::testVariant() {
|
|||
QVERIFY(m[1].z == -7.0f);
|
||||
QVERIFY(m[3].w == 16.0f);
|
||||
}
|
||||
|
||||
void AnimTests::testAccumulateTime() {
|
||||
|
||||
float startFrame = 0.0f;
|
||||
float endFrame = 10.0f;
|
||||
float timeScale = 1.0f;
|
||||
testAccumulateTimeWithParameters(startFrame, endFrame, timeScale);
|
||||
|
||||
startFrame = 5.0f;
|
||||
endFrame = 15.0f;
|
||||
timeScale = 1.0f;
|
||||
testAccumulateTimeWithParameters(startFrame, endFrame, timeScale);
|
||||
|
||||
startFrame = 0.0f;
|
||||
endFrame = 10.0f;
|
||||
timeScale = 0.5f;
|
||||
testAccumulateTimeWithParameters(startFrame, endFrame, timeScale);
|
||||
|
||||
startFrame = 5.0f;
|
||||
endFrame = 15.0f;
|
||||
timeScale = 2.0f;
|
||||
testAccumulateTimeWithParameters(startFrame, endFrame, timeScale);
|
||||
}
|
||||
|
||||
void AnimTests::testAccumulateTimeWithParameters(float startFrame, float endFrame, float timeScale) const {
|
||||
|
||||
float dt = (1.0f / 30.0f) / timeScale; // sec
|
||||
QString id = "testNode";
|
||||
AnimNode::Triggers triggers;
|
||||
bool loopFlag = false;
|
||||
|
||||
float resultFrame = accumulateTime(startFrame, endFrame, timeScale, startFrame, dt, loopFlag, id, triggers);
|
||||
QVERIFY(resultFrame == startFrame + 1.0f);
|
||||
QVERIFY(triggers.empty());
|
||||
triggers.clear();
|
||||
|
||||
resultFrame = accumulateTime(startFrame, endFrame, timeScale, resultFrame, dt, loopFlag, id, triggers);
|
||||
QVERIFY(resultFrame == startFrame + 2.0f);
|
||||
QVERIFY(triggers.empty());
|
||||
triggers.clear();
|
||||
|
||||
resultFrame = accumulateTime(startFrame, endFrame, timeScale, resultFrame, dt, loopFlag, id, triggers);
|
||||
QVERIFY(resultFrame == startFrame + 3.0f);
|
||||
QVERIFY(triggers.empty());
|
||||
triggers.clear();
|
||||
|
||||
// test onDone trigger and frame clamping.
|
||||
resultFrame = accumulateTime(startFrame, endFrame, timeScale, endFrame - 1.0f, dt, loopFlag, id, triggers);
|
||||
QVERIFY(resultFrame == endFrame);
|
||||
QVERIFY(!triggers.empty() && triggers[0] == "testNodeOnDone");
|
||||
triggers.clear();
|
||||
|
||||
resultFrame = accumulateTime(startFrame, endFrame, timeScale, endFrame - 0.5f, dt, loopFlag, id, triggers);
|
||||
QVERIFY(resultFrame == endFrame);
|
||||
QVERIFY(!triggers.empty() && triggers[0] == "testNodeOnDone");
|
||||
triggers.clear();
|
||||
|
||||
// test onLoop trigger and looping frame logic
|
||||
loopFlag = true;
|
||||
|
||||
// should NOT trigger loop even though we stop at last frame, because there is an extra frame between end and start frames.
|
||||
resultFrame = accumulateTime(startFrame, endFrame, timeScale, endFrame - 1.0f, dt, loopFlag, id, triggers);
|
||||
QVERIFY(resultFrame == endFrame);
|
||||
QVERIFY(triggers.empty());
|
||||
triggers.clear();
|
||||
|
||||
// now we should hit loop trigger
|
||||
resultFrame = accumulateTime(startFrame, endFrame, timeScale, resultFrame, dt, loopFlag, id, triggers);
|
||||
QVERIFY(resultFrame == startFrame);
|
||||
QVERIFY(!triggers.empty() && triggers[0] == "testNodeOnLoop");
|
||||
triggers.clear();
|
||||
|
||||
// should NOT trigger loop, even though we move past the end frame, because of extra frame between end and start.
|
||||
resultFrame = accumulateTime(startFrame, endFrame, timeScale, endFrame - 0.5f, dt, loopFlag, id, triggers);
|
||||
QVERIFY(resultFrame == endFrame + 0.5f);
|
||||
QVERIFY(triggers.empty());
|
||||
triggers.clear();
|
||||
|
||||
// now we should hit loop trigger
|
||||
resultFrame = accumulateTime(startFrame, endFrame, timeScale, resultFrame, dt, loopFlag, id, triggers);
|
||||
QVERIFY(resultFrame == startFrame + 0.5f);
|
||||
QVERIFY(!triggers.empty() && triggers[0] == "testNodeOnLoop");
|
||||
triggers.clear();
|
||||
}
|
||||
|
|
|
@ -15,6 +15,8 @@
|
|||
|
||||
class AnimTests : public QObject {
|
||||
Q_OBJECT
|
||||
public:
|
||||
void testAccumulateTimeWithParameters(float startFrame, float endFrame, float timeScale) const;
|
||||
private slots:
|
||||
void initTestCase();
|
||||
void cleanupTestCase();
|
||||
|
@ -23,6 +25,7 @@ private slots:
|
|||
void testClipEvaulateWithVars();
|
||||
void testLoader();
|
||||
void testVariant();
|
||||
void testAccumulateTime();
|
||||
};
|
||||
|
||||
#endif // hifi_AnimTests_h
|
||||
|
|
|
@ -114,7 +114,6 @@
|
|||
createTargets();
|
||||
createTargetResetter();
|
||||
|
||||
createBasketballHoop();
|
||||
createBasketballRack();
|
||||
createBasketballResetter();
|
||||
|
||||
|
@ -130,14 +129,11 @@
|
|||
z: 503.49
|
||||
});
|
||||
|
||||
|
||||
createSprayCan({
|
||||
x: 549.7,
|
||||
y: 495.6,
|
||||
z: 503.91
|
||||
});
|
||||
|
||||
|
||||
}
|
||||
|
||||
function deleteAllToys() {
|
||||
|
@ -930,45 +926,6 @@
|
|||
});
|
||||
}
|
||||
|
||||
function createBasketballHoop() {
|
||||
var position = {
|
||||
x: 539.23,
|
||||
y: 496.13,
|
||||
z: 475.89
|
||||
};
|
||||
var rotation = Quat.fromPitchYawRollDegrees(0, 58.49, 0);
|
||||
|
||||
var hoopURL = "http://hifi-public.s3.amazonaws.com/models/basketball_hoop/basketball_hoop.fbx";
|
||||
var hoopCollisionHullURL = "http://hifi-public.s3.amazonaws.com/models/basketball_hoop/basketball_hoop_collision_hull.obj";
|
||||
|
||||
var hoop = Entities.addEntity({
|
||||
type: "Model",
|
||||
modelURL: hoopURL,
|
||||
position: position,
|
||||
rotation: rotation,
|
||||
shapeType: 'compound',
|
||||
gravity: {
|
||||
x: 0,
|
||||
y: -9.8,
|
||||
z: 0
|
||||
},
|
||||
dimensions: {
|
||||
x: 1.89,
|
||||
y: 3.99,
|
||||
z: 3.79
|
||||
},
|
||||
compoundShapeURL: hoopCollisionHullURL,
|
||||
userData: JSON.stringify({
|
||||
resetMe: {
|
||||
resetMe: true
|
||||
},
|
||||
grabbableKey: {
|
||||
grabbable: false
|
||||
}
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
function createWand(position) {
|
||||
var WAND_MODEL = 'http://hifi-public.s3.amazonaws.com/james/bubblewand/models/wand/wand.fbx';
|
||||
var WAND_COLLISION_SHAPE = 'http://hifi-public.s3.amazonaws.com/james/bubblewand/models/wand/actual_no_top_collision_hull.obj';
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
//per script
|
||||
|
||||
|
||||
/*global deleteAllToys, createAllToys, createGates, createPingPongBallGun, createFire, createPottedPlant, createCombinedArmChair, createBasketballHoop, createBasketBall, createSprayCan, createDoll, createWand, createDice, createCat, deleteAllToys, createFlashlight, createBlocks, createMagballs, createLights */
|
||||
/*global deleteAllToys, createAllToys, createGates, createPingPongBallGun, createFire, createPottedPlant, createCombinedArmChair, createBasketBall, createSprayCan, createDoll, createWand, createDice, createCat, deleteAllToys, createFlashlight, createBlocks, createMagballs, createLights */
|
||||
var utilitiesScript = Script.resolvePath("../examples/libraries/utils.js");
|
||||
Script.include(utilitiesScript);
|
||||
|
||||
|
@ -87,7 +87,6 @@ MasterReset = function() {
|
|||
createTargets();
|
||||
createTargetResetter();
|
||||
|
||||
createBasketballHoop();
|
||||
createBasketballRack();
|
||||
createBasketballResetter();
|
||||
|
||||
|
@ -908,45 +907,6 @@ MasterReset = function() {
|
|||
});
|
||||
}
|
||||
|
||||
function createBasketballHoop() {
|
||||
var position = {
|
||||
x: 539.23,
|
||||
y: 496.13,
|
||||
z: 475.89
|
||||
};
|
||||
var rotation = Quat.fromPitchYawRollDegrees(0, 58.49, 0);
|
||||
|
||||
var hoopURL = "http://hifi-public.s3.amazonaws.com/models/basketball_hoop/basketball_hoop.fbx";
|
||||
var hoopCollisionHullURL = "http://hifi-public.s3.amazonaws.com/models/basketball_hoop/basketball_hoop_collision_hull.obj";
|
||||
|
||||
var hoop = Entities.addEntity({
|
||||
type: "Model",
|
||||
modelURL: hoopURL,
|
||||
position: position,
|
||||
rotation: rotation,
|
||||
shapeType: 'compound',
|
||||
gravity: {
|
||||
x: 0,
|
||||
y: -9.8,
|
||||
z: 0
|
||||
},
|
||||
dimensions: {
|
||||
x: 1.89,
|
||||
y: 3.99,
|
||||
z: 3.79
|
||||
},
|
||||
compoundShapeURL: hoopCollisionHullURL,
|
||||
userData: JSON.stringify({
|
||||
resetMe: {
|
||||
resetMe: true
|
||||
},
|
||||
grabbableKey: {
|
||||
grabbable: false
|
||||
}
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
function createWand(position) {
|
||||
var WAND_MODEL = 'http://hifi-public.s3.amazonaws.com/james/bubblewand/models/wand/wand.fbx';
|
||||
var WAND_COLLISION_SHAPE = 'http://hifi-public.s3.amazonaws.com/james/bubblewand/models/wand/actual_no_top_collision_hull.obj';
|
||||
|
|