Merge pull request from hyperlogic/feature/aim-offset-blend

Additive animation support
Shannon Romano 2019-09-04 16:55:50 -07:00 committed by GitHub
commit 96f6793a87
9 changed files with 322 additions and 118 deletions


@@ -14,9 +14,10 @@
#include "AnimUtil.h"
#include "AnimClip.h"
AnimBlendLinear::AnimBlendLinear(const QString& id, float alpha) :
AnimBlendLinear::AnimBlendLinear(const QString& id, float alpha, AnimBlendType blendType) :
AnimNode(AnimNode::Type::BlendLinear, id),
_alpha(alpha) {
_alpha(alpha),
_blendType(blendType) {
}
@@ -36,6 +37,19 @@ const AnimPoseVec& AnimBlendLinear::evaluate(const AnimVariantMap& animVars, con
} else if (_children.size() == 1) {
_poses = _children[0]->evaluate(animVars, context, dt, triggersOut);
context.setDebugAlpha(_children[0]->getID(), parentDebugAlpha, _children[0]->getType());
} else if (_children.size() == 2 && _blendType != AnimBlendType_Normal) {
// special case for additive blending
float alpha = glm::clamp(_alpha, 0.0f, 1.0f);
const size_t prevPoseIndex = 0;
const size_t nextPoseIndex = 1;
evaluateAndBlendChildren(animVars, context, triggersOut, alpha, prevPoseIndex, nextPoseIndex, dt);
// for animation stack debugging
float weight2 = alpha;
float weight1 = 1.0f - weight2;
context.setDebugAlpha(_children[prevPoseIndex]->getID(), weight1 * parentDebugAlpha, _children[prevPoseIndex]->getType());
context.setDebugAlpha(_children[nextPoseIndex]->getID(), weight2 * parentDebugAlpha, _children[nextPoseIndex]->getType());
} else {
float clampedAlpha = glm::clamp(_alpha, 0.0f, (float)(_children.size() - 1));
size_t prevPoseIndex = glm::floor(clampedAlpha);
@@ -79,7 +93,35 @@ void AnimBlendLinear::evaluateAndBlendChildren(const AnimVariantMap& animVars, c
if (prevPoses.size() > 0 && prevPoses.size() == nextPoses.size()) {
_poses.resize(prevPoses.size());
::blend(_poses.size(), &prevPoses[0], &nextPoses[0], alpha, &_poses[0]);
if (_blendType == AnimBlendType_Normal) {
::blend(_poses.size(), &prevPoses[0], &nextPoses[0], alpha, &_poses[0]);
} else if (_blendType == AnimBlendType_AddRelative) {
::blendAdd(_poses.size(), &prevPoses[0], &nextPoses[0], alpha, &_poses[0]);
} else if (_blendType == AnimBlendType_AddAbsolute) {
// convert prev from relative to absolute
AnimPoseVec absPrev = prevPoses;
_skeleton->convertRelativePosesToAbsolute(absPrev);
// rotate the offset rotations from next into the parent relative frame of each joint.
AnimPoseVec relOffsetPoses;
relOffsetPoses.reserve(nextPoses.size());
for (size_t i = 0; i < nextPoses.size(); ++i) {
// copy translation and scale from nextPoses
AnimPose pose = nextPoses[i];
int parentIndex = _skeleton->getParentIndex((int)i);
if (parentIndex >= 0) {
// but transform nextPoses rot into absPrev parent frame.
pose.rot() = glm::inverse(absPrev[parentIndex].rot()) * pose.rot() * absPrev[parentIndex].rot();
}
relOffsetPoses.push_back(pose);
}
// then blend
::blendAdd(_poses.size(), &prevPoses[0], &relOffsetPoses[0], alpha, &_poses[0]);
}
}
}
}
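For reference, the AddAbsolute branch above re-expresses each offset rotation in the parent-relative frame of the corresponding joint before the additive blend. A minimal standalone sketch of that frame change using plain glm types (the function name is illustrative, not part of the codebase):

#include <glm/gtc/quaternion.hpp>

// Re-express a rotation delta that was authored in the absolute (model) frame in a joint's
// parent-relative frame, so it can be pre-multiplied onto that joint's relative rotation.
glm::quat absoluteDeltaToParentRelative(const glm::quat& absDelta, const glm::quat& absParentRot) {
    // same conjugation as pose.rot() = inverse(absPrev[parentIndex].rot()) * pose.rot() * absPrev[parentIndex].rot()
    return glm::inverse(absParentRot) * absDelta * absParentRot;
}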


@@ -27,7 +27,7 @@ class AnimBlendLinear : public AnimNode {
public:
friend class AnimTests;
AnimBlendLinear(const QString& id, float alpha);
AnimBlendLinear(const QString& id, float alpha, AnimBlendType blendType);
virtual ~AnimBlendLinear() override;
virtual const AnimPoseVec& evaluate(const AnimVariantMap& animVars, const AnimContext& context, float dt, AnimVariantMap& triggersOut) override;
@@ -44,6 +44,7 @@ protected:
AnimPoseVec _poses;
float _alpha;
AnimBlendType _blendType;
QString _alphaVar;


@@ -16,100 +16,6 @@
#include "AnimationLogging.h"
#include "AnimUtil.h"
AnimClip::AnimClip(const QString& id, const QString& url, float startFrame, float endFrame, float timeScale, bool loopFlag, bool mirrorFlag) :
AnimNode(AnimNode::Type::Clip, id),
_startFrame(startFrame),
_endFrame(endFrame),
_timeScale(timeScale),
_loopFlag(loopFlag),
_mirrorFlag(mirrorFlag),
_frame(startFrame)
{
loadURL(url);
}
AnimClip::~AnimClip() {
}
const AnimPoseVec& AnimClip::evaluate(const AnimVariantMap& animVars, const AnimContext& context, float dt, AnimVariantMap& triggersOut) {
// lookup parameters from animVars, using current instance variables as defaults.
_startFrame = animVars.lookup(_startFrameVar, _startFrame);
_endFrame = animVars.lookup(_endFrameVar, _endFrame);
_timeScale = animVars.lookup(_timeScaleVar, _timeScale);
_loopFlag = animVars.lookup(_loopFlagVar, _loopFlag);
_mirrorFlag = animVars.lookup(_mirrorFlagVar, _mirrorFlag);
float frame = animVars.lookup(_frameVar, _frame);
_frame = ::accumulateTime(_startFrame, _endFrame, _timeScale, frame, dt, _loopFlag, _id, triggersOut);
// poll network anim to see if it's finished loading yet.
if (_networkAnim && _networkAnim->isLoaded() && _skeleton) {
// loading is complete, copy animation frames from network animation, then throw it away.
copyFromNetworkAnim();
_networkAnim.reset();
}
if (_anim.size()) {
// lazy creation of mirrored animation frames.
if (_mirrorFlag && _anim.size() != _mirrorAnim.size()) {
buildMirrorAnim();
}
int prevIndex = (int)glm::floor(_frame);
int nextIndex;
if (_loopFlag && _frame >= _endFrame) {
nextIndex = (int)glm::ceil(_startFrame);
} else {
nextIndex = (int)glm::ceil(_frame);
}
// It is possible for the user to set _startFrame and _endFrame to
// values before or past the valid range; we clamp the frame indices here.
int frameCount = (int)_anim.size();
prevIndex = std::min(std::max(0, prevIndex), frameCount - 1);
nextIndex = std::min(std::max(0, nextIndex), frameCount - 1);
const AnimPoseVec& prevFrame = _mirrorFlag ? _mirrorAnim[prevIndex] : _anim[prevIndex];
const AnimPoseVec& nextFrame = _mirrorFlag ? _mirrorAnim[nextIndex] : _anim[nextIndex];
float alpha = glm::fract(_frame);
::blend(_poses.size(), &prevFrame[0], &nextFrame[0], alpha, &_poses[0]);
}
processOutputJoints(triggersOut);
return _poses;
}
void AnimClip::loadURL(const QString& url) {
auto animCache = DependencyManager::get<AnimationCache>();
_networkAnim = animCache->getAnimation(url);
_url = url;
}
void AnimClip::setCurrentFrameInternal(float frame) {
// because dt is 0, we should not encounter any triggers
const float dt = 0.0f;
AnimVariantMap triggers;
_frame = ::accumulateTime(_startFrame, _endFrame, _timeScale, frame + _startFrame, dt, _loopFlag, _id, triggers);
}
static std::vector<int> buildJointIndexMap(const AnimSkeleton& dstSkeleton, const AnimSkeleton& srcSkeleton) {
std::vector<int> jointIndexMap;
int srcJointCount = srcSkeleton.getNumJoints();
jointIndexMap.reserve(srcJointCount);
for (int srcJointIndex = 0; srcJointIndex < srcJointCount; srcJointIndex++) {
QString srcJointName = srcSkeleton.getJointName(srcJointIndex);
int dstJointIndex = dstSkeleton.nameToJointIndex(srcJointName);
jointIndexMap.push_back(dstJointIndex);
}
return jointIndexMap;
}
#ifdef USE_CUSTOM_ASSERT
#undef ASSERT
#define ASSERT(x) \
@@ -123,12 +29,73 @@ static std::vector<int> buildJointIndexMap(const AnimSkeleton& dstSkeleton, cons
#define ASSERT assert
#endif
void AnimClip::copyFromNetworkAnim() {
assert(_networkAnim && _networkAnim->isLoaded() && _skeleton);
_anim.clear();
static std::vector<int> buildJointIndexMap(const AnimSkeleton& dstSkeleton, const AnimSkeleton& srcSkeleton) {
std::vector<int> jointIndexMap;
int srcJointCount = srcSkeleton.getNumJoints();
jointIndexMap.reserve(srcJointCount);
for (int srcJointIndex = 0; srcJointIndex < srcJointCount; srcJointIndex++) {
QString srcJointName = srcSkeleton.getJointName(srcJointIndex);
int dstJointIndex = dstSkeleton.nameToJointIndex(srcJointName);
jointIndexMap.push_back(dstJointIndex);
}
return jointIndexMap;
}
auto avatarSkeleton = getSkeleton();
const HFMModel& animModel = _networkAnim->getHFMModel();
static void bakeRelativeDeltaAnim(std::vector<AnimPoseVec>& anim, const AnimPoseVec& basePoses) {
// invert all the basePoses
AnimPoseVec invBasePoses = basePoses;
for (auto&& invBasePose : invBasePoses) {
invBasePose = invBasePose.inverse();
}
// for each frame of the animation
for (auto&& animPoses : anim) {
ASSERT(animPoses.size() == basePoses.size());
// for each joint in animPoses
for (size_t i = 0; i < animPoses.size(); ++i) {
// convert this relative AnimPose into a delta animation.
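// i.e. delta = animPose * inverse(basePose); blendAdd() can later layer this delta on top of another pose.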
animPoses[i] = animPoses[i] * invBasePoses[i];
}
}
}
void bakeAbsoluteDeltaAnim(std::vector<AnimPoseVec>& anim, const AnimPoseVec& basePoses, AnimSkeleton::ConstPointer skeleton) {
// invert all the basePoses
AnimPoseVec invBasePoses = basePoses;
for (auto&& invBasePose : invBasePoses) {
invBasePose = invBasePose.inverse();
}
AnimPoseVec absBasePoses = basePoses;
skeleton->convertRelativePosesToAbsolute(absBasePoses);
// for each frame of the animation
for (auto&& animPoses : anim) {
ASSERT(animPoses.size() == basePoses.size());
// for each joint in animPoses
for (size_t i = 0; i < animPoses.size(); ++i) {
// scale and translation are relative frame
animPoses[i] = animPoses[i] * invBasePoses[i];
// but transform the rotation delta into the absolute frame.
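// (AnimBlendLinear's AddAbsolute branch applies the inverse conjugation at blend time, using the
// current first input's absolute parent rotation, to bring the delta back into a parent-relative frame.)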
int parentIndex = skeleton->getParentIndex((int)i);
if (parentIndex >= 0) {
animPoses[i].rot() = absBasePoses[parentIndex].rot() * animPoses[i].rot() * glm::inverse(absBasePoses[parentIndex].rot());
}
}
}
}
static std::vector<AnimPoseVec> copyAndRetargetFromNetworkAnim(AnimationPointer networkAnim, AnimSkeleton::ConstPointer avatarSkeleton) {
ASSERT(networkAnim && networkAnim->isLoaded() && avatarSkeleton);
std::vector<AnimPoseVec> anim;
const HFMModel& animModel = networkAnim->getHFMModel();
AnimSkeleton animSkeleton(animModel);
const int animJointCount = animSkeleton.getNumJoints();
const int avatarJointCount = avatarSkeleton->getNumJoints();
@@ -137,7 +104,7 @@ void AnimClip::copyFromNetworkAnim() {
std::vector<int> avatarToAnimJointIndexMap = buildJointIndexMap(animSkeleton, *avatarSkeleton);
const int animFrameCount = animModel.animationFrames.size();
_anim.resize(animFrameCount);
anim.resize(animFrameCount);
// find the size scale factor for translation in the animation.
float boneLengthScale = 1.0f;
@@ -223,8 +190,8 @@ void AnimClip::copyFromNetworkAnim() {
// convert avatar rotations into relative frame
avatarSkeleton->convertAbsoluteRotationsToRelative(avatarRotations);
ASSERT(frame >= 0 && frame < (int)_anim.size());
_anim[frame].reserve(avatarJointCount);
ASSERT(frame >= 0 && frame < (int)anim.size());
anim[frame].reserve(avatarJointCount);
for (int avatarJointIndex = 0; avatarJointIndex < avatarJointCount; avatarJointIndex++) {
const AnimPose& avatarDefaultPose = avatarSkeleton->getRelativeDefaultPose(avatarJointIndex);
@@ -251,14 +218,129 @@ void AnimClip::copyFromNetworkAnim() {
// build the final pose
ASSERT(avatarJointIndex >= 0 && avatarJointIndex < (int)avatarRotations.size());
_anim[frame].push_back(AnimPose(relativeScale, avatarRotations[avatarJointIndex], relativeTranslation));
anim[frame].push_back(AnimPose(relativeScale, avatarRotations[avatarJointIndex], relativeTranslation));
}
}
// mirrorAnim will be re-built on demand, if needed.
_mirrorAnim.clear();
return anim;
}
_poses.resize(avatarJointCount);
AnimClip::AnimClip(const QString& id, const QString& url, float startFrame, float endFrame, float timeScale, bool loopFlag, bool mirrorFlag,
AnimBlendType blendType, const QString& baseURL, float baseFrame) :
AnimNode(AnimNode::Type::Clip, id),
_startFrame(startFrame),
_endFrame(endFrame),
_timeScale(timeScale),
_loopFlag(loopFlag),
_mirrorFlag(mirrorFlag),
_frame(startFrame),
_blendType(blendType),
_baseFrame(baseFrame)
{
loadURL(url);
if (blendType != AnimBlendType_Normal) {
auto animCache = DependencyManager::get<AnimationCache>();
_baseNetworkAnim = animCache->getAnimation(baseURL);
_baseURL = baseURL;
}
}
AnimClip::~AnimClip() {
}
const AnimPoseVec& AnimClip::evaluate(const AnimVariantMap& animVars, const AnimContext& context, float dt, AnimVariantMap& triggersOut) {
// lookup parameters from animVars, using current instance variables as defaults.
_startFrame = animVars.lookup(_startFrameVar, _startFrame);
_endFrame = animVars.lookup(_endFrameVar, _endFrame);
_timeScale = animVars.lookup(_timeScaleVar, _timeScale);
_loopFlag = animVars.lookup(_loopFlagVar, _loopFlag);
_mirrorFlag = animVars.lookup(_mirrorFlagVar, _mirrorFlag);
float frame = animVars.lookup(_frameVar, _frame);
_frame = ::accumulateTime(_startFrame, _endFrame, _timeScale, frame, dt, _loopFlag, _id, triggersOut);
// poll network anim to see if it's finished loading yet.
if (_blendType == AnimBlendType_Normal) {
if (_networkAnim && _networkAnim->isLoaded() && _skeleton) {
// loading is complete, copy & retarget animation.
_anim = copyAndRetargetFromNetworkAnim(_networkAnim, _skeleton);
// we no longer need the network animation resource.
_networkAnim.reset();
// mirrorAnim will be re-built on demand, if needed.
_mirrorAnim.clear();
_poses.resize(_skeleton->getNumJoints());
}
} else {
// an additive blend type
if (_networkAnim && _networkAnim->isLoaded() && _baseNetworkAnim && _baseNetworkAnim->isLoaded() && _skeleton) {
// loading is complete, copy & retarget animation.
_anim = copyAndRetargetFromNetworkAnim(_networkAnim, _skeleton);
// we no longer need the network animation resource.
_networkAnim.reset();
// mirrorAnim will be re-built on demand, if needed.
// TODO: handle mirrored relative animations.
_mirrorAnim.clear();
_poses.resize(_skeleton->getNumJoints());
// copy & retarget baseAnim!
auto baseAnim = copyAndRetargetFromNetworkAnim(_baseNetworkAnim, _skeleton);
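// note: only a single reference frame of the base animation (_baseFrame) is used when baking the deltas.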
if (_blendType == AnimBlendType_AddAbsolute) {
bakeAbsoluteDeltaAnim(_anim, baseAnim[(int)_baseFrame], _skeleton);
} else {
// AnimBlendType_AddRelative
bakeRelativeDeltaAnim(_anim, baseAnim[(int)_baseFrame]);
}
}
}
if (_anim.size()) {
// lazy creation of mirrored animation frames.
if (_mirrorFlag && _anim.size() != _mirrorAnim.size()) {
buildMirrorAnim();
}
int prevIndex = (int)glm::floor(_frame);
int nextIndex;
if (_loopFlag && _frame >= _endFrame) {
nextIndex = (int)glm::ceil(_startFrame);
} else {
nextIndex = (int)glm::ceil(_frame);
}
// It is possible for the user to set _startFrame and _endFrame to
// values before or past the valid range; we clamp the frame indices here.
int frameCount = (int)_anim.size();
prevIndex = std::min(std::max(0, prevIndex), frameCount - 1);
nextIndex = std::min(std::max(0, nextIndex), frameCount - 1);
const AnimPoseVec& prevFrame = _mirrorFlag ? _mirrorAnim[prevIndex] : _anim[prevIndex];
const AnimPoseVec& nextFrame = _mirrorFlag ? _mirrorAnim[nextIndex] : _anim[nextIndex];
float alpha = glm::fract(_frame);
::blend(_poses.size(), &prevFrame[0], &nextFrame[0], alpha, &_poses[0]);
}
processOutputJoints(triggersOut);
return _poses;
}
void AnimClip::setCurrentFrameInternal(float frame) {
// because dt is 0, we should not encounter any triggers
const float dt = 0.0f;
AnimVariantMap triggers;
_frame = ::accumulateTime(_startFrame, _endFrame, _timeScale, frame + _startFrame, dt, _loopFlag, _id, triggers);
}
void AnimClip::buildMirrorAnim() {
@@ -275,3 +357,9 @@ void AnimClip::buildMirrorAnim() {
const AnimPoseVec& AnimClip::getPosesInternal() const {
return _poses;
}
void AnimClip::loadURL(const QString& url) {
auto animCache = DependencyManager::get<AnimationCache>();
_networkAnim = animCache->getAnimation(url);
_url = url;
}


@@ -25,7 +25,8 @@ class AnimClip : public AnimNode {
public:
friend class AnimTests;
AnimClip(const QString& id, const QString& url, float startFrame, float endFrame, float timeScale, bool loopFlag, bool mirrorFlag);
AnimClip(const QString& id, const QString& url, float startFrame, float endFrame, float timeScale, bool loopFlag, bool mirrorFlag,
AnimBlendType blendType, const QString& baseURL, float baseFrame);
virtual ~AnimClip() override;
virtual const AnimPoseVec& evaluate(const AnimVariantMap& animVars, const AnimContext& context, float dt, AnimVariantMap& triggersOut) override;
@@ -52,19 +53,20 @@ public:
void setMirrorFlag(bool mirrorFlag) { _mirrorFlag = mirrorFlag; }
float getFrame() const { return _frame; }
void loadURL(const QString& url);
protected:
virtual void setCurrentFrameInternal(float frame) override;
void copyFromNetworkAnim();
void buildMirrorAnim();
// for AnimDebugDraw rendering
virtual const AnimPoseVec& getPosesInternal() const override;
AnimationPointer _networkAnim;
AnimationPointer _baseNetworkAnim;
AnimPoseVec _poses;
// _anim[frame][joint]
@@ -78,6 +80,9 @@ protected:
bool _loopFlag;
bool _mirrorFlag;
float _frame;
AnimBlendType _blendType;
QString _baseURL;
float _baseFrame;
QString _startFrameVar;
QString _endFrameVar;
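A hedged example of constructing an additive clip through the extended constructor; the id, URLs, and frame range below are made up for illustration:

// an aim-offset clip whose frames are baked as deltas against frame 0 of a base animation
auto aimOffsetClip = std::make_shared<AnimClip>(
    "aimOffsetUp",                                     // id (illustrative)
    "https://example.com/anims/aim_offset_up.fbx",     // url (hypothetical)
    0.0f, 5.0f,                                        // startFrame, endFrame
    1.0f,                                              // timeScale
    false, false,                                      // loopFlag, mirrorFlag
    AnimBlendType_AddRelative,                         // bake frames as parent-relative deltas...
    "https://example.com/anims/aim_offset_base.fbx",   // ...against this base animation (hypothetical)
    0.0f);                                             // baseFrame used as the reference pose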


@@ -34,6 +34,13 @@ enum class AnimNodeType {
NumTypes
};
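// How additive data is interpreted, both when a clip bakes its frames into deltas against a
// base animation and when a blendLinear node layers its second child onto its first:
//   AnimBlendType_Normal      - no additive behavior; standard linear interpolation.
//   AnimBlendType_AddRelative - deltas live in each joint's parent-relative frame.
//   AnimBlendType_AddAbsolute - rotation deltas live in the absolute (model) frame and are
//                               conjugated into parent-relative space when applied.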
enum AnimBlendType {
AnimBlendType_Normal,
AnimBlendType_AddRelative,
AnimBlendType_AddAbsolute,
AnimBlendType_NumTypes
};
class AnimContext {
public:
AnimContext() {}


@@ -161,6 +161,19 @@ static EasingType stringToEasingType(const QString& str) {
}
}
static AnimBlendType stringToAnimBlendType(const QString& str) {
if (str == "normal") {
return AnimBlendType_Normal;
} else if (str == "addRelative") {
return AnimBlendType_AddRelative;
} else if (str == "addAbsolute") {
return AnimBlendType_AddAbsolute;
} else {
return AnimBlendType_NumTypes;
}
}
static const char* animManipulatorJointVarTypeToString(AnimManipulator::JointVar::Type type) {
switch (type) {
case AnimManipulator::JointVar::Type::Absolute: return "absolute";
@@ -374,6 +387,9 @@ static AnimNode::Pointer loadClipNode(const QJsonObject& jsonObj, const QString&
READ_FLOAT(timeScale, jsonObj, id, jsonUrl, nullptr);
READ_BOOL(loopFlag, jsonObj, id, jsonUrl, nullptr);
READ_OPTIONAL_BOOL(mirrorFlag, jsonObj, false);
READ_OPTIONAL_STRING(blendType, jsonObj);
READ_OPTIONAL_STRING(baseURL, jsonObj);
READ_OPTIONAL_FLOAT(baseFrame, jsonObj, 0.0f);
READ_OPTIONAL_STRING(startFrameVar, jsonObj);
READ_OPTIONAL_STRING(endFrameVar, jsonObj);
@@ -381,11 +397,22 @@ static AnimNode::Pointer loadClipNode(const QJsonObject& jsonObj, const QString&
READ_OPTIONAL_STRING(loopFlagVar, jsonObj);
READ_OPTIONAL_STRING(mirrorFlagVar, jsonObj);
// animation urls can be relative to the containing url document.
auto tempUrl = QUrl(url);
tempUrl = jsonUrl.resolved(tempUrl);
auto node = std::make_shared<AnimClip>(id, tempUrl.toString(), startFrame, endFrame, timeScale, loopFlag, mirrorFlag);
// AJT:
AnimBlendType blendTypeEnum = AnimBlendType_Normal; // default value
if (!blendType.isEmpty()) {
blendTypeEnum = stringToAnimBlendType(blendType);
if (blendTypeEnum == AnimBlendType_NumTypes) {
qCCritical(animation) << "AnimNodeLoader, bad blendType on clip, id = " << id;
return nullptr;
}
}
auto node = std::make_shared<AnimClip>(id, tempUrl.toString(), startFrame, endFrame, timeScale, loopFlag, mirrorFlag, blendTypeEnum, baseURL, baseFrame);
if (!startFrameVar.isEmpty()) {
node->setStartFrameVar(startFrameVar);
@@ -409,10 +436,19 @@ static AnimNode::Pointer loadClipNode(const QJsonObject& jsonObj, const QString&
static AnimNode::Pointer loadBlendLinearNode(const QJsonObject& jsonObj, const QString& id, const QUrl& jsonUrl) {
READ_FLOAT(alpha, jsonObj, id, jsonUrl, nullptr);
READ_OPTIONAL_STRING(blendType, jsonObj);
READ_OPTIONAL_STRING(alphaVar, jsonObj);
auto node = std::make_shared<AnimBlendLinear>(id, alpha);
AnimBlendType blendTypeEnum = AnimBlendType_Normal; // default value
if (!blendType.isEmpty()) {
blendTypeEnum = stringToAnimBlendType(blendType);
if (blendTypeEnum == AnimBlendType_NumTypes) {
qCCritical(animation) << "AnimNodeLoader, bad blendType on blendLinear, id = " << id;
return nullptr;
}
}
auto node = std::make_shared<AnimBlendLinear>(id, alpha, blendTypeEnum);
if (!alphaVar.isEmpty()) {
node->setAlphaVar(alphaVar);
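A hedged sketch of the clip-node fields the loader above consumes, including the new additive ones; the URLs are hypothetical and the enclosing node wrapper of the anim.json graph (id, type, children) is omitted:

#include <QJsonDocument>
#include <QJsonObject>

// field names match the READ_* macros in loadClipNode; parsed here only to illustrate the shape
const char* exampleClipData = R"({
    "url": "https://example.com/anims/aim_offset_up.fbx",
    "startFrame": 0, "endFrame": 5, "timeScale": 1,
    "loopFlag": false, "mirrorFlag": false,
    "blendType": "addRelative",
    "baseURL": "https://example.com/anims/aim_offset_base.fbx",
    "baseFrame": 0
})";
QJsonObject exampleClipObj = QJsonDocument::fromJson(exampleClipData).object();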


@@ -15,7 +15,6 @@
// TODO: use restrict keyword
// TODO: excellent candidate for simd vectorization.
void blend(size_t numPoses, const AnimPose* a, const AnimPose* b, float alpha, AnimPose* result) {
for (size_t i = 0; i < numPoses; i++) {
const AnimPose& aPose = a[i];
@@ -27,6 +26,29 @@ void blend(size_t numPoses, const AnimPose* a, const AnimPose* b, float alpha, A
}
}
// additive blend
void blendAdd(size_t numPoses, const AnimPose* a, const AnimPose* b, float alpha, AnimPose* result) {
const glm::quat identity = glm::quat();
for (size_t i = 0; i < numPoses; i++) {
const AnimPose& aPose = a[i];
const AnimPose& bPose = b[i];
result[i].scale() = lerp(aPose.scale(), bPose.scale(), alpha);
// ensure that delta has the same "polarity" as the identity quat.
// we don't need to do a full dot product, just sign of w is sufficient.
glm::quat delta = bPose.rot();
if (delta.w < 0.0f) {
delta = -delta;
}
delta = glm::lerp(identity, delta, alpha);
result[i].rot() = glm::normalize(delta * aPose.rot());
result[i].trans() = aPose.trans() + (alpha * bPose.trans());
}
}
glm::quat averageQuats(size_t numQuats, const glm::quat* quats) {
if (numQuats == 0) {
return glm::quat();
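To make the additive rotation math above concrete, here is a small self-contained check (glm only; the names are illustrative): at alpha == 1, blendAdd layers the full baked delta on top of the first pose's rotation.

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

static void blendAddRotationSanityCheck() {
    glm::quat baseRot = glm::angleAxis(glm::radians(30.0f), glm::vec3(0.0f, 1.0f, 0.0f));
    glm::quat animRot = glm::angleAxis(glm::radians(45.0f), glm::vec3(0.0f, 1.0f, 0.0f));
    // rotation part of the delta baked by bakeRelativeDeltaAnim: delta = anim * inverse(base)
    glm::quat delta = animRot * glm::inverse(baseRot);
    // rotation part of blendAdd at alpha == 1: result = normalize(delta * a)
    glm::quat blended = glm::normalize(delta * baseRot);
    // blended is now (approximately) animRot, i.e. the full 45 degree yaw is recovered
    (void)blended;
}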


@@ -16,6 +16,9 @@
// this is where the magic happens
void blend(size_t numPoses, const AnimPose* a, const AnimPose* b, float alpha, AnimPose* result);
// additive blending
void blendAdd(size_t numPoses, const AnimPose* a, const AnimPose* b, float alpha, AnimPose* result);
glm::quat averageQuats(size_t numQuats, const glm::quat* quats);
float accumulateTime(float startFrame, float endFrame, float timeScale, float currentFrame, float dt, bool loopFlag,


@@ -564,7 +564,7 @@ void Rig::overrideRoleAnimation(const QString& role, const QString& url, float f
_origRoleAnimations[role] = node;
const float REFERENCE_FRAMES_PER_SECOND = 30.0f;
float timeScale = fps / REFERENCE_FRAMES_PER_SECOND;
auto clipNode = std::make_shared<AnimClip>(role, url, firstFrame, lastFrame, timeScale, loop, false);
auto clipNode = std::make_shared<AnimClip>(role, url, firstFrame, lastFrame, timeScale, loop, false, AnimBlendType_Normal, "", 0.0f);
_roleAnimStates[role] = { role, url, fps, loop, firstFrame, lastFrame };
AnimNode::Pointer parent = node->getParent();
parent->replaceChild(node, clipNode);