commit 6ab8821725
Leonardo Murillo, 2015-11-27 14:04:42 -06:00
55 changed files with 957 additions and 1970 deletions

View file

@ -18,8 +18,8 @@ if (WIN32)
ExternalProject_Add(
${EXTERNAL_NAME}
# URL https://bullet.googlecode.com/files/bullet-2.82-r2704.zip
URL http://hifi-public.s3.amazonaws.com/dependencies/bullet-2.82-r2704.zip
URL_MD5 f5e8914fc9064ad32e0d62d19d33d977
URL http://hifi-public.s3.amazonaws.com/dependencies/bullet-2.82-ccd-fix.zip
URL_MD5 d95b07eb120de7dd7786361c0b5a8d9f
CMAKE_ARGS ${PLATFORM_CMAKE_ARGS} -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR> -DBUILD_EXTRAS=0 -DINSTALL_LIBS=1 -DBUILD_DEMOS=0 -DUSE_GLUT=0 -DUSE_DX11=0
LOG_DOWNLOAD 1
LOG_CONFIGURE 1
@ -30,8 +30,8 @@ else ()
ExternalProject_Add(
${EXTERNAL_NAME}
#URL http://bullet.googlecode.com/files/bullet-2.82-r2704.tgz
URL http://hifi-public.s3.amazonaws.com/dependencies/bullet-2.82-r2704.tgz
URL_MD5 70b3c8d202dee91a0854b4cbc88173e8
URL http://hifi-public.s3.amazonaws.com/dependencies/bullet-2.82-ccd-fix.tgz
URL_MD5 fb140a4983b4109aa1c825a162aa8d64
CMAKE_ARGS ${PLATFORM_CMAKE_ARGS} -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR> -DBUILD_EXTRAS=0 -DINSTALL_LIBS=1 -DBUILD_DEMOS=0 -DUSE_GLUT=0
LOG_DOWNLOAD 1
LOG_CONFIGURE 1
@ -80,4 +80,4 @@ endif ()
if (DEFINED ${EXTERNAL_NAME_UPPER}_DYNAMICS_LIBRARY_RELEASE)
set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIR ${INSTALL_DIR}/include/bullet CACHE PATH "Path to bullet include directory")
endif ()

View file

@ -14,16 +14,17 @@
// An assignment client script that animates one avatar at a random location within 'spread' meters of 'origin'.
// In Domain Server Settings, go to scripts and give the url of this script. Press '+', and then 'Save and restart'.
var origin = {x: 500, y: 502, z: 500};
var spread = 10; // meters
var origin = {x: 500, y: 500, z: 500};
var spread = 20; // meters
var animationData = {url: "https://hifi-public.s3.amazonaws.com/ozan/anim/standard_anims/walk_fwd.fbx", lastFrame: 35};
Avatar.skeletonModelURL = "https://hifi-public.s3.amazonaws.com/marketplace/contents/dd03b8e3-52fb-4ab3-9ac9-3b17e00cd85d/98baa90b3b66803c5d7bd4537fca6993.fst"; //lovejoy
Avatar.displayName = "'Bot";
var millisecondsToWaitBeforeStarting = 10 * 1000; // To give the various servers a chance to start.
Agent.isAvatar = true;
function coord() { return (Math.random() * spread) - (spread / 2); } // randomly distribute a coordinate within +/- spread/2 of zero.
Script.setTimeout(function () {
Avatar.position = Vec3.sum(origin, {x: Math.random() * spread, y: 0, z: Math.random() * spread});
Avatar.position = Vec3.sum(origin, {x: coord(), y: 0, z: coord()});
print("Starting at", JSON.stringify(Avatar.position));
Avatar.startAnimation(animationData.url, animationData.fps || 30, 1, true, false, animationData.firstFrame || 0, animationData.lastFrame);
}, millisecondsToWaitBeforeStarting);

View file

@ -25,8 +25,6 @@ var LAST_FRAME = 15.0; // What is the number of the last frame we want to use
var SMOOTH_FACTOR = 0.75;
var MAX_FRAMES = 30.0;
var LEFT_HAND_CLICK = Controller.findAction("LEFT_HAND_CLICK");
var RIGHT_HAND_CLICK = Controller.findAction("RIGHT_HAND_CLICK");
var CONTROLLER_DEAD_SPOT = 0.25;
@ -45,8 +43,8 @@ function normalizeControllerValue(val) {
}
Script.update.connect(function(deltaTime) {
var leftTrigger = normalizeControllerValue(Controller.getActionValue(LEFT_HAND_CLICK));
var rightTrigger = normalizeControllerValue(Controller.getActionValue(RIGHT_HAND_CLICK));
var leftTrigger = normalizeControllerValue(Controller.getValue(Controller.Standard.LT));
var rightTrigger = normalizeControllerValue(Controller.getValue(Controller.Standard.RT));
// Average last few trigger values together for a bit of smoothing
var smoothLeftTrigger = leftTrigger * (1.0 - SMOOTH_FACTOR) + lastLeftTrigger * SMOOTH_FACTOR;
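This hunk is the first of several in this commit that migrate trigger reads from the old action-based API (Controller.findAction / Controller.getActionValue) to the standard controller device (Controller.getValue with Controller.Standard endpoints). A minimal sketch of the pattern, reusing the SMOOTH_FACTOR smoothing from the line above:

    // Old pattern, removed throughout this commit:
    //   var LEFT_HAND_CLICK = Controller.findAction("LEFT_HAND_CLICK");
    //   var leftTrigger = Controller.getActionValue(LEFT_HAND_CLICK);

    // New pattern: read the standard-device trigger endpoints directly.
    var leftTrigger = Controller.getValue(Controller.Standard.LT);
    var rightTrigger = Controller.getValue(Controller.Standard.RT);

    // Exponential smoothing, as in the line above: weight the previous
    // sample by SMOOTH_FACTOR and the new sample by the remainder.
    var smoothLeftTrigger = leftTrigger * (1.0 - SMOOTH_FACTOR) + lastLeftTrigger * SMOOTH_FACTOR;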

View file

@ -84,7 +84,7 @@
overlay = null;
},
startRecording: function (entityID) {
startRecording: function () {
if (!isAvatarRecording) {
print("RECORDING STARTED");
Messages.sendMessage(CLIENTS_TO_MASTER_CHANNEL, PARTICIPATING_MESSAGE); // tell the master that I'm participating
@ -94,7 +94,7 @@
}
},
stopRecording: function (entityID) {
stopRecording: function () {
if (isAvatarRecording) {
print("RECORDING ENDED");
Recording.stopRecording();
@ -109,7 +109,7 @@
_this.stopRecording();
Messages.unsubscribe(MASTER_TO_CLIENTS_CHANNEL);
Messages.messageReceived.disconnect(receivingMessage);
if(overlay !== null){
if (overlay !== null) {
Overlays.deleteOverlay(overlay);
overlay = null;
}

View file

@ -29,11 +29,14 @@ var STOP_MESSAGE = "recordingEnded";
var PARTICIPATING_MESSAGE = "participatingToRecording";
var TIMEOUT = 20;
var toolBar = null;
var recordIcon;
var isRecording = false;
var performanceJSON = { "avatarClips" : [] };
var responsesExpected = 0;
var readyToPrintInfo = false;
var performanceFileURL = null;
var waitingForPerformanceFile = true;
var totalWaitingTime = 0;
var extension = "txt";
@ -71,9 +74,9 @@ function mousePressEvent(event) {
print("I'm the master. I want to start recording");
Messages.sendMessage(MASTER_TO_CLIENTS_CHANNEL, START_MESSAGE);
isRecording = true;
waitingForPerformanceFile = true;
} else {
print("I want to stop recording");
waitingForPerformanceFile = true;
Script.update.connect(update);
Messages.sendMessage(MASTER_TO_CLIENTS_CHANNEL, STOP_MESSAGE);
isRecording = false;
@ -108,29 +111,38 @@ function update(deltaTime) {
}
// clean up after uploading the performance file to the asset server
waitingForPerformanceFile = false;
responsesExpected = 0;
totalWaitingTime = 0;
Script.update.disconnect(update);
performanceJSON = { "avatarClips" : [] };
}
} else if (readyToPrintInfo == true) {
Window.prompt("Performance file and clips: ", getUtilityString());
responsesExpected = 0;
performanceJSON = { "avatarClips" : [] };
Script.update.disconnect(update);
}
}
function getUtilityString() {
var resultString = "JSON:\n" + performanceFileURL + "\n" + responsesExpected + " avatar clips:\n";
var avatarClips = performanceJSON.avatarClips;
avatarClips.forEach(function(param) {
resultString += param + "\n";
});
return resultString;
}
function uploadFinished(url) {
// print the URL here so that the master can copy it
print("PERFORMANCE FILE URL: " + url);
Assets.downloadData(url, function (data) {
printPerformanceJSON(JSON.parse(data));
});
}
function printPerformanceJSON(obj) {
print("some info:");
print("downloaded performance file from asset and examinating its content...");
var avatarClips = obj.avatarClips;
performanceFileURL = url;
print("PERFORMANCE FILE URL: " + performanceFileURL);
print("number of clips obtained:" + responsesExpected);
var avatarClips = performanceJSON.avatarClips;
avatarClips.forEach(function(param) {
print("clip url obtained: " + param);
});
readyToPrintInfo = true;
Script.update.connect(update);
}
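Taken together, this master script and the client recorder script in the previous file coordinate over the Messages API. A condensed, hedged sketch of the client side of the handshake, assuming the channel and message constants shown above and the usual (channel, message, senderID) callback arguments:

    // Client: listen for the master's broadcasts and announce participation.
    Messages.subscribe(MASTER_TO_CLIENTS_CHANNEL);
    Messages.messageReceived.connect(function (channel, message, senderID) {
        if (channel !== MASTER_TO_CLIENTS_CHANNEL) {
            return;
        }
        if (message === START_MESSAGE) {
            // Tell the master we are participating, so it knows how many
            // clip URLs to expect, then begin recording.
            Messages.sendMessage(CLIENTS_TO_MASTER_CHANNEL, PARTICIPATING_MESSAGE);
        } else if (message === STOP_MESSAGE) {
            Recording.stopRecording();
        }
    });

The master counts one PARTICIPATING_MESSAGE reply per client (the responsesExpected counter above) and waits for that many clip URLs before assembling performanceJSON.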
function cleanup() {

View file

@ -361,8 +361,8 @@ function update() {
}
function updateControllerState() {
rightTriggerValue = Controller.getActionValue(rightHandClick);
leftTriggerValue = Controller.getActionValue(leftHandClick);
rightTriggerValue = Controller.getValue(Controller.Standard.RT);
leftTriggerValue = Controller.getValue(Controller.Standard.LT);
if (rightTriggerValue > TRIGGER_THRESHOLD && !swordHeld) {
grabSword("right")

View file

@ -159,7 +159,8 @@ function MyController(hand, triggerAction) {
}
this.updateControllerState = function() {
this.triggerValue = Controller.getActionValue(this.triggerAction);
this.triggerValue = Controller.getValue(this.triggerAction);
if (this.triggerValue > TRIGGER_ON_VALUE && this.prevTriggerValue <= TRIGGER_ON_VALUE) {
this.squeeze();
} else if (this.triggerValue < TRIGGER_ON_VALUE && this.prevTriggerValue >= TRIGGER_ON_VALUE) {
@ -256,8 +257,8 @@ function MyController(hand, triggerAction) {
}
}
var rightController = new MyController(RIGHT_HAND, Controller.findAction("RIGHT_HAND_CLICK"));
var leftController = new MyController(LEFT_HAND, Controller.findAction("LEFT_HAND_CLICK"));
var rightController = new MyController(RIGHT_HAND, Controller.Standard.RT);
var leftController = new MyController(LEFT_HAND, Controller.Standard.LT);
Controller.actionEvent.connect(function(action, state) {
if (state === 0) {

View file

@ -247,4 +247,4 @@ function cleanup() {
// Uncomment this line to delete whiteboard and all associated entity on script close
// Script.scriptEnding.connect(cleanup);
//Script.scriptEnding.connect(cleanup);

View file

@ -12,10 +12,9 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
/*global MyAvatar, Entities, AnimationCache, SoundCache, Scene, Camera, Overlays, HMD, AvatarList, AvatarManager, Controller, UndoStack, Window, Account, GlobalServices, Script, ScriptDiscoveryService, LODManager, Menu, Vec3, Quat, AudioDevice, Paths, Clipboard, Settings, XMLHttpRequest, randFloat, randInt */
Script.include("https://hifi-public.s3.amazonaws.com/scripts/utilities.js");
Script.include("../../libraries/utils.js");
var scriptURL = Script.resolvePath('flashlight.js?123123');
var scriptURL = Script.resolvePath('flashlight.js');
var modelURL = "https://hifi-public.s3.amazonaws.com/models/props/flashlight.fbx";

View file

@ -183,11 +183,14 @@
},
changeLightWithTriggerPressure: function(flashLightHand) {
var handClickString = flashLightHand + "_HAND_CLICK";
var handClick = Controller.findAction(handClickString);
if (flashLightHand === 'LEFT') {
this.triggerValue = Controller.getValue(Controller.Standard.LT);
}
if (flashLightHand === 'RIGHT') {
this.triggerValue = Controller.getValue(Controller.Standard.RT);
this.triggerValue = Controller.getActionValue(handClick);
}
if (this.triggerValue < DISABLE_LIGHT_THRESHOLD && this.lightOn === true) {
this.turnLightOff();
@ -266,4 +269,4 @@
// entity scripts always need to return a newly constructed object of our type
return new Flashlight();
});

View file

@ -256,6 +256,12 @@ Item {
visible: root.expanded
text: "LOD: " + root.lodStatus;
}
Text {
color: root.fontColor;
font.pixelSize: root.fontSize
visible: root.expanded
text: "Renderable avatars: " + root.avatarRenderableCount + " w/in " + root.avatarRenderDistance + "m";
}
}
}
}

View file

@ -679,7 +679,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
}));
userInputMapper->registerDevice(_applicationStateDevice);
// Setup the keyboardMouseDevice and the user input mapper with the default bindings
userInputMapper->registerDevice(_keyboardMouseDevice->getInputDevice());
@ -749,7 +749,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
_oldHandRightClick[0] = false;
_oldHandLeftClick[1] = false;
_oldHandRightClick[1] = false;
auto applicationUpdater = DependencyManager::get<AutoUpdater>();
connect(applicationUpdater.data(), &AutoUpdater::newVersionIsAvailable, dialogsManager.data(), &DialogsManager::showUpdateDialog);
applicationUpdater->checkForUpdate();
@ -768,7 +768,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
// If the user clicks on an entity, we will check that it's an unlocked web entity, and if so, set the focus to it
auto entityScriptingInterface = DependencyManager::get<EntityScriptingInterface>();
connect(entityScriptingInterface.data(), &EntityScriptingInterface::clickDownOnEntity,
[this, entityScriptingInterface](const EntityItemID& entityItemID, const MouseEvent& event) {
if (_keyboardFocusedItem != entityItemID) {
_keyboardFocusedItem = UNKNOWN_ENTITY_ID;
@ -817,7 +817,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
});
// If the user clicks somewhere where there is NO entity at all, we will release focus
connect(getEntities(), &EntityTreeRenderer::mousePressOffEntity,
[=](const RayToEntityIntersectionResult& entityItemID, const QMouseEvent* event, unsigned int deviceId) {
_keyboardFocusedItem = UNKNOWN_ENTITY_ID;
if (_keyboardFocusHighlight) {
@ -826,17 +826,17 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
});
connect(this, &Application::applicationStateChanged, this, &Application::activeChanged);
qCDebug(interfaceapp, "Startup time: %4.2f seconds.", (double)startupTimer.elapsed() / 1000.0);
}
void Application::aboutToQuit() {
emit beforeAboutToQuit();
getActiveDisplayPlugin()->deactivate();
_aboutToQuit = true;
cleanupBeforeQuit();
}
@ -860,16 +860,16 @@ void Application::cleanupBeforeQuit() {
_keyboardFocusHighlight = nullptr;
_entities.clear(); // this will allow entity scripts to properly shutdown
auto nodeList = DependencyManager::get<NodeList>();
// send the domain a disconnect packet, force stoppage of domain-server check-ins
nodeList->getDomainHandler().disconnect();
nodeList->setIsShuttingDown(true);
// tell the packet receiver we're shutting down, so it can drop packets
nodeList->getPacketReceiver().setShouldDropPackets(true);
_entities.shutdown(); // tell the entities system we're shutting down, so it will stop running scripts
ScriptEngine::stopAllScripts(this); // stop all currently running global scripts
@ -947,7 +947,7 @@ Application::~Application() {
DependencyManager::destroy<GeometryCache>();
DependencyManager::destroy<ScriptCache>();
DependencyManager::destroy<SoundCache>();
// cleanup the AssetClient thread
QThread* assetThread = DependencyManager::get<AssetClient>()->thread();
DependencyManager::destroy<AssetClient>();
@ -955,14 +955,14 @@ Application::~Application() {
assetThread->wait();
QThread* nodeThread = DependencyManager::get<NodeList>()->thread();
// remove the NodeList from the DependencyManager
DependencyManager::destroy<NodeList>();
// ask the node thread to quit and wait until it is done
nodeThread->quit();
nodeThread->wait();
Leapmotion::destroy();
RealSense::destroy();
@ -1058,7 +1058,7 @@ void Application::initializeUi() {
resizeGL();
}
});
// This will set up the input plugins UI
_activeInputPlugins.clear();
foreach(auto inputPlugin, PluginManager::getInstance()->getInputPlugins()) {
@ -1077,8 +1077,10 @@ void Application::paintGL() {
uint64_t now = usecTimestampNow();
static uint64_t lastPaintBegin{ now };
uint64_t diff = now - lastPaintBegin;
float instantaneousFps = 0.0f;
if (diff != 0) {
_framesPerSecond.updateAverage((float)USECS_PER_SECOND / (float)diff);
instantaneousFps = (float)USECS_PER_SECOND / (float)diff;
_framesPerSecond.updateAverage(_lastInstantaneousFps);
}
lastPaintBegin = now;
@ -1100,8 +1102,8 @@ void Application::paintGL() {
return;
}
// Some plugins process message events, potentially leading to
// re-entering a paint event. don't allow further processing if this
// happens
if (_inPaint) {
return;
@ -1109,6 +1111,29 @@ void Application::paintGL() {
_inPaint = true;
Finally clearFlagLambda([this] { _inPaint = false; });
// Some LOD-like controls need to know a smoothly varying "potential" frame rate that doesn't
// include time waiting for vsync, and which can report a number above target if we've got the headroom.
// For example, if we're shooting for 75fps and paintWait is 3.3333ms (= 25% of the 13.33ms target period), our deducedNonVSyncFps
// would be 100fps. In principle, a paintWait of zero would have deducedNonVSyncFps=75.
// Here we make a guess for deducedNonVSyncFps = 1 / deducedNonVSyncPeriod.
//
// Time between previous paintGL call and this one, which can vary not only with vSync misses, but also with Qt timing.
// We're using this as a proxy for the time between vsync and displayEnd, below. (Not exact, but tends to be the same over time.)
// This is not the same as update(deltaTime), because the latter attempts to throttle to 60hz and also clamps to 1/4 second.
const float actualPeriod = diff / (float)USECS_PER_SECOND; // same as 1/instantaneousFps but easier for compiler to optimize
// Note that _lastPaintWait (stored at end of last call) is for the same paint cycle.
float deducedNonVSyncPeriod = actualPeriod - _lastPaintWait + _marginForDeducedFramePeriod; // plus some non-zero time for machinery we can't measure
// We don't know how much time to allow for that, but if we went over the target period, we know it's at least the portion
// of paintWait up to the next vSync. This gives us enough of a penalty so that when actualPeriod crosses two cycles,
// the key part (and not an exaggerated part) of _lastPaintWait is accounted for.
const float targetPeriod = getTargetFramePeriod();
if (_lastPaintWait > EPSILON && actualPeriod > targetPeriod) {
// Don't use C++ remainder(). Its authors are mathematically insane.
deducedNonVSyncPeriod += fmod(actualPeriod, _lastPaintWait);
}
_lastDeducedNonVSyncFps = 1.0f / deducedNonVSyncPeriod;
_lastInstantaneousFps = instantaneousFps;
auto displayPlugin = getActiveDisplayPlugin();
displayPlugin->preRender();
_offscreenContext->makeCurrent();
@ -1137,17 +1162,17 @@ void Application::paintGL() {
if (Menu::getInstance()->isOptionChecked(MenuOption::Mirror)) {
PerformanceTimer perfTimer("Mirror");
auto primaryFbo = DependencyManager::get<FramebufferCache>()->getPrimaryFramebufferDepthColor();
renderArgs._renderMode = RenderArgs::MIRROR_RENDER_MODE;
renderRearViewMirror(&renderArgs, _mirrorViewRect);
renderArgs._renderMode = RenderArgs::DEFAULT_RENDER_MODE;
{
float ratio = ((float)QApplication::desktop()->windowHandle()->devicePixelRatio() * getRenderResolutionScale());
// Flip the src and destination rect horizontally to do the mirror
auto mirrorRect = glm::ivec4(0, 0, _mirrorViewRect.width() * ratio, _mirrorViewRect.height() * ratio);
auto mirrorRectDest = glm::ivec4(mirrorRect.z, mirrorRect.y, mirrorRect.x, mirrorRect.w);
auto selfieFbo = DependencyManager::get<FramebufferCache>()->getSelfieFramebuffer();
gpu::doInBatch(renderArgs._context, [=](gpu::Batch& batch) {
batch.setFramebuffer(selfieFbo);
@ -1169,9 +1194,9 @@ void Application::paintGL() {
{
PerformanceTimer perfTimer("CameraUpdates");
auto myAvatar = getMyAvatar();
myAvatar->startCapture();
if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON || _myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
Menu::getInstance()->setIsOptionChecked(MenuOption::FirstPerson, myAvatar->getBoomLength() <= MyAvatar::ZOOM_MIN);
@ -1208,26 +1233,26 @@ void Application::paintGL() {
* (myAvatar->getScale() * myAvatar->getBoomLength() * glm::vec3(0.0f, 0.0f, 1.0f)));
} else {
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ myAvatar->getOrientation()
* (myAvatar->getScale() * myAvatar->getBoomLength() * glm::vec3(0.0f, 0.0f, 1.0f)));
}
}
} else if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
if (isHMDMode()) {
glm::quat hmdRotation = extractRotation(myAvatar->getHMDSensorMatrix());
_myCamera.setRotation(myAvatar->getWorldAlignedOrientation()
* glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)) * hmdRotation);
glm::vec3 hmdOffset = extractTranslation(myAvatar->getHMDSensorMatrix());
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ glm::vec3(0, _raiseMirror * myAvatar->getScale(), 0)
+ (myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, _rotateMirror, 0.0f))) *
glm::vec3(0.0f, 0.0f, -1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror
+ (myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f))) * hmdOffset);
} else {
_myCamera.setRotation(myAvatar->getWorldAlignedOrientation()
* glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
_myCamera.setPosition(myAvatar->getDefaultEyePosition()
+ glm::vec3(0, _raiseMirror * myAvatar->getScale(), 0)
+ (myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, _rotateMirror, 0.0f))) *
glm::vec3(0.0f, 0.0f, -1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
}
@ -1246,7 +1271,7 @@ void Application::paintGL() {
}
}
}
// Update camera position
if (!isHMDMode()) {
_myCamera.update(1.0f / _fps);
}
@ -1264,12 +1289,12 @@ void Application::paintGL() {
if (displayPlugin->isStereo()) {
// Stereo modes will typically have a larger projection matrix overall,
// so we ask for the 'mono' projection matrix, which for stereo and HMD
// plugins will imply the combined projection for both eyes.
//
// This is properly implemented for the Oculus plugins, but for OpenVR
// and Stereo displays I'm not sure how to get / calculate it, so we're
// just relying on the left FOV in each case and hoping that the
// overall culling margin of error doesn't cause popping in the
// right eye. There are FIXMEs in the relevant plugins
_myCamera.setProjection(displayPlugin->getProjection(Mono, _myCamera.getProjection()));
renderArgs._context->enableStereo(true);
@ -1279,11 +1304,11 @@ void Application::paintGL() {
auto hmdInterface = DependencyManager::get<HMDScriptingInterface>();
float IPDScale = hmdInterface->getIPDScale();
// FIXME we probably don't need to set the projection matrix every frame,
// only when the display plugin changes (or in non-HMD modes when the user
// changes the FOV manually, which right now I don't think they can).
for_each_eye([&](Eye eye) {
// For providing the stereo eye views, the HMD head pose has already been
// applied to the avatar, so we need to get the difference between the head
// pose applied to the avatar and the per eye pose, and use THAT as
// the per-eye stereo matrix adjustment.
mat4 eyeToHead = displayPlugin->getEyeToHeadTransform(eye);
@ -1293,10 +1318,10 @@ void Application::paintGL() {
mat4 eyeOffsetTransform = glm::translate(mat4(), eyeOffset * -1.0f * IPDScale);
eyeOffsets[eye] = eyeOffsetTransform;
// Tell the plugin what pose we're using to render. In this case we're just using the
// unmodified head pose because the only plugin that cares (the Oculus plugin) uses it
// for rotational timewarp. If we move to support positional timewarp, we need to
// ensure this contains the full pose composed with the eye offsets.
mat4 headPose = displayPlugin->getHeadPose();
displayPlugin->setEyeRenderPose(eye, headPose);
@ -1343,7 +1368,7 @@ void Application::paintGL() {
PerformanceTimer perfTimer("pluginOutput");
auto primaryFbo = framebufferCache->getPrimaryFramebuffer();
GLuint finalTexture = gpu::GLBackend::getTextureID(primaryFbo->getRenderBuffer(0));
// Ensure the rendering context commands are completed when rendering
GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
// Ensure the sync object is flushed to the driver thread before releasing the context
// CRITICAL for the mac driver apparently.
@ -1355,6 +1380,7 @@ void Application::paintGL() {
// Ensure all operations from the previous context are complete before we try to read the fbo
glWaitSync(sync, 0, GL_TIMEOUT_IGNORED);
glDeleteSync(sync);
uint64_t displayStart = usecTimestampNow();
{
PROFILE_RANGE(__FUNCTION__ "/pluginDisplay");
@ -1367,6 +1393,10 @@ void Application::paintGL() {
PerformanceTimer perfTimer("bufferSwap");
displayPlugin->finishFrame();
}
uint64_t displayEnd = usecTimestampNow();
const float displayPeriodUsec = (float)(displayEnd - displayStart); // usecs
_lastPaintWait = displayPeriodUsec / (float)USECS_PER_SECOND;
}
{
@ -1394,7 +1424,7 @@ void Application::audioMuteToggled() {
}
void Application::faceTrackerMuteToggled() {
QAction* muteAction = Menu::getInstance()->getActionForOption(MenuOption::MuteFaceTracking);
Q_CHECK_PTR(muteAction);
bool isMuted = getSelectedFaceTracker()->isMuted();
@ -1427,7 +1457,7 @@ void Application::resizeGL() {
if (nullptr == _displayPlugin) {
return;
}
auto displayPlugin = getActiveDisplayPlugin();
// Set the desired FBO texture size. If it hasn't changed, this does nothing.
// Otherwise, it must rebuild the FBOs
@ -1437,14 +1467,14 @@ void Application::resizeGL() {
_renderResolution = renderSize;
DependencyManager::get<FramebufferCache>()->setFrameBufferSize(fromGlm(renderSize));
}
// FIXME the aspect ratio for stereo displays is incorrect based on this.
float aspectRatio = displayPlugin->getRecommendedAspectRatio();
_myCamera.setProjection(glm::perspective(glm::radians(_fieldOfView.get()), aspectRatio,
DEFAULT_NEAR_CLIP, DEFAULT_FAR_CLIP));
// Possible change in aspect ratio
loadViewFrustum(_myCamera, _viewFrustum);
auto offscreenUi = DependencyManager::get<OffscreenUi>();
auto uiSize = displayPlugin->getRecommendedUiSize();
// Bit of a hack since there's no device pixel ratio change event I can find.
@ -1613,7 +1643,7 @@ void Application::keyPressEvent(QKeyEvent* event) {
if (isMeta) {
auto offscreenUi = DependencyManager::get<OffscreenUi>();
offscreenUi->load("Browser.qml");
}
break;
case Qt::Key_X:
@ -2109,7 +2139,7 @@ void Application::wheelEvent(QWheelEvent* event) {
if (_controllerScriptingInterface->isWheelCaptured()) {
return;
}
if (Menu::getInstance()->isOptionChecked(KeyboardMouseDevice::NAME)) {
_keyboardMouseDevice->wheelEvent(event);
}
@ -2227,7 +2257,7 @@ void Application::idle(uint64_t now) {
_idleLoopStdev.reset();
}
}
_overlayConductor.update(secondsSinceLastUpdate);
// check for any requested background downloads.
@ -2481,7 +2511,7 @@ void Application::init() {
DependencyManager::get<AddressManager>()->loadSettings(addressLookupString);
qCDebug(interfaceapp) << "Loaded settings";
Leapmotion::init();
RealSense::init();
@ -2539,7 +2569,7 @@ void Application::setAvatarUpdateThreading(bool isThreaded) {
if (_avatarUpdate && (_avatarUpdate->isThreaded() == isThreaded)) {
return;
}
auto myAvatar = getMyAvatar();
bool isRigEnabled = myAvatar->getEnableRigAnimations();
bool isGraphEnabled = myAvatar->getEnableAnimGraph();
@ -2756,7 +2786,7 @@ void Application::updateDialogs(float deltaTime) {
if (audioStatsDialog) {
audioStatsDialog->update();
}
// Update bandwidth dialog, if any
BandwidthDialog* bandwidthDialog = dialogsManager->getBandwidthDialog();
if (bandwidthDialog) {
@ -2892,7 +2922,7 @@ void Application::update(float deltaTime) {
_entities.getTree()->withWriteLock([&] {
_physicsEngine->stepSimulation();
});
if (_physicsEngine->hasOutgoingChanges()) {
_entities.getTree()->withWriteLock([&] {
_entitySimulation.handleOutgoingChanges(_physicsEngine->getOutgoingChanges(), _physicsEngine->getSessionID());
@ -3026,10 +3056,10 @@ int Application::sendNackPackets() {
foreach(const OCTREE_PACKET_SEQUENCE& missingNumber, missingSequenceNumbers) {
nackPacketList->writePrimitive(missingNumber);
}
if (nackPacketList->getNumPackets()) {
packetsSent += nackPacketList->getNumPackets();
// send the packet list
nodeList->sendPacketList(std::move(nackPacketList), *node);
}
@ -3635,7 +3665,7 @@ void Application::renderRearViewMirror(RenderArgs* renderArgs, const QRect& regi
float fov = MIRROR_FIELD_OF_VIEW;
auto myAvatar = getMyAvatar();
// bool eyeRelativeCamera = false;
if (billboard) {
fov = BILLBOARD_FIELD_OF_VIEW; // degrees
@ -3843,7 +3873,7 @@ void Application::nodeKilled(SharedNodePointer node) {
Menu::getInstance()->getActionForOption(MenuOption::UploadAsset)->setEnabled(false);
}
}
void Application::trackIncomingOctreePacket(NLPacket& packet, SharedNodePointer sendingNode, bool wasStatsPacket) {
// Attempt to identify the sender from its address.
@ -3868,7 +3898,7 @@ int Application::processOctreeStats(NLPacket& packet, SharedNodePointer sendingN
int statsMessageLength = 0;
const QUuid& nodeUUID = sendingNode->getUUID();
// now that we know the node ID, let's add these stats to the stats for that node...
_octreeServerSceneStats.withWriteLock([&] {
OctreeSceneStats& octreeStats = _octreeServerSceneStats[nodeUUID];
@ -4069,7 +4099,7 @@ bool Application::acceptURL(const QString& urlString, bool defaultUpload) {
Qt::AutoConnection, Q_ARG(const QString&, urlString));
return true;
}
QUrl url(urlString);
QHashIterator<QString, AcceptURLMethod> i(_acceptedExtensions);
QString lowerPath = url.path().toLower();
@ -4080,7 +4110,7 @@ bool Application::acceptURL(const QString& urlString, bool defaultUpload) {
return (this->*method)(urlString);
}
}
return defaultUpload && askToUploadAsset(urlString);
}
@ -4162,10 +4192,10 @@ bool Application::askToUploadAsset(const QString& filename) {
QString("You don't have upload rights on that domain.\n\n"));
return false;
}
QUrl url { filename };
if (auto upload = DependencyManager::get<AssetClient>()->createUpload(url.toLocalFile())) {
QMessageBox messageBox;
messageBox.setWindowTitle("Asset upload");
messageBox.setText("You are about to upload the following file to the asset server:\n" +
@ -4173,19 +4203,19 @@ bool Application::askToUploadAsset(const QString& filename) {
messageBox.setInformativeText("Do you want to continue?");
messageBox.setStandardButtons(QMessageBox::Ok | QMessageBox::Cancel);
messageBox.setDefaultButton(QMessageBox::Ok);
// Option to drop model in world for models
if (filename.endsWith(FBX_EXTENSION) || filename.endsWith(OBJ_EXTENSION)) {
auto checkBox = new QCheckBox(&messageBox);
checkBox->setText("Add to scene");
messageBox.setCheckBox(checkBox);
}
if (messageBox.exec() != QMessageBox::Ok) {
upload->deleteLater();
return false;
}
// connect to the finished signal so we know when the AssetUpload is done
if (messageBox.checkBox() && (messageBox.checkBox()->checkState() == Qt::Checked)) {
// Custom behavior for models
@ -4195,12 +4225,12 @@ bool Application::askToUploadAsset(const QString& filename) {
&AssetUploadDialogFactory::getInstance(),
&AssetUploadDialogFactory::handleUploadFinished);
}
// start the upload now
upload->start();
return true;
}
// display a message box with the error
QMessageBox::warning(_window, "Failed Upload", QString("Failed to upload %1.\n\n").arg(filename));
return false;
@ -4208,20 +4238,20 @@ bool Application::askToUploadAsset(const QString& filename) {
void Application::modelUploadFinished(AssetUpload* upload, const QString& hash) {
auto filename = QFileInfo(upload->getFilename()).fileName();
if ((upload->getError() == AssetUpload::NoError) &&
(filename.endsWith(FBX_EXTENSION) || filename.endsWith(OBJ_EXTENSION))) {
auto entities = DependencyManager::get<EntityScriptingInterface>();
EntityItemProperties properties;
properties.setType(EntityTypes::Model);
properties.setModelURL(QString("%1:%2.%3").arg(URL_SCHEME_ATP).arg(hash).arg(upload->getExtension()));
properties.setPosition(_myCamera.getPosition() + _myCamera.getOrientation() * Vectors::FRONT * 2.0f);
properties.setName(QUrl(upload->getFilename()).fileName());
entities->addEntity(properties);
upload->deleteLater();
} else {
AssetUploadDialogFactory::getInstance().handleUploadFinished(upload, hash);
@ -4510,7 +4540,7 @@ void Application::takeSnapshot() {
_snapshotShareDialog = new SnapshotShareDialog(fileName, _glWidget);
}
_snapshotShareDialog->show();
}
float Application::getRenderResolutionScale() const {
@ -4755,8 +4785,8 @@ void Application::updateDisplayMode() {
return;
}
// Some plugins *cough* Oculus *cough* process message events from inside their
// display function, and we don't want to change the display plugin underneath
// the paintGL call, so we need to guard against that
if (_inPaint) {
qDebug() << "Deferring plugin switch until out of painting";
@ -4790,14 +4820,14 @@ void Application::updateDisplayMode() {
oldDisplayPlugin = _displayPlugin;
_displayPlugin = newDisplayPlugin;
// If the displayPlugin is a screen based HMD, then it will want the HMDTools displayed
// Direct Mode HMDs (like windows Oculus) will be isHmd() but will have a screen of -1
bool newPluginWantsHMDTools = newDisplayPlugin ?
(newDisplayPlugin->isHmd() && (newDisplayPlugin->getHmdScreen() >= 0)) : false;
bool oldPluginWantedHMDTools = oldDisplayPlugin ?
(oldDisplayPlugin->isHmd() && (oldDisplayPlugin->getHmdScreen() >= 0)) : false;
// Only show the hmd tools after the correct plugin has
// been activated so that its UI is set up correctly
if (newPluginWantsHMDTools) {
@ -4807,7 +4837,7 @@ void Application::updateDisplayMode() {
if (oldDisplayPlugin) {
oldDisplayPlugin->deactivate();
_offscreenContext->makeCurrent();
// if the old plugin was HMD and the new plugin is not HMD, then hide our hmdtools
if (oldPluginWantedHMDTools && !newPluginWantsHMDTools) {
DependencyManager::get<DialogsManager>()->hmdTools(false);
@ -4940,7 +4970,7 @@ void Application::setPalmData(Hand* hand, const controller::Pose& pose, float de
rawVelocity = glm::vec3(0.0f);
}
palm.setRawVelocity(rawVelocity); // meters/sec
// Angular Velocity of Palm
glm::quat deltaRotation = rotation * glm::inverse(palm.getRawRotation());
glm::vec3 angularVelocity(0.0f);
@ -5020,7 +5050,7 @@ void Application::emulateMouse(Hand* hand, float click, float shift, HandData::H
pos.setY(canvasSize.y / 2.0f + cursorRange * yAngle);
}
// If we are off screen then we should stop processing, and if a trigger or bumper is pressed,
// we should unpress them.
if (pos.x() == INT_MAX) {

View file

@ -159,6 +159,14 @@ public:
bool isForeground() const { return _isForeground; }
float getFps() const { return _fps; }
float const HMD_TARGET_FRAME_RATE = 75.0f;
float const DESKTOP_TARGET_FRAME_RATE = 60.0f;
float getTargetFrameRate() { return isHMDMode() ? HMD_TARGET_FRAME_RATE : DESKTOP_TARGET_FRAME_RATE; }
float getTargetFramePeriod() { return isHMDMode() ? 1.0f / HMD_TARGET_FRAME_RATE : 1.0f / DESKTOP_TARGET_FRAME_RATE; } // same as 1/getTargetFrameRate, but w/compile-time division
float getLastInstanteousFps() const { return _lastInstantaneousFps; }
float getLastPaintWait() const { return _lastPaintWait; };
float getLastDeducedNonVSyncFps() const { return _lastDeducedNonVSyncFps; }
void setMarginForDeducedFramePeriod(float newValue) { _marginForDeducedFramePeriod = newValue; }
float getFieldOfView() { return _fieldOfView.get(); }
void setFieldOfView(float fov);
@ -429,6 +437,10 @@ private:
float _fps;
QElapsedTimer _timerStart;
QElapsedTimer _lastTimeUpdated;
float _lastInstantaneousFps { 0.0f };
float _lastPaintWait { 0.0f };
float _lastDeducedNonVSyncFps { 0.0f };
float _marginForDeducedFramePeriod{ 0.002f }; // 2ms, adjustable
ShapeManager _shapeManager;
PhysicalEntitySimulation _entitySimulation;

View file

@ -40,7 +40,6 @@
#include "Menu.h"
#include "ModelReferential.h"
#include "Physics.h"
#include "Recorder.h"
#include "Util.h"
#include "world.h"
#include "InterfaceLogging.h"
@ -184,9 +183,31 @@ void Avatar::simulate(float deltaTime) {
if (_shouldRenderBillboard) {
if (getLODDistance() < BILLBOARD_LOD_DISTANCE * (1.0f - BILLBOARD_HYSTERESIS_PROPORTION)) {
_shouldRenderBillboard = false;
qCDebug(interfaceapp) << "Unbillboarding" << (isMyAvatar() ? "myself" : getSessionUUID()) << "for LOD" << getLODDistance();
}
} else if (getLODDistance() > BILLBOARD_LOD_DISTANCE * (1.0f + BILLBOARD_HYSTERESIS_PROPORTION)) {
_shouldRenderBillboard = true;
qCDebug(interfaceapp) << "Billboarding" << (isMyAvatar() ? "myself" : getSessionUUID()) << "for LOD" << getLODDistance();
}
const bool isControllerLogging = DependencyManager::get<AvatarManager>()->getRenderDistanceControllerIsLogging();
float renderDistance = DependencyManager::get<AvatarManager>()->getRenderDistance();
const float SKIP_HYSTERESIS_PROPORTION = isControllerLogging ? 0.0f : BILLBOARD_HYSTERESIS_PROPORTION;
float distance = glm::distance(qApp->getCamera()->getPosition(), _position);
if (_shouldSkipRender) {
if (distance < renderDistance * (1.0f - SKIP_HYSTERESIS_PROPORTION)) {
_shouldSkipRender = false;
_skeletonModel.setVisibleInScene(true, qApp->getMain3DScene());
if (!isControllerLogging) { // Test for isMyAvatar is prophylactic. Never occurs in current code.
qCDebug(interfaceapp) << "Rerendering" << (isMyAvatar() ? "myself" : getSessionUUID()) << "for distance" << renderDistance;
}
}
} else if (distance > renderDistance * (1.0f + SKIP_HYSTERESIS_PROPORTION)) {
_shouldSkipRender = true;
_skeletonModel.setVisibleInScene(false, qApp->getMain3DScene());
if (!isControllerLogging) {
qCDebug(interfaceapp) << "Unrendering" << (isMyAvatar() ? "myself" : getSessionUUID()) << "for distance" << renderDistance;
}
}
// simple frustum check
@ -199,7 +220,7 @@ void Avatar::simulate(float deltaTime) {
getHand()->simulate(deltaTime, false);
}
if (!_shouldRenderBillboard && inViewFrustum) {
if (!_shouldRenderBillboard && !_shouldSkipRender && inViewFrustum) {
{
PerformanceTimer perfTimer("skeleton");
for (int i = 0; i < _jointData.size(); i++) {

View file

@ -140,6 +140,8 @@ public:
Q_INVOKABLE glm::vec3 getAngularVelocity() const { return _angularVelocity; }
Q_INVOKABLE glm::vec3 getAngularAcceleration() const { return _angularAcceleration; }
Q_INVOKABLE bool getShouldRender() const { return !_shouldSkipRender; }
/// Scales a world space position vector relative to the avatar position and scale
/// \param vector position to be scaled. Will store the result
void scaleVectorRelativeToPosition(glm::vec3 &positionToScale) const;
@ -226,6 +228,7 @@ private:
bool _initialized;
NetworkTexturePointer _billboardTexture;
bool _shouldRenderBillboard;
bool _shouldSkipRender { false };
bool _isLookAtTarget;
void renderBillboard(RenderArgs* renderArgs);

View file

@ -90,6 +90,21 @@ void AvatarManager::init() {
_myAvatar->addToScene(_myAvatar, scene, pendingChanges);
}
scene->enqueuePendingChanges(pendingChanges);
const float target_fps = qApp->getTargetFrameRate();
_renderDistanceController.setMeasuredValueSetpoint(target_fps);
const float SMALLEST_REASONABLE_HORIZON = 5.0f; // meters
_renderDistanceController.setControlledValueHighLimit(1.0f / SMALLEST_REASONABLE_HORIZON);
_renderDistanceController.setControlledValueLowLimit(1.0f / (float) TREE_SCALE);
// Advice for tuning parameters:
// See PIDController.h. There's a section on tuning in the reference.
// Turn on logging with the following (or from js with AvatarList.setRenderDistanceControllerHistory("avatar render", 300))
//_renderDistanceController.setHistorySize("avatar render", target_fps * 4);
// Note that extra logging/hysteresis is turned off in Avatar.cpp when the above logging is on.
_renderDistanceController.setKP(0.0008f); // Usually about 0.6 of the largest value that doesn't oscillate when the other parameters are 0.
_renderDistanceController.setKI(0.0006f); // Big enough to bring us to target with the above KP.
_renderDistanceController.setKD(0.000001f); // A touch of KD increases the speed with which we get there.
}
void AvatarManager::updateMyAvatar(float deltaTime) {
@ -123,6 +138,17 @@ void AvatarManager::updateOtherAvatars(float deltaTime) {
PerformanceWarning warn(showWarnings, "Application::updateAvatars()");
PerformanceTimer perfTimer("otherAvatars");
_renderDistanceController.setMeasuredValueSetpoint(qApp->getTargetFrameRate()); // No problem updating in flight.
// The PID controller raises the controlled value when the measured value goes up.
// The measured value is frame rate. When the controlled value (1 / render cutoff distance)
// goes up, the render cutoff distance gets closer, the number of rendered avatars is less, and frame rate
// goes up.
const float deduced = qApp->getLastDeducedNonVSyncFps();
const float distance = 1.0f / _renderDistanceController.update(deduced, deltaTime);
_renderDistanceAverage.updateAverage(distance);
_renderDistance = _renderDistanceAverage.getAverage();
int renderableCount = 0;
// simulate avatars
auto hashCopy = getHashCopy();
@ -141,10 +167,14 @@ void AvatarManager::updateOtherAvatars(float deltaTime) {
} else {
avatar->startUpdate();
avatar->simulate(deltaTime);
if (avatar->getShouldRender()) {
renderableCount++;
}
avatar->endUpdate();
++avatarIterator;
}
}
_renderedAvatarCount = renderableCount;
// simulate avatar fades
simulateAvatarFades(deltaTime);

View file

@ -18,6 +18,8 @@
#include <AvatarHashMap.h>
#include <PhysicsEngine.h>
#include <PIDController.h>
#include <SimpleMovingAverage.h>
#include "Avatar.h"
#include "AvatarMotionState.h"
@ -43,6 +45,7 @@ public:
void clearOtherAvatars();
bool shouldShowReceiveStats() const { return _shouldShowReceiveStats; }
PIDController& getRenderDistanceController() { return _renderDistanceController; }
class LocalLight {
public:
@ -64,6 +67,17 @@ public:
void handleCollisionEvents(const CollisionEvents& collisionEvents);
void updateAvatarPhysicsShape(Avatar* avatar);
// Expose results and parameter-tuning operations to other systems, such as stats and javascript.
Q_INVOKABLE float getRenderDistance() { return _renderDistance; }
Q_INVOKABLE int getNumberInRenderRange() { return _renderedAvatarCount; }
Q_INVOKABLE bool getRenderDistanceControllerIsLogging() { return _renderDistanceController.getIsLogging(); }
Q_INVOKABLE void setRenderDistanceControllerHistory(QString label, int size) { return _renderDistanceController.setHistorySize(label, size); }
Q_INVOKABLE void setRenderDistanceKP(float newValue) { _renderDistanceController.setKP(newValue); }
Q_INVOKABLE void setRenderDistanceKI(float newValue) { _renderDistanceController.setKI(newValue); }
Q_INVOKABLE void setRenderDistanceKD(float newValue) { _renderDistanceController.setKD(newValue); }
Q_INVOKABLE void setRenderDistanceLowLimit(float newValue) { _renderDistanceController.setControlledValueLowLimit(newValue); }
Q_INVOKABLE void setRenderDistanceHighLimit(float newValue) { _renderDistanceController.setControlledValueHighLimit(newValue); }
public slots:
void setShouldShowReceiveStats(bool shouldShowReceiveStats) { _shouldShowReceiveStats = shouldShowReceiveStats; }
@ -90,6 +104,10 @@ private:
QVector<AvatarManager::LocalLight> _localLights;
bool _shouldShowReceiveStats = false;
float _renderDistance { (float) TREE_SCALE };
int _renderedAvatarCount { 0 };
PIDController _renderDistanceController { };
SimpleMovingAverage _renderDistanceAverage { 10 };
SetOfAvatarMotionStates _avatarMotionStates;
SetOfMotionStates _motionStatesToAdd;
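Since the controller knobs above are Q_INVOKABLE, they can be exercised from script, as the comment in AvatarManager::init() suggests. A hedged tuning sketch from JavaScript; the gain values are the ones chosen in init(), not recommendations, and the TREE_SCALE value is an assumption:

    // Tune the avatar render-distance PID controller from script.
    AvatarList.setRenderDistanceKP(0.0008);   // ~0.6 of the largest non-oscillating gain
    AvatarList.setRenderDistanceKI(0.0006);
    AvatarList.setRenderDistanceKD(0.000001);
    AvatarList.setRenderDistanceHighLimit(1.0 / 5.0);   // 5 m smallest reasonable horizon
    AvatarList.setRenderDistanceLowLimit(1.0 / 16384);  // assumes TREE_SCALE = 16384

    // Turn on controller logging/history, as suggested in init():
    AvatarList.setRenderDistanceControllerHistory("avatar render", 300);

    // Read back the results that the stats overlay also displays:
    print("render distance (m): " + AvatarList.getRenderDistance());
    print("avatars in range: " + AvatarList.getNumberInRenderRange());

Note that the controlled value is 1 / distance, so the high limit corresponds to the nearest allowed cutoff and the low limit to the farthest.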

View file

@ -49,7 +49,6 @@
#include "ModelReferential.h"
#include "MyAvatar.h"
#include "Physics.h"
#include "Recorder.h"
#include "Util.h"
#include "InterfaceLogging.h"
#include "DebugDraw.h"
@ -181,10 +180,22 @@ MyAvatar::MyAvatar(RigPointer rig) :
setPosition(dummyAvatar.getPosition());
setOrientation(dummyAvatar.getOrientation());
// FIXME attachments
// FIXME joints
// FIXME head lean
// FIXME head orientation
if (!dummyAvatar.getAttachmentData().isEmpty()) {
setAttachmentData(dummyAvatar.getAttachmentData());
}
auto headData = dummyAvatar.getHeadData();
if (headData && _headData) {
// blendshapes
if (!headData->getBlendshapeCoefficients().isEmpty()) {
_headData->setBlendshapeCoefficients(headData->getBlendshapeCoefficients());
}
// head lean
_headData->setLeanForward(headData->getLeanForward());
_headData->setLeanSideways(headData->getLeanSideways());
// head orientation
_headData->setLookAtPosition(headData->getLookAtPosition());
}
});
}

View file

@ -115,6 +115,8 @@ void Stats::updateStats(bool force) {
auto avatarManager = DependencyManager::get<AvatarManager>();
// we need to take one avatar out so we don't include ourselves
STAT_UPDATE(avatarCount, avatarManager->size() - 1);
STAT_UPDATE(avatarRenderableCount, avatarManager->getNumberInRenderRange());
STAT_UPDATE(avatarRenderDistance, (int) round(avatarManager->getRenderDistance())); // deliberately rounding to whole meters
STAT_UPDATE(serverCount, nodeList->size());
STAT_UPDATE(framerate, (int)qApp->getFps());
STAT_UPDATE(simrate, (int)qApp->getAverageSimsPerSecond());

View file

@ -36,6 +36,8 @@ class Stats : public QQuickItem {
STATS_PROPERTY(int, simrate, 0)
STATS_PROPERTY(int, avatarSimrate, 0)
STATS_PROPERTY(int, avatarCount, 0)
STATS_PROPERTY(int, avatarRenderableCount, 0)
STATS_PROPERTY(int, avatarRenderDistance, 0)
STATS_PROPERTY(int, packetInCount, 0)
STATS_PROPERTY(int, packetOutCount, 0)
STATS_PROPERTY(float, mbpsIn, 0)
@ -117,6 +119,8 @@ signals:
void simrateChanged();
void avatarSimrateChanged();
void avatarCountChanged();
void avatarRenderableCountChanged();
void avatarRenderDistanceChanged();
void packetInCountChanged();
void packetOutCountChanged();
void mbpsInChanged();

View file

@ -49,23 +49,6 @@ const AnimPose& AnimSkeleton::getAbsoluteBindPose(int jointIndex) const {
return _absoluteBindPoses[jointIndex];
}
AnimPose AnimSkeleton::getRootAbsoluteBindPoseByChildName(const QString& childName) const {
AnimPose pose = AnimPose::identity;
int jointIndex = nameToJointIndex(childName);
if (jointIndex >= 0) {
int numJoints = (int)(_absoluteBindPoses.size());
if (jointIndex < numJoints) {
int parentIndex = getParentIndex(jointIndex);
while (parentIndex != -1 && parentIndex < numJoints) {
jointIndex = parentIndex;
parentIndex = getParentIndex(jointIndex);
}
pose = _absoluteBindPoses[jointIndex];
}
}
return pose;
}
const AnimPose& AnimSkeleton::getRelativeBindPose(int jointIndex) const {
return _relativeBindPoses[jointIndex];
}

View file

@ -31,7 +31,6 @@ public:
// absolute pose, not relative to parent
const AnimPose& getAbsoluteBindPose(int jointIndex) const;
AnimPose getRootAbsoluteBindPoseByChildName(const QString& childName) const;
// relative to parent pose
const AnimPose& getRelativeBindPose(int jointIndex) const;

View file

@ -108,6 +108,7 @@ public:
const glm::quat& getPostRotation() const { return _postRotation; }
const glm::quat& getDefaultRotation() const { return _defaultRotation; }
glm::vec3 getDefaultTranslation() const { return _defaultTranslation * _unitsScale; }
glm::vec3 getUnscaledDefaultTranslation() const { return _defaultTranslation; }
const glm::quat& getInverseDefaultRotation() const { return _inverseDefaultRotation; }
const QString& getName() const { return _name; }
bool getIsFree() const { return _isFree; }

View file

@ -1288,7 +1288,7 @@ void Rig::updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm
// NOTE: at the moment we do the math in the world-frame, hence the inverse transform is more complex than usual.
glm::mat4 inverse = glm::inverse(glm::mat4_cast(modelRotation) * parentState.getTransform() *
glm::translate(state.getDefaultTranslationInConstrainedFrame()) *
glm::translate(state.getUnscaledDefaultTranslation()) *
state.getPreTransform() * glm::mat4_cast(state.getPreRotation() * state.getDefaultRotation()));
glm::vec3 front = glm::vec3(inverse * glm::vec4(worldHeadOrientation * IDENTITY_FRONT, 0.0f));
glm::vec3 lookAtDelta = lookAtSpot - modelTranslation;
@ -1306,10 +1306,10 @@ void Rig::updateFromHandParameters(const HandParameters& params, float dt) {
// TODO: figure out how to obtain the yFlip from where it is actually stored
glm::quat yFlipHACK = glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f));
AnimPose rootBindPose = _animSkeleton->getRootAbsoluteBindPoseByChildName("LeftHand");
AnimPose hipsBindPose = _animSkeleton->getAbsoluteBindPose(_animSkeleton->nameToJointIndex("Hips"));
if (params.isLeftEnabled) {
_animVars.set("leftHandPosition", rootBindPose.trans + rootBindPose.rot * yFlipHACK * params.leftPosition);
_animVars.set("leftHandRotation", rootBindPose.rot * yFlipHACK * params.leftOrientation);
_animVars.set("leftHandPosition", hipsBindPose.trans + hipsBindPose.rot * yFlipHACK * params.leftPosition);
_animVars.set("leftHandRotation", hipsBindPose.rot * yFlipHACK * params.leftOrientation);
_animVars.set("leftHandType", (int)IKTarget::Type::RotationAndPosition);
} else {
_animVars.unset("leftHandPosition");
@ -1317,8 +1317,8 @@ void Rig::updateFromHandParameters(const HandParameters& params, float dt) {
_animVars.set("leftHandType", (int)IKTarget::Type::HipsRelativeRotationAndPosition);
}
if (params.isRightEnabled) {
_animVars.set("rightHandPosition", rootBindPose.trans + rootBindPose.rot * yFlipHACK * params.rightPosition);
_animVars.set("rightHandRotation", rootBindPose.rot * yFlipHACK * params.rightOrientation);
_animVars.set("rightHandPosition", hipsBindPose.trans + hipsBindPose.rot * yFlipHACK * params.rightPosition);
_animVars.set("rightHandRotation", hipsBindPose.rot * yFlipHACK * params.rightOrientation);
_animVars.set("rightHandType", (int)IKTarget::Type::RotationAndPosition);
} else {
_animVars.unset("rightHandPosition");

View file

@ -35,4 +35,4 @@ namespace AudioConstants {
}
#endif // hifi_AudioConstants_h

View file

@ -1297,8 +1297,51 @@ void AvatarData::updateJointMappings() {
}
}
AttachmentData::AttachmentData() :
scale(1.0f) {
static const QString JSON_ATTACHMENT_URL = QStringLiteral("modelUrl");
static const QString JSON_ATTACHMENT_JOINT_NAME = QStringLiteral("jointName");
static const QString JSON_ATTACHMENT_TRANSFORM = QStringLiteral("transform");
QJsonObject AttachmentData::toJson() const {
QJsonObject result;
if (modelURL.isValid() && !modelURL.isEmpty()) {
result[JSON_ATTACHMENT_URL] = modelURL.toString();
}
if (!jointName.isEmpty()) {
result[JSON_ATTACHMENT_JOINT_NAME] = jointName;
}
// FIXME the transform constructor that takes rot/scale/translation
// doesn't return the correct value for isIdentity()
Transform transform;
transform.setRotation(rotation);
transform.setScale(scale);
transform.setTranslation(translation);
if (!transform.isIdentity()) {
result[JSON_ATTACHMENT_TRANSFORM] = Transform::toJson(transform);
}
return result;
}
void AttachmentData::fromJson(const QJsonObject& json) {
if (json.contains(JSON_ATTACHMENT_URL)) {
const QString modelURLTemp = json[JSON_ATTACHMENT_URL].toString();
if (modelURLTemp != modelURL.toString()) {
modelURL = modelURLTemp;
}
}
if (json.contains(JSON_ATTACHMENT_JOINT_NAME)) {
const QString jointNameTemp = json[JSON_ATTACHMENT_JOINT_NAME].toString();
if (jointNameTemp != jointName) {
jointName = jointNameTemp;
}
}
if (json.contains(JSON_ATTACHMENT_TRANSFORM)) {
Transform transform = Transform::fromJson(json[JSON_ATTACHMENT_TRANSFORM]);
translation = transform.getTranslation();
rotation = transform.getRotation();
scale = transform.getScale().x;
}
}
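For reference, a sketch of the attachment JSON this round-trips, with an illustrative model URL and joint name; per toJson() above, the transform key is omitted when the transform is the identity:

    {
        "modelUrl": "https://example.com/models/watch.fbx",
        "jointName": "RightHand",
        "transform": { ... }
    }

The scale is folded into the transform, and fromJson() reads back only the x component of the transform's scale, so non-uniform attachment scales are not preserved.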
bool AttachmentData::operator==(const AttachmentData& other) const {
@ -1399,15 +1442,11 @@ static const QString JSON_AVATAR_BASIS = QStringLiteral("basisTransform");
static const QString JSON_AVATAR_RELATIVE = QStringLiteral("relativeTransform");
static const QString JSON_AVATAR_JOINT_ARRAY = QStringLiteral("jointArray");
static const QString JSON_AVATAR_HEAD = QStringLiteral("head");
static const QString JSON_AVATAR_HEAD_ROTATION = QStringLiteral("rotation");
static const QString JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS = QStringLiteral("blendShapes");
static const QString JSON_AVATAR_HEAD_LEAN_FORWARD = QStringLiteral("leanForward");
static const QString JSON_AVATAR_HEAD_LEAN_SIDEWAYS = QStringLiteral("leanSideways");
static const QString JSON_AVATAR_HEAD_LOOKAT = QStringLiteral("lookAt");
static const QString JSON_AVATAR_HEAD_MODEL = QStringLiteral("headModel");
static const QString JSON_AVATAR_BODY_MODEL = QStringLiteral("bodyModel");
static const QString JSON_AVATAR_DISPLAY_NAME = QStringLiteral("displayName");
static const QString JSON_AVATAR_ATTACHEMENTS = QStringLiteral("attachments");
static const QString JSON_AVATAR_SCALE = QStringLiteral("scale");
QJsonValue toJsonValue(const JointData& joint) {
QJsonArray result;
@ -1428,93 +1467,84 @@ JointData jointDataFromJsonValue(const QJsonValue& json) {
return result;
}
// Every frame will store both a basis for the recording and a relative transform
// This allows the application to decide whether playback should be relative to an avatar's
// transform at the start of playback, or relative to the transform of the recorded
// avatar
QByteArray AvatarData::toFrame(const AvatarData& avatar) {
QJsonObject AvatarData::toJson() const {
QJsonObject root;
if (!avatar.getFaceModelURL().isEmpty()) {
root[JSON_AVATAR_HEAD_MODEL] = avatar.getFaceModelURL().toString();
if (!getFaceModelURL().isEmpty()) {
root[JSON_AVATAR_HEAD_MODEL] = getFaceModelURL().toString();
}
if (!avatar.getSkeletonModelURL().isEmpty()) {
root[JSON_AVATAR_BODY_MODEL] = avatar.getSkeletonModelURL().toString();
if (!getSkeletonModelURL().isEmpty()) {
root[JSON_AVATAR_BODY_MODEL] = getSkeletonModelURL().toString();
}
if (!avatar.getDisplayName().isEmpty()) {
root[JSON_AVATAR_DISPLAY_NAME] = avatar.getDisplayName();
if (!getDisplayName().isEmpty()) {
root[JSON_AVATAR_DISPLAY_NAME] = getDisplayName();
}
if (!avatar.getAttachmentData().isEmpty()) {
// FIXME serialize attachment data
if (!getAttachmentData().isEmpty()) {
QJsonArray attachmentsJson;
for (auto attachment : getAttachmentData()) {
attachmentsJson.push_back(attachment.toJson());
}
root[JSON_AVATAR_ATTACHEMENTS] = attachmentsJson;
}
auto recordingBasis = avatar.getRecordingBasis();
auto recordingBasis = getRecordingBasis();
if (recordingBasis) {
root[JSON_AVATAR_BASIS] = Transform::toJson(*recordingBasis);
// Find the relative transform
auto relativeTransform = recordingBasis->relativeTransform(avatar.getTransform());
root[JSON_AVATAR_RELATIVE] = Transform::toJson(relativeTransform);
auto relativeTransform = recordingBasis->relativeTransform(getTransform());
if (!relativeTransform.isIdentity()) {
root[JSON_AVATAR_RELATIVE] = Transform::toJson(relativeTransform);
}
} else {
root[JSON_AVATAR_RELATIVE] = Transform::toJson(avatar.getTransform());
root[JSON_AVATAR_RELATIVE] = Transform::toJson(getTransform());
}
auto scale = getTargetScale();
if (scale != 1.0f) {
root[JSON_AVATAR_SCALE] = scale;
}
// Skeleton pose
QJsonArray jointArray;
for (const auto& joint : avatar.getRawJointData()) {
for (const auto& joint : getRawJointData()) {
jointArray.push_back(toJsonValue(joint));
}
root[JSON_AVATAR_JOINT_ARRAY] = jointArray;
const HeadData* head = avatar.getHeadData();
const HeadData* head = getHeadData();
if (head) {
QJsonObject headJson;
QJsonArray blendshapeCoefficients;
for (const auto& blendshapeCoefficient : head->getBlendshapeCoefficients()) {
blendshapeCoefficients.push_back(blendshapeCoefficient);
auto headJson = head->toJson();
if (!headJson.isEmpty()) {
root[JSON_AVATAR_HEAD] = headJson;
}
headJson[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS] = blendshapeCoefficients;
headJson[JSON_AVATAR_HEAD_ROTATION] = toJsonValue(head->getRawOrientation());
headJson[JSON_AVATAR_HEAD_LEAN_FORWARD] = QJsonValue(head->getLeanForward());
headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS] = QJsonValue(head->getLeanSideways());
vec3 relativeLookAt = glm::inverse(avatar.getOrientation()) *
(head->getLookAtPosition() - avatar.getPosition());
headJson[JSON_AVATAR_HEAD_LOOKAT] = toJsonValue(relativeLookAt);
root[JSON_AVATAR_HEAD] = headJson;
}
return QJsonDocument(root).toBinaryData();
return root;
}
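Putting the keys above together, a recorded avatar frame serializes to roughly this shape (values illustrative; keys per the QStringLiteral constants above):

    {
        "headModel": "...",
        "bodyModel": "...",
        "displayName": "...",
        "attachments": [ ... ],
        "basisTransform": { ... },
        "relativeTransform": { ... },
        "scale": 1.2,
        "jointArray": [ ... ],
        "head": { ... }
    }

basisTransform is written only when a recording basis is set; otherwise relativeTransform alone carries the avatar's absolute transform, and scale appears only when it differs from 1.0.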
void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
#ifdef WANT_JSON_DEBUG
qDebug() << doc.toJson(QJsonDocument::JsonFormat::Indented);
#endif
QJsonObject root = doc.object();
if (root.contains(JSON_AVATAR_HEAD_MODEL)) {
auto faceModelURL = root[JSON_AVATAR_HEAD_MODEL].toString();
if (faceModelURL != result.getFaceModelURL().toString()) {
void AvatarData::fromJson(const QJsonObject& json) {
if (json.contains(JSON_AVATAR_HEAD_MODEL)) {
auto faceModelURL = json[JSON_AVATAR_HEAD_MODEL].toString();
if (faceModelURL != getFaceModelURL().toString()) {
QUrl faceModel(faceModelURL);
if (faceModel.isValid()) {
result.setFaceModelURL(faceModel);
setFaceModelURL(faceModel);
}
}
}
if (root.contains(JSON_AVATAR_BODY_MODEL)) {
auto bodyModelURL = root[JSON_AVATAR_BODY_MODEL].toString();
if (bodyModelURL != result.getSkeletonModelURL().toString()) {
result.setSkeletonModelURL(bodyModelURL);
if (json.contains(JSON_AVATAR_BODY_MODEL)) {
auto bodyModelURL = json[JSON_AVATAR_BODY_MODEL].toString();
if (bodyModelURL != getSkeletonModelURL().toString()) {
setSkeletonModelURL(bodyModelURL);
}
}
if (root.contains(JSON_AVATAR_DISPLAY_NAME)) {
auto newDisplayName = root[JSON_AVATAR_DISPLAY_NAME].toString();
if (newDisplayName != result.getDisplayName()) {
result.setDisplayName(newDisplayName);
if (json.contains(JSON_AVATAR_DISPLAY_NAME)) {
auto newDisplayName = json[JSON_AVATAR_DISPLAY_NAME].toString();
if (newDisplayName != getDisplayName()) {
setDisplayName(newDisplayName);
}
}
}
if (root.contains(JSON_AVATAR_RELATIVE)) {
if (json.contains(JSON_AVATAR_RELATIVE)) {
// During playback you can either have the recording basis set to the avatar's current state,
// meaning that all playback is relative to this avatar's starting position, or
// the basis can be loaded from the recording, meaning the playback is relative to the
@ -1522,70 +1552,83 @@ void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
// The first is more useful for playing back recordings on your own avatar, while
// the latter is more useful for playing back other avatars within your scene.
auto currentBasis = result.getRecordingBasis();
auto currentBasis = getRecordingBasis();
if (!currentBasis) {
currentBasis = std::make_shared<Transform>(Transform::fromJson(root[JSON_AVATAR_BASIS]));
currentBasis = std::make_shared<Transform>(Transform::fromJson(json[JSON_AVATAR_BASIS]));
}
auto relativeTransform = Transform::fromJson(root[JSON_AVATAR_RELATIVE]);
auto relativeTransform = Transform::fromJson(json[JSON_AVATAR_RELATIVE]);
auto worldTransform = currentBasis->worldTransform(relativeTransform);
result.setPosition(worldTransform.getTranslation());
result.setOrientation(worldTransform.getRotation());
// TODO: find a way to record/playback the Scale of the avatar
//result.setTargetScale(worldTransform.getScale().x);
setPosition(worldTransform.getTranslation());
setOrientation(worldTransform.getRotation());
}
if (json.contains(JSON_AVATAR_SCALE)) {
setTargetScale((float)json[JSON_AVATAR_SCALE].toDouble());
}
if (root.contains(JSON_AVATAR_ATTACHEMENTS)) {
// FIXME de-serialize attachment data
if (json.contains(JSON_AVATAR_ATTACHEMENTS) && json[JSON_AVATAR_ATTACHEMENTS].isArray()) {
QJsonArray attachmentsJson = json[JSON_AVATAR_ATTACHEMENTS].toArray();
QVector<AttachmentData> attachments;
for (auto attachmentJson : attachmentsJson) {
AttachmentData attachment;
attachment.fromJson(attachmentJson.toObject());
attachments.push_back(attachment);
}
setAttachmentData(attachments);
}
// Joint rotations are relative to the avatar, so they require no basis correction
if (root.contains(JSON_AVATAR_JOINT_ARRAY)) {
if (json.contains(JSON_AVATAR_JOINT_ARRAY)) {
QVector<JointData> jointArray;
QJsonArray jointArrayJson = root[JSON_AVATAR_JOINT_ARRAY].toArray();
QJsonArray jointArrayJson = json[JSON_AVATAR_JOINT_ARRAY].toArray();
jointArray.reserve(jointArrayJson.size());
int i = 0;
for (const auto& jointJson : jointArrayJson) {
auto joint = jointDataFromJsonValue(jointJson);
jointArray.push_back(joint);
result.setJointData(i, joint.rotation, joint.translation);
result._jointData[i].rotationSet = true; // Have to do this to broadcast the avatar's new pose
setJointData(i, joint.rotation, joint.translation);
_jointData[i].rotationSet = true; // Have to do this to broadcast the avatar's new pose
i++;
}
result.setRawJointData(jointArray);
setRawJointData(jointArray);
}
#if 0
// Most head data is relative to the avatar, and needs no basis correction,
// but the lookat vector does need correction
HeadData* head = result._headData;
if (head && root.contains(JSON_AVATAR_HEAD)) {
QJsonObject headJson = root[JSON_AVATAR_HEAD].toObject();
if (headJson.contains(JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS)) {
QVector<float> blendshapeCoefficients;
QJsonArray blendshapeCoefficientsJson = headJson[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS].toArray();
for (const auto& blendshapeCoefficient : blendshapeCoefficientsJson) {
blendshapeCoefficients.push_back((float)blendshapeCoefficient.toDouble());
}
head->setBlendshapeCoefficients(blendshapeCoefficients);
}
if (headJson.contains(JSON_AVATAR_HEAD_ROTATION)) {
head->setOrientation(quatFromJsonValue(headJson[JSON_AVATAR_HEAD_ROTATION]));
}
if (headJson.contains(JSON_AVATAR_HEAD_LEAN_FORWARD)) {
head->setLeanForward((float)headJson[JSON_AVATAR_HEAD_LEAN_FORWARD].toDouble());
}
if (headJson.contains(JSON_AVATAR_HEAD_LEAN_SIDEWAYS)) {
head->setLeanSideways((float)headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS].toDouble());
}
if (headJson.contains(JSON_AVATAR_HEAD_LOOKAT)) {
auto relativeLookAt = vec3FromJsonValue(headJson[JSON_AVATAR_HEAD_LOOKAT]);
if (glm::length2(relativeLookAt) > 0.01) {
head->setLookAtPosition((result.getOrientation() * relativeLookAt) + result.getPosition());
}
if (json.contains(JSON_AVATAR_HEAD)) {
if (!_headData) {
_headData = new HeadData(this);
}
_headData->fromJson(json[JSON_AVATAR_HEAD].toObject());
}
}
// Every frame will store both a basis for the recording and a relative transform
// This allows the application to decide whether playback should be relative to an avatar's
// transform at the start of playback, or relative to the transform of the recorded
// avatar
QByteArray AvatarData::toFrame(const AvatarData& avatar) {
QJsonObject root = avatar.toJson();
#ifdef WANT_JSON_DEBUG
{
QJsonObject obj = root;
obj.remove(JSON_AVATAR_JOINT_ARRAY);
qDebug().noquote() << QJsonDocument(obj).toJson(QJsonDocument::JsonFormat::Indented);
}
#endif
return QJsonDocument(root).toBinaryData();
}
void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
#ifdef WANT_JSON_DEBUG
{
QJsonObject obj = doc.object();
obj.remove(JSON_AVATAR_JOINT_ARRAY);
qDebug().noquote() << QJsonDocument(obj).toJson(QJsonDocument::JsonFormat::Indented);
}
#endif
result.fromJson(doc.object());
}
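// Usage sketch for the two frame helpers above (hypothetical calling code,
// not part of this change):
static void frameRoundTripSketch(const AvatarData& source, AvatarData& target) {
    QByteArray frame = AvatarData::toFrame(source); // JSON object -> compact Qt binary JSON
    AvatarData::fromFrame(frame, target);           // decode and re-apply onto another avatar
}
// QJsonDocument::toBinaryData()/fromBinaryData() avoid re-parsing text JSON for
// every frame, which is why frames are stored in the binary encoding.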
View file
@ -342,6 +342,8 @@ public:
void clearRecordingBasis();
TransformPointer getRecordingBasis() const;
void setRecordingBasis(TransformPointer recordingBasis = TransformPointer());
QJsonObject toJson() const;
void fromJson(const QJsonObject& json);
public slots:
void sendAvatarDataPacket();
@ -449,13 +451,14 @@ public:
QString jointName;
glm::vec3 translation;
glm::quat rotation;
float scale;
AttachmentData();
float scale { 1.0f };
bool isValid() const { return modelURL.isValid(); }
bool operator==(const AttachmentData& other) const;
QJsonObject toJson() const;
void fromJson(const QJsonObject& json);
};
QDataStream& operator<<(QDataStream& out, const AttachmentData& attachment);
View file
@ -9,13 +9,18 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <glm/gtx/quaternion.hpp>
#include "HeadData.h"
#include <mutex>
#include <QtCore/QJsonObject>
#include <QtCore/QJsonArray>
#include <FaceshiftConstants.h>
#include <GLMHelpers.h>
#include <shared/JSONHelpers.h>
#include "AvatarData.h"
#include "HeadData.h"
/// The names of the blendshapes expected by Faceshift, terminated with an empty string.
extern const char* FACESHIFT_BLENDSHAPES[];
@ -58,6 +63,7 @@ glm::quat HeadData::getOrientation() const {
return _owningAvatar->getOrientation() * getRawOrientation();
}
void HeadData::setOrientation(const glm::quat& orientation) {
// rotate body about vertical axis
glm::quat bodyOrientation = _owningAvatar->getOrientation();
@ -72,19 +78,24 @@ void HeadData::setOrientation(const glm::quat& orientation) {
_baseRoll = eulers.z;
}
void HeadData::setBlendshape(QString name, float val) {
static bool hasInitializedLookupMap = false;
//Lazily construct a lookup map from the blendshapes
static const QMap<QString, int>& getBlendshapesLookupMap() {
static std::once_flag once;
static QMap<QString, int> blendshapeLookupMap;
//Lazily construct a lookup map from the blendshapes
if (!hasInitializedLookupMap) {
std::call_once(once, [&] {
for (int i = 0; i < NUM_FACESHIFT_BLENDSHAPES; i++) {
blendshapeLookupMap[FACESHIFT_BLENDSHAPES[i]] = i;
blendshapeLookupMap[FACESHIFT_BLENDSHAPES[i]] = i;
}
hasInitializedLookupMap = true;
}
});
return blendshapeLookupMap;
}
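// The change above replaces a racy `static bool` guard with std::call_once.
// The same pattern in isolation (hypothetical names; <mutex> is already
// included by this file; illustrative, not part of this change):
static const QMap<QString, int>& lazyIndexMapSketch() {
    static std::once_flag once;
    static QMap<QString, int> map;
    std::call_once(once, [&] { // runs exactly once, even with concurrent callers
        map[QStringLiteral("first")] = 0;
        map[QStringLiteral("second")] = 1;
    });
    return map;
}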
void HeadData::setBlendshape(QString name, float val) {
const auto& blendshapeLookupMap = getBlendshapesLookupMap();
//Check to see if the named blendshape exists, and then set its value if it does
QMap<QString, int>::iterator it = blendshapeLookupMap.find(name);
auto it = blendshapeLookupMap.find(name);
if (it != blendshapeLookupMap.end()) {
if (_blendshapeCoefficients.size() <= it.value()) {
_blendshapeCoefficients.resize(it.value() + 1);
@ -92,3 +103,85 @@ void HeadData::setBlendshape(QString name, float val) {
_blendshapeCoefficients[it.value()] = val;
}
}
static const QString JSON_AVATAR_HEAD_ROTATION = QStringLiteral("rotation");
static const QString JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS = QStringLiteral("blendShapes");
static const QString JSON_AVATAR_HEAD_LEAN_FORWARD = QStringLiteral("leanForward");
static const QString JSON_AVATAR_HEAD_LEAN_SIDEWAYS = QStringLiteral("leanSideways");
static const QString JSON_AVATAR_HEAD_LOOKAT = QStringLiteral("lookAt");
QJsonObject HeadData::toJson() const {
QJsonObject headJson;
const auto& blendshapeLookupMap = getBlendshapesLookupMap();
QJsonObject blendshapesJson;
for (auto name : blendshapeLookupMap.keys()) {
auto index = blendshapeLookupMap[name];
if (index >= _blendshapeCoefficients.size()) {
continue;
}
auto value = _blendshapeCoefficients[index];
if (value == 0.0f) {
continue;
}
blendshapesJson[name] = value;
}
if (!blendshapesJson.isEmpty()) {
headJson[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS] = blendshapesJson;
}
if (getRawOrientation() != quat()) {
headJson[JSON_AVATAR_HEAD_ROTATION] = toJsonValue(getRawOrientation());
}
if (getLeanForward() != 0.0f) {
headJson[JSON_AVATAR_HEAD_LEAN_FORWARD] = getLeanForward();
}
if (getLeanSideways() != 0.0f) {
headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS] = getLeanSideways();
}
auto lookat = getLookAtPosition();
if (lookat != vec3()) {
vec3 relativeLookAt = glm::inverse(_owningAvatar->getOrientation()) *
(getLookAtPosition() - _owningAvatar->getPosition());
headJson[JSON_AVATAR_HEAD_LOOKAT] = toJsonValue(relativeLookAt);
}
return headJson;
}
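// Shape of the JSON produced above, with illustrative values; every field is
// omitted when it still holds its default, which keeps recorded frames small
// (the array encodings for rotation/lookAt are an assumption about
// shared/JSONHelpers, not spelled out in this change):
//
//   {
//     "blendShapes": { "EyeBlink_L": 0.4, "JawOpen": 0.1 },
//     "rotation": [0.0, 0.7, 0.0, 0.7],
//     "leanForward": 0.2,
//     "lookAt": [0.0, 1.7, -10.0]
//   }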
void HeadData::fromJson(const QJsonObject& json) {
if (json.contains(JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS)) {
auto jsonValue = json[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS];
if (jsonValue.isArray()) {
QVector<float> blendshapeCoefficients;
QJsonArray blendshapeCoefficientsJson = jsonValue.toArray();
for (const auto& blendshapeCoefficient : blendshapeCoefficientsJson) {
blendshapeCoefficients.push_back((float)blendshapeCoefficient.toDouble());
}
setBlendshapeCoefficients(blendshapeCoefficients);
} else if (jsonValue.isObject()) {
QJsonObject blendshapeCoefficientsJson = jsonValue.toObject();
for (const QString& name : blendshapeCoefficientsJson.keys()) {
float value = (float)blendshapeCoefficientsJson[name].toDouble();
setBlendshape(name, value);
}
} else {
qWarning() << "Unable to deserialize head json: " << jsonValue;
}
}
if (json.contains(JSON_AVATAR_HEAD_ROTATION)) {
setOrientation(quatFromJsonValue(json[JSON_AVATAR_HEAD_ROTATION]));
}
if (json.contains(JSON_AVATAR_HEAD_LEAN_FORWARD)) {
setLeanForward((float)json[JSON_AVATAR_HEAD_LEAN_FORWARD].toDouble());
}
if (json.contains(JSON_AVATAR_HEAD_LEAN_SIDEWAYS)) {
setLeanSideways((float)json[JSON_AVATAR_HEAD_LEAN_SIDEWAYS].toDouble());
}
if (json.contains(JSON_AVATAR_HEAD_LOOKAT)) {
auto relativeLookAt = vec3FromJsonValue(json[JSON_AVATAR_HEAD_LOOKAT]);
if (glm::length2(relativeLookAt) > 0.01f) {
setLookAtPosition((_owningAvatar->getOrientation() * relativeLookAt) + _owningAvatar->getPosition());
}
}
}
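// fromJson() above accepts two blendshape encodings. A sketch of both input
// shapes (hypothetical helper; illustrative, not part of this change):
static QJsonObject blendshapeInputShapesSketch(bool sparse) {
    QJsonObject head;
    if (sparse) {
        QJsonObject byName; // sparse form written by toJson(): name -> coefficient
        byName[QStringLiteral("JawOpen")] = 0.5;
        head[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS] = byName;
    } else {
        QJsonArray dense;   // legacy dense form: one coefficient per Faceshift index
        dense << 0.0 << 0.5 << 0.25;
        head[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS] = dense;
    }
    return head;
}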
View file
@ -28,6 +28,7 @@ const float MIN_HEAD_ROLL = -50.0f;
const float MAX_HEAD_ROLL = 50.0f;
class AvatarData;
class QJsonObject;
class HeadData {
public:
@ -83,6 +84,9 @@ public:
friend class AvatarData;
QJsonObject toJson() const;
void fromJson(const QJsonObject& json);
protected:
// degrees
float _baseYaw;
View file
@ -1,443 +0,0 @@
//
// Player.cpp
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include <AudioConstants.h>
#include <GLMHelpers.h>
#include <NodeList.h>
#include <StreamUtils.h>
#include "AvatarData.h"
#include "AvatarLogging.h"
#include "Player.h"
static const int INVALID_FRAME = -1;
Player::Player(AvatarData* avatar) :
_avatar(avatar),
_recording(new Recording()),
_currentFrame(INVALID_FRAME),
_frameInterpolationFactor(0.0f),
_pausedFrame(INVALID_FRAME),
_timerOffset(0),
_audioOffset(0),
_audioThread(NULL),
_playFromCurrentPosition(true),
_loop(false),
_useAttachments(true),
_useDisplayName(true),
_useHeadURL(true),
_useSkeletonURL(true)
{
_timer.invalidate();
}
bool Player::isPlaying() const {
return _timer.isValid();
}
bool Player::isPaused() const {
return (_pausedFrame != INVALID_FRAME);
}
qint64 Player::elapsed() const {
if (isPlaying()) {
return _timerOffset + _timer.elapsed();
} else if (isPaused()) {
return _timerOffset;
} else {
return 0;
}
}
void Player::startPlaying() {
if (!_recording || _recording->getFrameNumber() <= 1) {
return;
}
if (!isPaused()) {
_currentContext.globalTimestamp = usecTimestampNow();
_currentContext.domain = DependencyManager::get<NodeList>()->getDomainHandler().getHostname();
_currentContext.position = _avatar->getPosition();
_currentContext.orientation = _avatar->getOrientation();
_currentContext.scale = _avatar->getTargetScale();
_currentContext.headModel = _avatar->getFaceModelURL().toString();
_currentContext.skeletonModel = _avatar->getSkeletonModelURL().toString();
_currentContext.displayName = _avatar->getDisplayName();
_currentContext.attachments = _avatar->getAttachmentData();
_currentContext.orientationInv = glm::inverse(_currentContext.orientation);
RecordingContext& context = _recording->getContext();
if (_useAttachments) {
_avatar->setAttachmentData(context.attachments);
}
if (_useDisplayName) {
_avatar->setDisplayName(context.displayName);
}
if (_useHeadURL) {
_avatar->setFaceModelURL(context.headModel);
}
if (_useSkeletonURL) {
_avatar->setSkeletonModelURL(context.skeletonModel);
}
bool wantDebug = false;
if (wantDebug) {
qCDebug(avatars) << "Player::startPlaying(): Recording Context";
qCDebug(avatars) << "Domain:" << _currentContext.domain;
qCDebug(avatars) << "Position:" << _currentContext.position;
qCDebug(avatars) << "Orientation:" << _currentContext.orientation;
qCDebug(avatars) << "Scale:" << _currentContext.scale;
qCDebug(avatars) << "Head URL:" << _currentContext.headModel;
qCDebug(avatars) << "Skeleton URL:" << _currentContext.skeletonModel;
qCDebug(avatars) << "Display Name:" << _currentContext.displayName;
qCDebug(avatars) << "Num Attachments:" << _currentContext.attachments.size();
for (int i = 0; i < _currentContext.attachments.size(); ++i) {
qCDebug(avatars) << "Model URL:" << _currentContext.attachments[i].modelURL;
qCDebug(avatars) << "Joint Name:" << _currentContext.attachments[i].jointName;
qCDebug(avatars) << "Translation:" << _currentContext.attachments[i].translation;
qCDebug(avatars) << "Rotation:" << _currentContext.attachments[i].rotation;
qCDebug(avatars) << "Scale:" << _currentContext.attachments[i].scale;
}
}
// Fake faceshift connection
_avatar->setForceFaceTrackerConnected(true);
qCDebug(avatars) << "Recorder::startPlaying()";
setupAudioThread();
_currentFrame = 0;
_timerOffset = 0;
_timer.start();
} else {
qCDebug(avatars) << "Recorder::startPlaying(): Unpause";
setupAudioThread();
_timer.start();
setCurrentFrame(_pausedFrame);
_pausedFrame = INVALID_FRAME;
}
}
void Player::stopPlaying() {
if (!isPlaying()) {
return;
}
_pausedFrame = INVALID_FRAME;
_timer.invalidate();
cleanupAudioThread();
_avatar->clearJointsData();
// Turn off fake face tracker connection
_avatar->setForceFaceTrackerConnected(false);
if (_useAttachments) {
_avatar->setAttachmentData(_currentContext.attachments);
}
if (_useDisplayName) {
_avatar->setDisplayName(_currentContext.displayName);
}
if (_useHeadURL) {
_avatar->setFaceModelURL(_currentContext.headModel);
}
if (_useSkeletonURL) {
_avatar->setSkeletonModelURL(_currentContext.skeletonModel);
}
qCDebug(avatars) << "Recorder::stopPlaying()";
}
void Player::pausePlayer() {
_timerOffset = elapsed();
_timer.invalidate();
cleanupAudioThread();
_pausedFrame = _currentFrame;
qCDebug(avatars) << "Recorder::pausePlayer()";
}
void Player::setupAudioThread() {
_audioThread = new QThread();
_audioThread->setObjectName("Player Audio Thread");
_options.position = _avatar->getPosition();
_options.orientation = _avatar->getOrientation();
_options.stereo = _recording->numberAudioChannel() == 2;
_injector.reset(new AudioInjector(_recording->getAudioData(), _options), &QObject::deleteLater);
_injector->moveToThread(_audioThread);
_audioThread->start();
QMetaObject::invokeMethod(_injector.data(), "injectAudio", Qt::QueuedConnection);
}
void Player::cleanupAudioThread() {
_injector->stop();
QObject::connect(_injector.data(), &AudioInjector::finished,
_injector.data(), &AudioInjector::deleteLater);
QObject::connect(_injector.data(), &AudioInjector::destroyed,
_audioThread, &QThread::quit);
QObject::connect(_audioThread, &QThread::finished,
_audioThread, &QThread::deleteLater);
_injector.clear();
_audioThread = NULL;
}
void Player::loopRecording() {
cleanupAudioThread();
setupAudioThread();
_currentFrame = 0;
_timerOffset = 0;
_timer.restart();
}
void Player::loadFromFile(const QString& file) {
if (_recording) {
_recording->clear();
} else {
_recording = QSharedPointer<Recording>();
}
readRecordingFromFile(_recording, file);
_pausedFrame = INVALID_FRAME;
}
void Player::loadRecording(RecordingPointer recording) {
_recording = recording;
_pausedFrame = INVALID_FRAME;
}
void Player::play() {
computeCurrentFrame();
if (_currentFrame < 0 || (_currentFrame >= _recording->getFrameNumber() - 2)) { // -2 because of interpolation
if (_loop) {
loopRecording();
} else {
stopPlaying();
}
return;
}
const RecordingContext* context = &_recording->getContext();
if (_playFromCurrentPosition) {
context = &_currentContext;
}
const RecordingFrame& currentFrame = _recording->getFrame(_currentFrame);
const RecordingFrame& nextFrame = _recording->getFrame(_currentFrame + 1);
glm::vec3 translation = glm::mix(currentFrame.getTranslation(),
nextFrame.getTranslation(),
_frameInterpolationFactor);
_avatar->setPosition(context->position + context->orientation * translation);
glm::quat rotation = safeMix(currentFrame.getRotation(),
nextFrame.getRotation(),
_frameInterpolationFactor);
_avatar->setOrientation(context->orientation * rotation);
float scale = glm::mix(currentFrame.getScale(),
nextFrame.getScale(),
_frameInterpolationFactor);
_avatar->setTargetScale(context->scale * scale);
// Joint array playback
// FIXME: This is still using a deprecated path to assign the joint orientation, since setting the full RawJointData array doesn't
// work for Avatar. We need to fix this with the animation team
const auto& prevJointArray = currentFrame.getJointArray();
const auto& nextJointArray = nextFrame.getJointArray();
QVector<JointData> jointArray(prevJointArray.size());
QVector<glm::quat> jointRotations(prevJointArray.size()); // FIXME: remove once the setRawJointData is fixed
QVector<glm::vec3> jointTranslations(prevJointArray.size()); // FIXME: remove once the setRawJointData is fixed
for (int i = 0; i < jointArray.size(); i++) {
const auto& prevJoint = prevJointArray[i];
const auto& nextJoint = nextJointArray[i];
auto& joint = jointArray[i];
// Rotation
joint.rotationSet = prevJoint.rotationSet || nextJoint.rotationSet;
if (joint.rotationSet) {
joint.rotation = safeMix(prevJoint.rotation, nextJoint.rotation, _frameInterpolationFactor);
jointRotations[i] = joint.rotation; // FIXME: remove once the setRawJointData is fixed
}
joint.translationSet = prevJoint.translationSet || nextJoint.translationSet;
if (joint.translationSet) {
joint.translation = glm::mix(prevJoint.translation, nextJoint.translation, _frameInterpolationFactor);
jointTranslations[i] = joint.translation; // FIXME: remove once the setRawJointData is fixed
}
}
// _avatar->setRawJointData(jointArray); // FIXME: Enable once the setRawJointData is fixed
_avatar->setJointRotations(jointRotations); // FIXME: remove once the setRawJointData is fixed
// _avatar->setJointTranslations(jointTranslations); // FIXME: remove once the setRawJointData is fixed
HeadData* head = const_cast<HeadData*>(_avatar->getHeadData());
if (head) {
// Make sure fake face tracker connection doesn't get turned off
_avatar->setForceFaceTrackerConnected(true);
QVector<float> blendCoef(currentFrame.getBlendshapeCoefficients().size());
for (int i = 0; i < currentFrame.getBlendshapeCoefficients().size(); ++i) {
blendCoef[i] = glm::mix(currentFrame.getBlendshapeCoefficients()[i],
nextFrame.getBlendshapeCoefficients()[i],
_frameInterpolationFactor);
}
head->setBlendshapeCoefficients(blendCoef);
float leanSideways = glm::mix(currentFrame.getLeanSideways(),
nextFrame.getLeanSideways(),
_frameInterpolationFactor);
head->setLeanSideways(leanSideways);
float leanForward = glm::mix(currentFrame.getLeanForward(),
nextFrame.getLeanForward(),
_frameInterpolationFactor);
head->setLeanForward(leanForward);
glm::quat headRotation = safeMix(currentFrame.getHeadRotation(),
nextFrame.getHeadRotation(),
_frameInterpolationFactor);
glm::vec3 eulers = glm::degrees(safeEulerAngles(headRotation));
head->setFinalPitch(eulers.x);
head->setFinalYaw(eulers.y);
head->setFinalRoll(eulers.z);
glm::vec3 lookAt = glm::mix(currentFrame.getLookAtPosition(),
nextFrame.getLookAtPosition(),
_frameInterpolationFactor);
head->setLookAtPosition(context->position + context->orientation * lookAt);
} else {
qCDebug(avatars) << "WARNING: Player couldn't find head data.";
}
_options.position = _avatar->getPosition();
_options.orientation = _avatar->getOrientation();
_injector->setOptions(_options);
}
void Player::setCurrentFrame(int currentFrame) {
if (_recording && currentFrame >= _recording->getFrameNumber()) {
stopPlaying();
return;
}
_currentFrame = currentFrame;
_timerOffset = _recording->getFrameTimestamp(_currentFrame);
if (isPlaying()) {
_timer.start();
setAudioInjectorPosition();
} else {
_pausedFrame = _currentFrame;
}
}
void Player::setCurrentTime(int currentTime) {
if (currentTime >= _recording->getLength()) {
stopPlaying();
return;
}
// Find correct frame
int lowestBound = 0;
int highestBound = _recording->getFrameNumber() - 1;
while (lowestBound + 1 != highestBound) {
assert(lowestBound < highestBound);
int bestGuess = lowestBound +
(highestBound - lowestBound) *
(float)(currentTime - _recording->getFrameTimestamp(lowestBound)) /
(float)(_recording->getFrameTimestamp(highestBound) - _recording->getFrameTimestamp(lowestBound));
if (_recording->getFrameTimestamp(bestGuess) <= currentTime) {
if (currentTime < _recording->getFrameTimestamp(bestGuess + 1)) {
lowestBound = bestGuess;
highestBound = bestGuess + 1;
} else {
lowestBound = bestGuess + 1;
}
} else {
if (_recording->getFrameTimestamp(bestGuess - 1) <= currentTime) {
lowestBound = bestGuess - 1;
highestBound = bestGuess;
} else {
highestBound = bestGuess - 1;
}
}
}
setCurrentFrame(lowestBound);
}
void Player::setVolume(float volume) {
_options.volume = volume;
if (_injector) {
_injector->setOptions(_options);
}
qCDebug(avatars) << "New volume: " << volume;
}
void Player::setAudioOffset(int audioOffset) {
_audioOffset = audioOffset;
}
void Player::setAudioInjectorPosition() {
int MSEC_PER_SEC = 1000;
int FRAME_SIZE = sizeof(AudioConstants::AudioSample) * _recording->numberAudioChannel();
int currentAudioFrame = elapsed() * FRAME_SIZE * (AudioConstants::SAMPLE_RATE / MSEC_PER_SEC);
_injector->setCurrentSendOffset(currentAudioFrame);
}
void Player::setPlayFromCurrentLocation(bool playFromCurrentLocation) {
_playFromCurrentPosition = playFromCurrentLocation;
}
bool Player::computeCurrentFrame() {
if (!isPlaying()) {
_currentFrame = INVALID_FRAME;
return false;
}
if (_currentFrame < 0) {
_currentFrame = 0;
}
qint64 elapsed = glm::clamp(Player::elapsed() - _audioOffset, (qint64)0, (qint64)_recording->getLength());
while (_currentFrame < _recording->getFrameNumber() &&
_recording->getFrameTimestamp(_currentFrame) < elapsed) {
++_currentFrame;
}
while(_currentFrame > 0 &&
_recording->getFrameTimestamp(_currentFrame) > elapsed) {
--_currentFrame;
}
if (_currentFrame == _recording->getFrameNumber() - 1) {
--_currentFrame;
_frameInterpolationFactor = 1.0f;
} else {
qint64 currentTimestamps = _recording->getFrameTimestamp(_currentFrame);
qint64 nextTimestamps = _recording->getFrameTimestamp(_currentFrame + 1);
_frameInterpolationFactor = (float)(elapsed - currentTimestamps) /
(float)(nextTimestamps - currentTimestamps);
}
if (_frameInterpolationFactor < 0.0f || _frameInterpolationFactor > 1.0f) {
_frameInterpolationFactor = 0.0f;
qCDebug(avatars) << "Invalid frame interpolation value: overriding";
}
return true;
}
#endif
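// The bracketing-and-blend scheme computeCurrentFrame() implements above,
// reduced to a standalone sketch (plain Qt types; illustrative only):
static float interpolationFactorSketch(qint64 elapsedMs, qint64 frameMs, qint64 nextFrameMs) {
    if (nextFrameMs <= frameMs) {
        return 0.0f; // degenerate bracket; computeCurrentFrame() overrides these too
    }
    float alpha = (float)(elapsedMs - frameMs) / (float)(nextFrameMs - frameMs);
    // Out-of-range factors are treated as invalid, matching the guard above.
    return (alpha < 0.0f || alpha > 1.0f) ? 0.0f : alpha;
}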
View file
@ -1,94 +0,0 @@
//
// Player.h
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_Player_h
#define hifi_Player_h
#include <recording/Forward.h>
#if 0
#include <AudioInjector.h>
#include <QElapsedTimer>
#include "Recording.h"
class AvatarData;
class Player;
typedef QSharedPointer<Player> PlayerPointer;
typedef QWeakPointer<Player> WeakPlayerPointer;
/// Plays back a recording
class Player {
public:
Player(AvatarData* avatar);
bool isPlaying() const;
bool isPaused() const;
qint64 elapsed() const;
RecordingPointer getRecording() const { return _recording; }
int getCurrentFrame() const { return _currentFrame; }
public slots:
void startPlaying();
void stopPlaying();
void pausePlayer();
void loadFromFile(const QString& file);
void loadRecording(RecordingPointer recording);
void play();
void setCurrentFrame(int currentFrame);
void setCurrentTime(int currentTime);
void setVolume(float volume);
void setAudioOffset(int audioOffset);
void setPlayFromCurrentLocation(bool playFromCurrentPosition);
void setLoop(bool loop) { _loop = loop; }
void useAttachements(bool useAttachments) { _useAttachments = useAttachments; }
void useDisplayName(bool useDisplayName) { _useDisplayName = useDisplayName; }
void useHeadModel(bool useHeadURL) { _useHeadURL = useHeadURL; }
void useSkeletonModel(bool useSkeletonURL) { _useSkeletonURL = useSkeletonURL; }
private:
void setupAudioThread();
void cleanupAudioThread();
void loopRecording();
void setAudioInjectorPosition();
bool computeCurrentFrame();
AvatarData* _avatar;
RecordingPointer _recording;
int _currentFrame;
float _frameInterpolationFactor;
int _pausedFrame;
QElapsedTimer _timer;
int _timerOffset;
int _audioOffset;
QThread* _audioThread;
QSharedPointer<AudioInjector> _injector;
AudioInjectorOptions _options;
RecordingContext _currentContext;
bool _playFromCurrentPosition;
bool _loop;
bool _useAttachments;
bool _useDisplayName;
bool _useHeadURL;
bool _useSkeletonURL;
};
#endif
#endif // hifi_Player_h
View file
@ -1,147 +0,0 @@
//
// Recorder.cpp
//
//
// Created by Clement on 8/7/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include <GLMHelpers.h>
#include <NodeList.h>
#include <StreamUtils.h>
#include "AvatarData.h"
#include "AvatarLogging.h"
#include "Recorder.h"
Recorder::Recorder(AvatarData* avatar) :
_recording(new Recording()),
_avatar(avatar)
{
_timer.invalidate();
}
bool Recorder::isRecording() const {
return _timer.isValid();
}
qint64 Recorder::elapsed() const {
if (isRecording()) {
return _timer.elapsed();
} else {
return 0;
}
}
void Recorder::startRecording() {
qCDebug(avatars) << "Recorder::startRecording()";
_recording->clear();
RecordingContext& context = _recording->getContext();
context.globalTimestamp = usecTimestampNow();
context.domain = DependencyManager::get<NodeList>()->getDomainHandler().getHostname();
context.position = _avatar->getPosition();
context.orientation = _avatar->getOrientation();
context.scale = _avatar->getTargetScale();
context.headModel = _avatar->getFaceModelURL().toString();
context.skeletonModel = _avatar->getSkeletonModelURL().toString();
context.displayName = _avatar->getDisplayName();
context.attachments = _avatar->getAttachmentData();
context.orientationInv = glm::inverse(context.orientation);
bool wantDebug = false;
if (wantDebug) {
qCDebug(avatars) << "Recorder::startRecording(): Recording Context";
qCDebug(avatars) << "Global timestamp:" << context.globalTimestamp;
qCDebug(avatars) << "Domain:" << context.domain;
qCDebug(avatars) << "Position:" << context.position;
qCDebug(avatars) << "Orientation:" << context.orientation;
qCDebug(avatars) << "Scale:" << context.scale;
qCDebug(avatars) << "Head URL:" << context.headModel;
qCDebug(avatars) << "Skeleton URL:" << context.skeletonModel;
qCDebug(avatars) << "Display Name:" << context.displayName;
qCDebug(avatars) << "Num Attachments:" << context.attachments.size();
for (int i = 0; i < context.attachments.size(); ++i) {
qCDebug(avatars) << "Model URL:" << context.attachments[i].modelURL;
qCDebug(avatars) << "Joint Name:" << context.attachments[i].jointName;
qCDebug(avatars) << "Translation:" << context.attachments[i].translation;
qCDebug(avatars) << "Rotation:" << context.attachments[i].rotation;
qCDebug(avatars) << "Scale:" << context.attachments[i].scale;
}
}
_timer.start();
record();
}
void Recorder::stopRecording() {
qCDebug(avatars) << "Recorder::stopRecording()";
_timer.invalidate();
qCDebug(avatars).nospace() << "Recorded " << _recording->getFrameNumber() << " during " << _recording->getLength() << " msec (" << _recording->getFrameNumber() / (_recording->getLength() / 1000.0f) << " fps)";
}
void Recorder::saveToFile(const QString& file) {
if (_recording->isEmpty()) {
qCDebug(avatars) << "Cannot save recording to file, recording is empty.";
}
writeRecordingToFile(_recording, file);
}
void Recorder::record() {
if (isRecording()) {
const RecordingContext& context = _recording->getContext();
RecordingFrame frame;
frame.setBlendshapeCoefficients(_avatar->getHeadData()->getBlendshapeCoefficients());
// Capture the full skeleton joint data
auto& jointData = _avatar->getRawJointData();
frame.setJointArray(jointData);
frame.setTranslation(context.orientationInv * (_avatar->getPosition() - context.position));
frame.setRotation(context.orientationInv * _avatar->getOrientation());
frame.setScale(_avatar->getTargetScale() / context.scale);
const HeadData* head = _avatar->getHeadData();
if (head) {
glm::vec3 rotationDegrees = glm::vec3(head->getFinalPitch(),
head->getFinalYaw(),
head->getFinalRoll());
frame.setHeadRotation(glm::quat(glm::radians(rotationDegrees)));
frame.setLeanForward(head->getLeanForward());
frame.setLeanSideways(head->getLeanSideways());
glm::vec3 relativeLookAt = context.orientationInv *
(head->getLookAtPosition() - context.position);
frame.setLookAtPosition(relativeLookAt);
}
bool wantDebug = false;
if (wantDebug) {
qCDebug(avatars) << "Recording frame #" << _recording->getFrameNumber();
qCDebug(avatars) << "Blendshapes:" << frame.getBlendshapeCoefficients().size();
qCDebug(avatars) << "JointArray:" << frame.getJointArray().size();
qCDebug(avatars) << "Translation:" << frame.getTranslation();
qCDebug(avatars) << "Rotation:" << frame.getRotation();
qCDebug(avatars) << "Scale:" << frame.getScale();
qCDebug(avatars) << "Head rotation:" << frame.getHeadRotation();
qCDebug(avatars) << "Lean Forward:" << frame.getLeanForward();
qCDebug(avatars) << "Lean Sideways:" << frame.getLeanSideways();
qCDebug(avatars) << "LookAtPosition:" << frame.getLookAtPosition();
}
_recording->addFrame(_timer.elapsed(), frame);
}
}
void Recorder::recordAudio(const QByteArray& audioByteArray) {
_recording->addAudioPacket(audioByteArray);
}
#endif
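// record() above re-bases every frame into the context captured at
// startRecording(). The core of that re-basing as a glm-only sketch
// (illustrative, not part of this file):
static glm::vec3 toRecordingSpaceSketch(const glm::quat& contextOrientationInv,
                                        const glm::vec3& contextPosition,
                                        const glm::vec3& worldPosition) {
    // Inverse-rotate the world-space offset so playback can re-root it anywhere.
    return contextOrientationInv * (worldPosition - contextPosition);
}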
View file
@ -1,57 +0,0 @@
//
// Recorder.h
// libraries/avatars/src
//
// Created by Clement on 8/7/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_Recorder_h
#define hifi_Recorder_h
#include <recording/Forward.h>
#if 0
#include "Recording.h"
template<class C>
class QSharedPointer;
class AttachmentData;
class AvatarData;
class Recorder;
class Recording;
typedef QSharedPointer<Recorder> RecorderPointer;
typedef QWeakPointer<Recorder> WeakRecorderPointer;
/// Records avatar data into a recording
class Recorder : public QObject {
Q_OBJECT
public:
Recorder(AvatarData* avatar);
bool isRecording() const;
qint64 elapsed() const;
RecordingPointer getRecording() const { return _recording; }
public slots:
void startRecording();
void stopRecording();
void saveToFile(const QString& file);
void record();
void recordAudio(const QByteArray& audioArray);
private:
QElapsedTimer _timer;
RecordingPointer _recording;
AvatarData* _avatar;
};
#endif
#endif // hifi_Recorder_h
View file
@ -1,663 +0,0 @@
//
// Recording.cpp
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include <AudioConstants.h>
#include <GLMHelpers.h>
#include <NetworkAccessManager.h>
#include <NodeList.h>
#include <Sound.h>
#include <StreamUtils.h>
#include <QBitArray>
#include <QElapsedTimer>
#include <QEventLoop>
#include <QFile>
#include <QFileInfo>
#include <QPair>
#include "AvatarData.h"
#include "AvatarLogging.h"
#include "Recording.h"
// HFR file format magic number (Inspired by PNG)
// (decimal) 17 72 70 82 13 10 26 10
// (hexadecimal) 11 48 46 52 0d 0a 1a 0a
// (ASCII C notation) \021 H F R \r \n \032 \n
static const int MAGIC_NUMBER_SIZE = 8;
static const char MAGIC_NUMBER[MAGIC_NUMBER_SIZE] = {17, 72, 70, 82, 13, 10, 26, 10};
// Version (Major, Minor)
static const QPair<quint8, quint8> VERSION(0, 2);
int SCALE_RADIX = 10;
int BLENDSHAPE_RADIX = 15;
int LEAN_RADIX = 7;
void RecordingFrame::setBlendshapeCoefficients(QVector<float> blendshapeCoefficients) {
_blendshapeCoefficients = blendshapeCoefficients;
}
int Recording::getLength() const {
if (_timestamps.isEmpty()) {
return 0;
}
return _timestamps.last();
}
qint32 Recording::getFrameTimestamp(int i) const {
if (i >= _timestamps.size()) {
return getLength();
}
if (i < 0) {
return 0;
}
return _timestamps[i];
}
const RecordingFrame& Recording::getFrame(int i) const {
assert(i < _timestamps.size());
return _frames[i];
}
int Recording::numberAudioChannel() const {
// Check for stereo audio
float MSEC_PER_SEC = 1000.0f;
float channelLength = ((float)getLength() / MSEC_PER_SEC) * AudioConstants::SAMPLE_RATE *
sizeof(AudioConstants::AudioSample);
return glm::round((float)getAudioData().size() / channelLength);
}
void Recording::addFrame(int timestamp, RecordingFrame &frame) {
_timestamps << timestamp;
_frames << frame;
}
void Recording::clear() {
_timestamps.clear();
_frames.clear();
_audioData.clear();
}
void writeVec3(QDataStream& stream, const glm::vec3& value) {
unsigned char buffer[sizeof(value)];
memcpy(buffer, &value, sizeof(value));
stream.writeRawData(reinterpret_cast<char*>(buffer), sizeof(value));
}
bool readVec3(QDataStream& stream, glm::vec3& value) {
unsigned char buffer[sizeof(value)];
stream.readRawData(reinterpret_cast<char*>(buffer), sizeof(value));
memcpy(&value, buffer, sizeof(value));
return true;
}
void writeQuat(QDataStream& stream, const glm::quat& value) {
unsigned char buffer[256];
int writtenToBuffer = packOrientationQuatToBytes(buffer, value);
stream.writeRawData(reinterpret_cast<char*>(buffer), writtenToBuffer);
}
bool readQuat(QDataStream& stream, glm::quat& value) {
int quatByteSize = 4 * 2; // 4 floats * 2 bytes
unsigned char buffer[256];
stream.readRawData(reinterpret_cast<char*>(buffer), quatByteSize);
int readFromBuffer = unpackOrientationQuatFromBytes(buffer, value);
if (readFromBuffer != quatByteSize) {
return false;
}
return true;
}
bool readFloat(QDataStream& stream, float& value, int radix) {
int floatByteSize = 2; // 1 float * 2 bytes
int16_t buffer[256];
stream.readRawData(reinterpret_cast<char*>(buffer), floatByteSize);
int readFromBuffer = unpackFloatScalarFromSignedTwoByteFixed(buffer, &value, radix);
if (readFromBuffer != floatByteSize) {
return false;
}
return true;
}
void writeRecordingToFile(RecordingPointer recording, const QString& filename) {
if (!recording || recording->getFrameNumber() < 1) {
qCDebug(avatars) << "Can't save empty recording";
return;
}
QElapsedTimer timer;
QFile file(filename);
if (!file.open(QIODevice::ReadWrite | QIODevice::Truncate)){
qCDebug(avatars) << "Couldn't open " << filename;
return;
}
timer.start();
qCDebug(avatars) << "Writing recording to " << filename << ".";
QDataStream fileStream(&file);
// HEADER
file.write(MAGIC_NUMBER, MAGIC_NUMBER_SIZE); // Magic number
fileStream << VERSION; // File format version
const qint64 dataOffsetPos = file.pos();
fileStream << (quint16)0; // Save two empty bytes for the data offset
const qint64 dataLengthPos = file.pos();
fileStream << (quint32)0; // Save four empty bytes for the data offset
const quint64 crc16Pos = file.pos();
fileStream << (quint16)0; // Save two empty bytes for the CRC-16
// METADATA
// TODO
// Write data offset
quint16 dataOffset = file.pos();
file.seek(dataOffsetPos);
fileStream << dataOffset;
file.seek(dataOffset);
// CONTEXT
RecordingContext& context = recording->getContext();
// Global Timestamp
fileStream << context.globalTimestamp;
// Domain
fileStream << context.domain;
// Position
writeVec3(fileStream, context.position);
// Orientation
writeQuat(fileStream, context.orientation);
// Scale
fileStream << context.scale;
// Head model
fileStream << context.headModel;
// Skeleton model
fileStream << context.skeletonModel;
// Display name
fileStream << context.displayName;
// Attachments
fileStream << (quint8)context.attachments.size();
foreach (AttachmentData data, context.attachments) {
// Model
fileStream << data.modelURL.toString();
// Joint name
fileStream << data.jointName;
// Position
writeVec3(fileStream, data.translation);
// Orientation
writeQuat(fileStream, data.rotation);
// Scale
fileStream << data.scale;
}
// RECORDING
fileStream << recording->_timestamps;
QBitArray mask;
quint32 numBlendshapes = 0;
quint32 numJoints = 0;
for (int i = 0; i < recording->_timestamps.size(); ++i) {
mask.fill(false);
int maskIndex = 0;
QByteArray buffer;
QDataStream stream(&buffer, QIODevice::WriteOnly);
RecordingFrame& previousFrame = recording->_frames[(i != 0) ? i - 1 : i];
RecordingFrame& frame = recording->_frames[i];
// Blendshape Coefficients
if (i == 0) {
numBlendshapes = frame.getBlendshapeCoefficients().size();
stream << numBlendshapes;
mask.resize(mask.size() + numBlendshapes);
}
for (quint32 j = 0; j < numBlendshapes; ++j) {
if (i == 0 ||
frame._blendshapeCoefficients[j] != previousFrame._blendshapeCoefficients[j]) {
stream << frame.getBlendshapeCoefficients()[j];
mask.setBit(maskIndex);
}
++maskIndex;
}
const auto& jointArray = frame.getJointArray();
if (i == 0) {
numJoints = jointArray.size();
stream << numJoints;
// 2 fields per joint
mask.resize(mask.size() + numJoints * 2);
}
for (quint32 j = 0; j < numJoints; j++) {
const auto& joint = jointArray[j];
if (true) { //(joint.rotationSet) {
writeQuat(stream, joint.rotation);
mask.setBit(maskIndex);
}
maskIndex++;
if (joint.translationSet) {
writeVec3(stream, joint.translation);
mask.setBit(maskIndex);
}
maskIndex++;
}
// Translation
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._translation != previousFrame._translation) {
writeVec3(stream, frame._translation);
mask.setBit(maskIndex);
}
maskIndex++;
// Rotation
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._rotation != previousFrame._rotation) {
writeQuat(stream, frame._rotation);
mask.setBit(maskIndex);
}
maskIndex++;
// Scale
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._scale != previousFrame._scale) {
stream << frame._scale;
mask.setBit(maskIndex);
}
maskIndex++;
// Head Rotation
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._headRotation != previousFrame._headRotation) {
writeQuat(stream, frame._headRotation);
mask.setBit(maskIndex);
}
maskIndex++;
// Lean Sideways
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._leanSideways != previousFrame._leanSideways) {
stream << frame._leanSideways;
mask.setBit(maskIndex);
}
maskIndex++;
// Lean Forward
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._leanForward != previousFrame._leanForward) {
stream << frame._leanForward;
mask.setBit(maskIndex);
}
maskIndex++;
// LookAt Position
if (i == 0) {
mask.resize(mask.size() + 1);
}
if (i == 0 || frame._lookAtPosition != previousFrame._lookAtPosition) {
writeVec3(stream, frame._lookAtPosition);
mask.setBit(maskIndex);
}
maskIndex++;
fileStream << mask;
fileStream << buffer;
}
fileStream << recording->getAudioData();
qint64 writingTime = timer.restart();
// Write data length and CRC-16
quint32 dataLength = file.pos() - dataOffset;
file.seek(dataOffset); // Go to beginning of data for checksum
quint16 crc16 = qChecksum(file.readAll().constData(), dataLength);
file.seek(dataLengthPos);
fileStream << dataLength;
file.seek(crc16Pos);
fileStream << crc16;
file.seek(dataOffset + dataLength);
bool wantDebug = true;
if (wantDebug) {
qCDebug(avatars) << "[DEBUG] WRITE recording";
qCDebug(avatars) << "Header:";
qCDebug(avatars) << "File Format version:" << VERSION;
qCDebug(avatars) << "Data length:" << dataLength;
qCDebug(avatars) << "Data offset:" << dataOffset;
qCDebug(avatars) << "CRC-16:" << crc16;
qCDebug(avatars) << "Context block:";
qCDebug(avatars) << "Global timestamp:" << context.globalTimestamp;
qCDebug(avatars) << "Domain:" << context.domain;
qCDebug(avatars) << "Position:" << context.position;
qCDebug(avatars) << "Orientation:" << context.orientation;
qCDebug(avatars) << "Scale:" << context.scale;
qCDebug(avatars) << "Head Model:" << context.headModel;
qCDebug(avatars) << "Skeleton Model:" << context.skeletonModel;
qCDebug(avatars) << "Display Name:" << context.displayName;
qCDebug(avatars) << "Num Attachments:" << context.attachments.size();
for (int i = 0; i < context.attachments.size(); ++i) {
qCDebug(avatars) << "Model URL:" << context.attachments[i].modelURL;
qCDebug(avatars) << "Joint Name:" << context.attachments[i].jointName;
qCDebug(avatars) << "Translation:" << context.attachments[i].translation;
qCDebug(avatars) << "Rotation:" << context.attachments[i].rotation;
qCDebug(avatars) << "Scale:" << context.attachments[i].scale;
}
qCDebug(avatars) << "Recording:";
qCDebug(avatars) << "Total frames:" << recording->getFrameNumber();
qCDebug(avatars) << "Audio array:" << recording->getAudioData().size();
}
qint64 checksumTime = timer.elapsed();
qCDebug(avatars) << "Wrote" << file.size() << "bytes in" << writingTime + checksumTime << "ms. (" << checksumTime << "ms for checksum)";
}
RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString& filename) {
QByteArray byteArray;
QUrl url(filename);
QElapsedTimer timer;
timer.start(); // timer used for debug information (download/parsing time)
// Acquire the data and place it in byteArray
// Return if data unavailable
if (url.scheme() == "http" || url.scheme() == "https" || url.scheme() == "ftp") {
// Download file if necessary
qCDebug(avatars) << "Downloading recording at" << url;
QNetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance();
QNetworkRequest networkRequest = QNetworkRequest(url);
networkRequest.setHeader(QNetworkRequest::UserAgentHeader, HIGH_FIDELITY_USER_AGENT);
QNetworkReply* reply = networkAccessManager.get(networkRequest);
QEventLoop loop;
QObject::connect(reply, SIGNAL(finished()), &loop, SLOT(quit()));
loop.exec(); // wait for file
if (reply->error() != QNetworkReply::NoError) {
qCDebug(avatars) << "Error while downloading recording: " << reply->error();
reply->deleteLater();
return recording;
}
byteArray = reply->readAll();
reply->deleteLater();
// print debug + restart timer
qCDebug(avatars) << "Downloaded " << byteArray.size() << " bytes in " << timer.restart() << " ms.";
} else {
// If local file, just read it.
qCDebug(avatars) << "Reading recording from " << filename << ".";
QFile file(filename);
if (!file.open(QIODevice::ReadOnly)){
qCDebug(avatars) << "Could not open local file: " << url;
return recording;
}
byteArray = file.readAll();
file.close();
}
if (!filename.endsWith(".hfr") && !filename.endsWith(".HFR")) {
qCDebug(avatars) << "File extension not recognized";
}
// Reset the recording passed in the arguments
if (!recording) {
recording = QSharedPointer<Recording>::create();
}
QDataStream fileStream(byteArray);
// HEADER
QByteArray magicNumber(MAGIC_NUMBER, MAGIC_NUMBER_SIZE);
if (!byteArray.startsWith(magicNumber)) {
qCDebug(avatars) << "ERROR: This is not a .HFR file. (Magic Number incorrect)";
return recording;
}
fileStream.skipRawData(MAGIC_NUMBER_SIZE);
QPair<quint8, quint8> version;
fileStream >> version; // File format version
if (version != VERSION && version != QPair<quint8, quint8>(0,1)) {
qCDebug(avatars) << "ERROR: This file format version is not supported.";
return recording;
}
quint16 dataOffset = 0;
fileStream >> dataOffset;
quint32 dataLength = 0;
fileStream >> dataLength;
quint16 crc16 = 0;
fileStream >> crc16;
// Check checksum
quint16 computedCRC16 = qChecksum(byteArray.constData() + dataOffset, dataLength);
if (computedCRC16 != crc16) {
qCDebug(avatars) << "Checksum does not match. Bailling!";
recording.clear();
return recording;
}
// METADATA
// TODO
// CONTEXT
RecordingContext& context = recording->getContext();
// Global Timestamp
fileStream >> context.globalTimestamp;
// Domain
fileStream >> context.domain;
// Position
if (!readVec3(fileStream, context.position)) {
qCDebug(avatars) << "Couldn't read file correctly. (Invalid vec3)";
recording.clear();
return recording;
}
// Orientation
if (!readQuat(fileStream, context.orientation)) {
qCDebug(avatars) << "Couldn't read file correctly. (Invalid quat)";
recording.clear();
return recording;
}
// Scale
if (version == QPair<quint8, quint8>(0,1)) {
readFloat(fileStream, context.scale, SCALE_RADIX);
} else {
fileStream >> context.scale;
}
// Head model
fileStream >> context.headModel;
// Skeleton model
fileStream >> context.skeletonModel;
// Display Name
fileStream >> context.displayName;
// Attachments
quint8 numAttachments = 0;
fileStream >> numAttachments;
for (int i = 0; i < numAttachments; ++i) {
AttachmentData data;
// Model
QString modelURL;
fileStream >> modelURL;
data.modelURL = modelURL;
// Joint name
fileStream >> data.jointName;
// Translation
if (!readVec3(fileStream, data.translation)) {
qCDebug(avatars) << "Couldn't read attachment correctly. (Invalid vec3)";
continue;
}
// Rotation
if (!readQuat(fileStream, data.rotation)) {
qCDebug(avatars) << "Couldn't read attachment correctly. (Invalid quat)";
continue;
}
// Scale
if (version == QPair<quint8, quint8>(0,1)) {
readFloat(fileStream, data.scale, SCALE_RADIX);
} else {
fileStream >> data.scale;
}
context.attachments << data;
}
quint32 numBlendshapes = 0;
quint32 numJoints = 0;
// RECORDING
fileStream >> recording->_timestamps;
for (int i = 0; i < recording->_timestamps.size(); ++i) {
QBitArray mask;
QByteArray buffer;
QDataStream stream(&buffer, QIODevice::ReadOnly);
RecordingFrame frame;
RecordingFrame& previousFrame = (i == 0) ? frame : recording->_frames.last();
fileStream >> mask;
fileStream >> buffer;
int maskIndex = 0;
// Blendshape Coefficients
if (i == 0) {
stream >> numBlendshapes;
}
frame._blendshapeCoefficients.resize(numBlendshapes);
for (quint32 j = 0; j < numBlendshapes; ++j) {
if (!mask[maskIndex++]) {
frame._blendshapeCoefficients[j] = previousFrame._blendshapeCoefficients[j];
} else if (version == QPair<quint8, quint8>(0,1)) {
readFloat(stream, frame._blendshapeCoefficients[j], BLENDSHAPE_RADIX);
} else {
stream >> frame._blendshapeCoefficients[j];
}
}
// Joint Array
if (i == 0) {
stream >> numJoints;
}
frame._jointArray.resize(numJoints);
for (quint32 j = 0; j < numJoints; ++j) {
auto& joint = frame._jointArray[j];
if (mask[maskIndex++] && readQuat(stream, joint.rotation)) {
joint.rotationSet = true;
} else {
joint.rotationSet = false;
}
if (mask[maskIndex++] && readVec3(stream, joint.translation)) {
joint.translationSet = true;
} else {
joint.translationSet = false;
}
}
if (!mask[maskIndex++] || !readVec3(stream, frame._translation)) {
frame._translation = previousFrame._translation;
}
if (!mask[maskIndex++] || !readQuat(stream, frame._rotation)) {
frame._rotation = previousFrame._rotation;
}
if (!mask[maskIndex++]) {
frame._scale = previousFrame._scale;
} else if (version == QPair<quint8, quint8>(0,1)) {
readFloat(stream, frame._scale, SCALE_RADIX);
} else {
stream >> frame._scale;
}
if (!mask[maskIndex++] || !readQuat(stream, frame._headRotation)) {
frame._headRotation = previousFrame._headRotation;
}
if (!mask[maskIndex++]) {
frame._leanSideways = previousFrame._leanSideways;
} else if (version == QPair<quint8, quint8>(0,1)) {
readFloat(stream, frame._leanSideways, LEAN_RADIX);
} else {
stream >> frame._leanSideways;
}
if (!mask[maskIndex++]) {
frame._leanForward = previousFrame._leanForward;
} else if (version == QPair<quint8, quint8>(0,1)) {
readFloat(stream, frame._leanForward, LEAN_RADIX);
} else {
stream >> frame._leanForward;
}
if (!mask[maskIndex++] || !readVec3(stream, frame._lookAtPosition)) {
frame._lookAtPosition = previousFrame._lookAtPosition;
}
recording->_frames << frame;
}
QByteArray audioArray;
fileStream >> audioArray;
recording->addAudioPacket(audioArray);
bool wantDebug = true;
if (wantDebug) {
qCDebug(avatars) << "[DEBUG] READ recording";
qCDebug(avatars) << "Header:";
qCDebug(avatars) << "File Format version:" << VERSION;
qCDebug(avatars) << "Data length:" << dataLength;
qCDebug(avatars) << "Data offset:" << dataOffset;
qCDebug(avatars) << "CRC-16:" << crc16;
qCDebug(avatars) << "Context block:";
qCDebug(avatars) << "Global timestamp:" << context.globalTimestamp;
qCDebug(avatars) << "Domain:" << context.domain;
qCDebug(avatars) << "Position:" << context.position;
qCDebug(avatars) << "Orientation:" << context.orientation;
qCDebug(avatars) << "Scale:" << context.scale;
qCDebug(avatars) << "Head Model:" << context.headModel;
qCDebug(avatars) << "Skeleton Model:" << context.skeletonModel;
qCDebug(avatars) << "Display Name:" << context.displayName;
qCDebug(avatars) << "Num Attachments:" << numAttachments;
for (int i = 0; i < numAttachments; ++i) {
qCDebug(avatars) << "Model URL:" << context.attachments[i].modelURL;
qCDebug(avatars) << "Joint Name:" << context.attachments[i].jointName;
qCDebug(avatars) << "Translation:" << context.attachments[i].translation;
qCDebug(avatars) << "Rotation:" << context.attachments[i].rotation;
qCDebug(avatars) << "Scale:" << context.attachments[i].scale;
}
qCDebug(avatars) << "Recording:";
qCDebug(avatars) << "Total frames:" << recording->getFrameNumber();
qCDebug(avatars) << "Audio array:" << recording->getAudioData().size();
}
qCDebug(avatars) << "Read " << byteArray.size() << " bytes in " << timer.elapsed() << " ms.";
return recording;
}
#endif
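// The HFR header handling above, reduced to a validation-order sketch
// (Qt-only, hypothetical helper; illustrative, not part of this file):
static bool looksLikeHfrSketch(const QByteArray& bytes) {
    static const char MAGIC[] = { 17, 72, 70, 82, 13, 10, 26, 10 }; // \021 H F R \r \n \032 \n
    if (!bytes.startsWith(QByteArray(MAGIC, sizeof(MAGIC)))) {
        return false; // wrong container
    }
    QDataStream in(bytes);
    in.skipRawData(sizeof(MAGIC));
    QPair<quint8, quint8> version;
    quint16 dataOffset = 0;
    quint32 dataLength = 0;
    quint16 crc16 = 0;
    in >> version >> dataOffset >> dataLength >> crc16;
    // The checksum covers [dataOffset, dataOffset + dataLength), exactly as in
    // readRecordingFromFile() above.
    return qChecksum(bytes.constData() + dataOffset, dataLength) == crc16;
}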
View file
@ -1,131 +0,0 @@
//
// Recording.h
//
//
// Created by Clement on 9/17/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_Recording_h
#define hifi_Recording_h
#if 0
#include <QString>
#include <QVector>
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
template<class C>
class QSharedPointer;
class AttachmentData;
class Recording;
class RecordingFrame;
class Sound;
class JointData;
typedef QSharedPointer<Recording> RecordingPointer;
/// Stores a recording's static data
class RecordingContext {
public:
quint64 globalTimestamp;
QString domain;
glm::vec3 position;
glm::quat orientation;
float scale;
QString headModel;
QString skeletonModel;
QString displayName;
QVector<AttachmentData> attachments;
// This avoids recomputation every frame while recording.
glm::quat orientationInv;
};
/// Stores a recording
class Recording {
public:
bool isEmpty() const { return _timestamps.isEmpty(); }
int getLength() const; // in ms
RecordingContext& getContext() { return _context; }
int getFrameNumber() const { return _frames.size(); }
qint32 getFrameTimestamp(int i) const;
const RecordingFrame& getFrame(int i) const;
const QByteArray& getAudioData() const { return _audioData; }
int numberAudioChannel() const;
protected:
void addFrame(int timestamp, RecordingFrame& frame);
void addAudioPacket(const QByteArray& byteArray) { _audioData.append(byteArray); }
void clear();
private:
RecordingContext _context;
QVector<qint32> _timestamps;
QVector<RecordingFrame> _frames;
QByteArray _audioData;
friend class Recorder;
friend class Player;
friend void writeRecordingToFile(RecordingPointer recording, const QString& file);
friend RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString& file);
friend RecordingPointer readRecordingFromRecFile(RecordingPointer recording, const QString& filename,
const QByteArray& byteArray);
};
/// Stores the different values associated to one recording frame
class RecordingFrame {
public:
QVector<float> getBlendshapeCoefficients() const { return _blendshapeCoefficients; }
QVector<JointData> getJointArray() const { return _jointArray; }
glm::vec3 getTranslation() const { return _translation; }
glm::quat getRotation() const { return _rotation; }
float getScale() const { return _scale; }
glm::quat getHeadRotation() const { return _headRotation; }
float getLeanSideways() const { return _leanSideways; }
float getLeanForward() const { return _leanForward; }
glm::vec3 getLookAtPosition() const { return _lookAtPosition; }
protected:
void setBlendshapeCoefficients(QVector<float> blendshapeCoefficients);
void setJointArray(const QVector<JointData>& jointArray) { _jointArray = jointArray; }
void setTranslation(const glm::vec3& translation) { _translation = translation; }
void setRotation(const glm::quat& rotation) { _rotation = rotation; }
void setScale(float scale) { _scale = scale; }
void setHeadRotation(glm::quat headRotation) { _headRotation = headRotation; }
void setLeanSideways(float leanSideways) { _leanSideways = leanSideways; }
void setLeanForward(float leanForward) { _leanForward = leanForward; }
void setLookAtPosition(const glm::vec3& lookAtPosition) { _lookAtPosition = lookAtPosition; }
private:
QVector<float> _blendshapeCoefficients;
QVector<JointData> _jointArray;
glm::vec3 _translation;
glm::quat _rotation;
float _scale;
glm::quat _headRotation;
float _leanSideways;
float _leanForward;
glm::vec3 _lookAtPosition;
friend class Recorder;
friend void writeRecordingToFile(RecordingPointer recording, const QString& file);
friend RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString& file);
friend RecordingPointer readRecordingFromRecFile(RecordingPointer recording, const QString& filename,
const QByteArray& byteArray);
};
void writeRecordingToFile(RecordingPointer recording, const QString& filename);
RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString& filename);
RecordingPointer readRecordingFromRecFile(RecordingPointer recording, const QString& filename, const QByteArray& byteArray);
#endif
#endif // hifi_Recording_h
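// The per-frame delta scheme used by writeRecordingToFile()/readRecordingFromFile():
// one mask bit per field, with the field's bytes present only when its bit is
// set. Reader side for a single float field, as a sketch (illustrative only):
static float readMaskedFloatSketch(const QBitArray& mask, int& maskIndex,
                                   QDataStream& stream, float previousValue) {
    if (!mask[maskIndex++]) {
        return previousValue; // unchanged since the previous frame; nothing was written
    }
    float value = 0.0f;
    stream >> value;
    return value;
}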
View file
@ -122,7 +122,7 @@ void EntityTreeRenderer::init() {
}
void EntityTreeRenderer::shutdown() {
_entitiesScriptEngine->disconnect(); // disconnect all slots/signals from the script engine
_entitiesScriptEngine->disconnectNonEssentialSignals(); // disconnect all slots/signals from the script engine, except essential
_shuttingDown = true;
}
View file
@ -219,9 +219,11 @@ void RenderableModelEntityItem::render(RenderArgs* args) {
if (hasModel()) {
if (_model) {
if (getModelURL() != _model->getURL().toString()) {
qDebug() << "Updating model URL: " << getModelURL();
_model->setURL(getModelURL());
// check if the URL has changed
auto& currentURL = getParsedModelURL();
if (currentURL != _model->getURL()) {
qDebug().noquote() << "Updating model URL: " << currentURL.toDisplayString();
_model->setURL(currentURL);
}
render::ScenePointer scene = AbstractViewStateInterface::instance()->getMain3DScene();
View file
@ -31,6 +31,7 @@ EntityItemPointer RenderablePolyLineEntityItem::factory(const EntityItemID& enti
RenderablePolyLineEntityItem::RenderablePolyLineEntityItem(const EntityItemID& entityItemID, const EntityItemProperties& properties) :
PolyLineEntityItem(entityItemID, properties) {
_numVertices = 0;
_vertices = QVector<glm::vec3>(0.0f);
}
@ -114,13 +115,56 @@ void RenderablePolyLineEntityItem::updateGeometry() {
_numVertices += 2;
}
_pointsChanged = false;
_normalsChanged = false;
_strokeWidthsChanged = false;
}
void RenderablePolyLineEntityItem::updateVertices() {
// Calculate the minimum vector size out of normals, points, and stroke widths
int minVectorSize = _normals.size();
if (_points.size() < minVectorSize) {
minVectorSize = _points.size();
}
if (_strokeWidths.size() < minVectorSize) {
minVectorSize = _strokeWidths.size();
}
_vertices.clear();
glm::vec3 v1, v2, tangent, binormal, point;
int finalIndex = minVectorSize - 1;
for (int i = 0; i < finalIndex; i++) {
float width = _strokeWidths.at(i);
point = _points.at(i);
tangent = _points.at(i + 1) - point;
glm::vec3 normal = _normals.at(i);
binormal = glm::normalize(glm::cross(tangent, normal)) * width;
// Check to make sure binormal is not a NAN. If it is, don't add to vertices vector
if (binormal.x != binormal.x) {
continue;
}
v1 = point + binormal;
v2 = point - binormal;
_vertices << v1 << v2;
}
// For the last point we can assume the binormal is the same, since it forms the last two vertices of the quad
point = _points.at(finalIndex);
v1 = point + binormal;
v2 = point - binormal;
_vertices << v1 << v2;
}
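// A minimal sketch of the per-segment math above; ribbonEdge is an illustrative
// standalone helper (not part of this class), assuming <glm/glm.hpp>:
glm::vec3 ribbonEdge(const glm::vec3& point, const glm::vec3& next,
                     const glm::vec3& normal, float width, bool positiveSide) {
    glm::vec3 tangent = next - point; // direction of travel along the polyline
    glm::vec3 binormal = glm::normalize(glm::cross(tangent, normal)) * width; // half-width offset
    return positiveSide ? point + binormal : point - binormal; // one of the two quad edges
}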
void RenderablePolyLineEntityItem::render(RenderArgs* args) {
QWriteLocker lock(&_quadReadWriteLock);
if (_points.size() < 2 || _normals.size () < 2 || _vertices.size() < 2) {
if (_points.size() < 2 || _normals.size () < 2 || _strokeWidths.size() < 2) {
return;
}
@ -139,7 +183,8 @@ void RenderablePolyLineEntityItem::render(RenderArgs* args) {
Q_ASSERT(getType() == EntityTypes::PolyLine);
Q_ASSERT(args->_batch);
if (_pointsChanged) {
if (_pointsChanged || _strokeWidthsChanged || _normalsChanged) {
updateVertices();
updateGeometry();
}

View file

@ -40,8 +40,10 @@ public:
protected:
void updateGeometry();
void updateVertices();
gpu::BufferPointer _verticesBuffer;
unsigned int _numVertices;
QVector<glm::vec3> _vertices;
};

View file

@ -622,7 +622,7 @@ int EntityItem::readEntityDataFromBuffer(const unsigned char* data, int bytesLef
auto nodeList = DependencyManager::get<NodeList>();
const QUuid& myNodeID = nodeList->getSessionUUID();
bool weOwnSimulation = _simulationOwner.matchesValidID(myNodeID);
if (args.bitstreamVersion >= VERSION_ENTITIES_HAVE_SIMULATION_OWNER_AND_ACTIONS_OVER_WIRE) {
// pack SimulationOwner and terse update properties near each other
@ -799,17 +799,11 @@ void EntityItem::setDensity(float density) {
_density = glm::max(glm::min(density, ENTITY_ITEM_MAX_DENSITY), ENTITY_ITEM_MIN_DENSITY);
}
const float ACTIVATION_RELATIVE_DENSITY_DELTA = 0.01f; // 1 percent
void EntityItem::updateDensity(float density) {
float clampedDensity = glm::max(glm::min(density, ENTITY_ITEM_MAX_DENSITY), ENTITY_ITEM_MIN_DENSITY);
if (_density != clampedDensity) {
_density = clampedDensity;
if (fabsf(_density - clampedDensity) / _density > ACTIVATION_RELATIVE_DENSITY_DELTA) {
// the density has changed enough that we should update the physics simulation
_dirtyFlags |= Simulation::DIRTY_MASS;
}
_dirtyFlags |= Simulation::DIRTY_MASS;
}
}
@ -822,11 +816,16 @@ void EntityItem::setMass(float mass) {
// compute new density
const float MIN_VOLUME = 1.0e-6f; // 0.001mm^3
float newDensity = 1.0f;
if (volume < 1.0e-6f) {
// avoid divide by zero
_density = glm::min(mass / MIN_VOLUME, ENTITY_ITEM_MAX_DENSITY);
newDensity = glm::min(mass / MIN_VOLUME, ENTITY_ITEM_MAX_DENSITY);
} else {
_density = glm::max(glm::min(mass / volume, ENTITY_ITEM_MAX_DENSITY), ENTITY_ITEM_MIN_DENSITY);
newDensity = glm::max(glm::min(mass / volume, ENTITY_ITEM_MAX_DENSITY), ENTITY_ITEM_MIN_DENSITY);
}
if (_density != newDensity) {
_density = newDensity;
_dirtyFlags |= Simulation::DIRTY_MASS;
}
}
@ -884,12 +883,12 @@ void EntityItem::simulateKinematicMotion(float timeElapsed, bool setFlags) {
#ifdef WANT_DEBUG
qCDebug(entities) << "EntityItem::simulateKinematicMotion timeElapsed" << timeElapsed;
#endif
const float MIN_TIME_SKIP = 0.0f;
const float MAX_TIME_SKIP = 1.0f; // in seconds
timeElapsed = glm::clamp(timeElapsed, MIN_TIME_SKIP, MAX_TIME_SKIP);
if (hasActions()) {
return;
}
@ -1312,24 +1311,16 @@ void EntityItem::updatePosition(const glm::vec3& value) {
if (shouldSuppressLocationEdits()) {
return;
}
auto delta = glm::distance(getPosition(), value);
if (delta > IGNORE_POSITION_DELTA) {
_dirtyFlags |= Simulation::DIRTY_POSITION;
if (getPosition() != value) {
setPosition(value);
if (delta > ACTIVATION_POSITION_DELTA) {
_dirtyFlags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
_dirtyFlags |= Simulation::DIRTY_POSITION;
}
}
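// NOTE: the IGNORE_*/ACTIVATION_* delta checks that used to live in these updaters
// were moved into ObjectMotionState::handleEasyChanges() (see the physics changes
// later in this diff), which is why the updaters reduce to plain inequality tests.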
void EntityItem::updateDimensions(const glm::vec3& value) {
auto delta = glm::distance(getDimensions(), value);
if (delta > IGNORE_DIMENSIONS_DELTA) {
if (getDimensions() != value) {
setDimensions(value);
if (delta > ACTIVATION_DIMENSIONS_DELTA) {
// rebuilding the shape will always activate
_dirtyFlags |= (Simulation::DIRTY_SHAPE | Simulation::DIRTY_MASS);
}
_dirtyFlags |= (Simulation::DIRTY_SHAPE | Simulation::DIRTY_MASS);
}
}
@ -1339,14 +1330,7 @@ void EntityItem::updateRotation(const glm::quat& rotation) {
}
if (getRotation() != rotation) {
setRotation(rotation);
auto alignmentDot = glm::abs(glm::dot(getRotation(), rotation));
if (alignmentDot < IGNORE_ALIGNMENT_DOT) {
_dirtyFlags |= Simulation::DIRTY_ROTATION;
}
if (alignmentDot < ACTIVATION_ALIGNMENT_DOT) {
_dirtyFlags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
_dirtyFlags |= Simulation::DIRTY_ROTATION;
}
}
@ -1367,11 +1351,8 @@ void EntityItem::updateMass(float mass) {
newDensity = glm::max(glm::min(mass / volume, ENTITY_ITEM_MAX_DENSITY), ENTITY_ITEM_MIN_DENSITY);
}
float oldDensity = _density;
_density = newDensity;
if (fabsf(_density - oldDensity) / _density > ACTIVATION_RELATIVE_DENSITY_DELTA) {
// the density has changed enough that we should update the physics simulation
if (_density != newDensity) {
_density = newDensity;
_dirtyFlags |= Simulation::DIRTY_MASS;
}
}
@ -1380,38 +1361,29 @@ void EntityItem::updateVelocity(const glm::vec3& value) {
if (shouldSuppressLocationEdits()) {
return;
}
auto delta = glm::distance(_velocity, value);
if (delta > IGNORE_LINEAR_VELOCITY_DELTA) {
_dirtyFlags |= Simulation::DIRTY_LINEAR_VELOCITY;
if (_velocity != value) {
const float MIN_LINEAR_SPEED = 0.001f;
if (glm::length(value) < MIN_LINEAR_SPEED) {
_velocity = ENTITY_ITEM_ZERO_VEC3;
} else {
_velocity = value;
// only activate when setting non-zero velocity
if (delta > ACTIVATION_LINEAR_VELOCITY_DELTA) {
_dirtyFlags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
}
_dirtyFlags |= Simulation::DIRTY_LINEAR_VELOCITY;
}
}
void EntityItem::updateDamping(float value) {
auto clampedDamping = glm::clamp(value, 0.0f, 1.0f);
if (fabsf(_damping - clampedDamping) > IGNORE_DAMPING_DELTA) {
if (_damping != clampedDamping) {
_damping = clampedDamping;
_dirtyFlags |= Simulation::DIRTY_MATERIAL;
}
}
void EntityItem::updateGravity(const glm::vec3& value) {
auto delta = glm::distance(_gravity, value);
if (delta > IGNORE_GRAVITY_DELTA) {
if (_gravity != value) {
_gravity = value;
_dirtyFlags |= Simulation::DIRTY_LINEAR_VELOCITY;
if (delta > ACTIVATION_GRAVITY_DELTA) {
_dirtyFlags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
}
}
@ -1419,25 +1391,20 @@ void EntityItem::updateAngularVelocity(const glm::vec3& value) {
if (shouldSuppressLocationEdits()) {
return;
}
auto delta = glm::distance(_angularVelocity, value);
if (delta > IGNORE_ANGULAR_VELOCITY_DELTA) {
_dirtyFlags |= Simulation::DIRTY_ANGULAR_VELOCITY;
if (_angularVelocity != value) {
const float MIN_ANGULAR_SPEED = 0.0002f;
if (glm::length(value) < MIN_ANGULAR_SPEED) {
_angularVelocity = ENTITY_ITEM_ZERO_VEC3;
} else {
_angularVelocity = value;
// only activate when setting non-zero velocity
if (delta > ACTIVATION_ANGULAR_VELOCITY_DELTA) {
_dirtyFlags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
}
_dirtyFlags |= Simulation::DIRTY_ANGULAR_VELOCITY;
}
}
void EntityItem::updateAngularDamping(float value) {
auto clampedDamping = glm::clamp(value, 0.0f, 1.0f);
if (fabsf(_angularDamping - clampedDamping) > IGNORE_DAMPING_DELTA) {
if (_angularDamping != clampedDamping) {
_angularDamping = clampedDamping;
_dirtyFlags |= Simulation::DIRTY_MATERIAL;
}

View file

@ -48,6 +48,7 @@ namespace render {
class PendingChanges;
}
/*
// these thresholds determine what updates will be ignored (client and server)
const float IGNORE_POSITION_DELTA = 0.0001f;
const float IGNORE_DIMENSIONS_DELTA = 0.0005f;
@ -64,6 +65,7 @@ const float ACTIVATION_ALIGNMENT_DOT = 0.99990f;
const float ACTIVATION_LINEAR_VELOCITY_DELTA = 0.01f;
const float ACTIVATION_GRAVITY_DELTA = 0.1f;
const float ACTIVATION_ANGULAR_VELOCITY_DELTA = 0.03f;
*/
#define DONT_ALLOW_INSTANTIATION virtual void pureVirtualFunctionPlaceHolder() = 0;
#define ALLOW_INSTANTIATION virtual void pureVirtualFunctionPlaceHolder() { };

View file

@ -63,6 +63,7 @@ public:
static const QString DEFAULT_MODEL_URL;
const QString& getModelURL() const { return _modelURL; }
const QUrl& getParsedModelURL() const { return _parsedModelURL; }
static const QString DEFAULT_COMPOUND_SHAPE_URL;
const QString& getCompoundShapeURL() const { return _compoundShapeURL; }
@ -75,7 +76,7 @@ public:
}
// model related properties
void setModelURL(const QString& url) { _modelURL = url; }
void setModelURL(const QString& url) { _modelURL = url; _parsedModelURL = QUrl(url); }
virtual void setCompoundShapeURL(const QString& url);
@ -134,6 +135,7 @@ protected:
rgbColor _color;
QString _modelURL;
QUrl _parsedModelURL;
QString _compoundShapeURL;
AnimationPropertyGroup _animationProperties;

View file

@ -34,8 +34,9 @@ PolyLineEntityItem::PolyLineEntityItem(const EntityItemID& entityItemID, const E
EntityItem(entityItemID),
_lineWidth(DEFAULT_LINE_WIDTH),
_pointsChanged(true),
_normalsChanged(true),
_strokeWidthsChanged(true),
_points(QVector<glm::vec3>(0.0f)),
_vertices(QVector<glm::vec3>(0.0f)),
_normals(QVector<glm::vec3>(0.0f)),
_strokeWidths(QVector<float>(0.0f)),
_textures("")
@ -106,47 +107,13 @@ bool PolyLineEntityItem::appendPoint(const glm::vec3& point) {
bool PolyLineEntityItem::setStrokeWidths(const QVector<float>& strokeWidths) {
_strokeWidths = strokeWidths;
_strokeWidthsChanged = true;
return true;
}
bool PolyLineEntityItem::setNormals(const QVector<glm::vec3>& normals) {
_normals = normals;
if (_points.size() < 2 || _normals.size() < 2 || _strokeWidths.size() < 2) {
return false;
}
int minVectorSize = _normals.size();
if (_points.size() < minVectorSize) {
minVectorSize = _points.size();
}
if (_strokeWidths.size() < minVectorSize) {
minVectorSize = _strokeWidths.size();
}
_vertices.clear();
glm::vec3 v1, v2, tangent, binormal, point;
int finalIndex = minVectorSize -1;
for (int i = 0; i < finalIndex; i++) {
float width = _strokeWidths.at(i);
point = _points.at(i);
tangent = _points.at(i + 1) - point;
glm::vec3 normal = normals.at(i);
binormal = glm::normalize(glm::cross(tangent, normal)) * width;
//This checks to make sure binormal is not a NAN
assert(binormal.x == binormal.x);
v1 = point + binormal;
v2 = point - binormal;
_vertices << v1 << v2;
}
//for last point we can just assume binormals are same since it represents last two vertices of quad
point = _points.at(finalIndex);
v1 = point + binormal;
v2 = point - binormal;
_vertices << v1 << v2;
_normalsChanged = true;
return true;
}

View file

@ -93,8 +93,9 @@ class PolyLineEntityItem : public EntityItem {
rgbColor _color;
float _lineWidth;
bool _pointsChanged;
bool _normalsChanged;
bool _strokeWidthsChanged;
QVector<glm::vec3> _points;
QVector<glm::vec3> _vertices;
QVector<glm::vec3> _normals;
QVector<float> _strokeWidths;
QString _textures;

View file

@ -95,7 +95,7 @@ void EntityMotionState::updateServerPhysicsVariables(const QUuid& sessionID) {
}
// virtual
bool EntityMotionState::handleEasyChanges(uint32_t flags, PhysicsEngine* engine) {
bool EntityMotionState::handleEasyChanges(uint32_t& flags, PhysicsEngine* engine) {
assert(entityTreeIsLocked());
updateServerPhysicsVariables(engine->getSessionID());
ObjectMotionState::handleEasyChanges(flags, engine);
@ -120,7 +120,7 @@ bool EntityMotionState::handleEasyChanges(uint32_t flags, PhysicsEngine* engine)
}
if (flags & Simulation::DIRTY_SIMULATOR_OWNERSHIP) {
// (DIRTY_SIMULATOR_OWNERSHIP really means "we should bid for ownership with SCRIPT priority")
// we're manipulating this object directly via script, so we artificially
// we're manipulating this object directly via script, so we artificially
// manipulate the logic to trigger an immediate bid for ownership
setOutgoingPriority(SCRIPT_EDIT_SIMULATION_PRIORITY);
}
@ -133,7 +133,7 @@ bool EntityMotionState::handleEasyChanges(uint32_t flags, PhysicsEngine* engine)
// virtual
bool EntityMotionState::handleHardAndEasyChanges(uint32_t flags, PhysicsEngine* engine) {
bool EntityMotionState::handleHardAndEasyChanges(uint32_t& flags, PhysicsEngine* engine) {
updateServerPhysicsVariables(engine->getSessionID());
return ObjectMotionState::handleHardAndEasyChanges(flags, engine);
}

View file

@ -29,8 +29,8 @@ public:
virtual ~EntityMotionState();
void updateServerPhysicsVariables(const QUuid& sessionID);
virtual bool handleEasyChanges(uint32_t flags, PhysicsEngine* engine);
virtual bool handleHardAndEasyChanges(uint32_t flags, PhysicsEngine* engine);
virtual bool handleEasyChanges(uint32_t& flags, PhysicsEngine* engine);
virtual bool handleHardAndEasyChanges(uint32_t& flags, PhysicsEngine* engine);
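// NOTE: flags is now passed by reference so these handlers can hand bits back to
// the caller, e.g. adding DIRTY_PHYSICS_ACTIVATION or clearing DIRTY_SHAPE.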
/// \return MOTION_TYPE_DYNAMIC or MOTION_TYPE_STATIC based on params set in EntityItem
virtual MotionType computeObjectMotionType() const;

View file

@ -17,6 +17,14 @@
#include "PhysicsHelpers.h"
#include "PhysicsLogging.h"
// these thresholds determine what updates (object-->body) will activate the physical object
const float ACTIVATION_POSITION_DELTA = 0.005f;
const float ACTIVATION_ALIGNMENT_DOT = 0.99990f;
const float ACTIVATION_LINEAR_VELOCITY_DELTA = 0.01f;
const float ACTIVATION_GRAVITY_DELTA = 0.1f;
const float ACTIVATION_ANGULAR_VELOCITY_DELTA = 0.03f;
// origin of physics simulation in world-frame
glm::vec3 _worldOffset(0.0f);
@ -115,6 +123,31 @@ void ObjectMotionState::setMotionType(MotionType motionType) {
_motionType = motionType;
}
// Update the Continuous Collision Detection (CCD) configuration settings of our RigidBody so that
// CCD will be enabled automatically when its speed surpasses a certain threshold.
void ObjectMotionState::updateCCDConfiguration() {
if (_body) {
if (_shape) {
// If this object moves farther in a single simulation step than its bounding radius *
// RADIUS_MOTION_THRESHOLD_MULTIPLIER, CCD will be enabled for this object.
const auto RADIUS_MOTION_THRESHOLD_MULTIPLIER = 0.5f;
btVector3 center;
btScalar radius;
_shape->getBoundingSphere(center, radius);
_body->setCcdMotionThreshold(radius * RADIUS_MOTION_THRESHOLD_MULTIPLIER);
// TODO: Ideally the swept sphere radius would be contained by the object. Using the bounding sphere
// radius works well for spherical objects, but may cause issues with other shapes. For arbitrary
// objects we may want to consider a different approach, such as grouping rigid bodies together.
_body->setCcdSweptSphereRadius(radius);
} else {
// Disable CCD
_body->setCcdMotionThreshold(0);
}
}
}
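// A standalone sketch of the configuration above against the Bullet 2.82 API;
// configureCcd is an illustrative helper (not from this commit) and assumes the
// Bullet headers this file already includes:
void configureCcd(btRigidBody* body, const btCollisionShape* shape) {
    btVector3 center;
    btScalar radius;
    shape->getBoundingSphere(center, radius);
    body->setCcdMotionThreshold(radius * 0.5f); // swept test kicks in past half a radius per substep
    body->setCcdSweptSphereRadius(radius);      // sphere used for the swept collision test
}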
void ObjectMotionState::setRigidBody(btRigidBody* body) {
// give the body a (void*) back-pointer to this ObjectMotionState
if (_body != body) {
@ -125,31 +158,69 @@ void ObjectMotionState::setRigidBody(btRigidBody* body) {
if (_body) {
_body->setUserPointer(this);
}
updateCCDConfiguration();
}
}
bool ObjectMotionState::handleEasyChanges(uint32_t flags, PhysicsEngine* engine) {
bool ObjectMotionState::handleEasyChanges(uint32_t& flags, PhysicsEngine* engine) {
if (flags & Simulation::DIRTY_POSITION) {
btTransform worldTrans;
if (flags & Simulation::DIRTY_ROTATION) {
worldTrans.setRotation(glmToBullet(getObjectRotation()));
} else {
worldTrans = _body->getWorldTransform();
btTransform worldTrans = _body->getWorldTransform();
btVector3 newPosition = glmToBullet(getObjectPosition());
float delta = (newPosition - worldTrans.getOrigin()).length();
if (delta > ACTIVATION_POSITION_DELTA) {
flags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
worldTrans.setOrigin(newPosition);
if (flags & Simulation::DIRTY_ROTATION) {
btQuaternion newRotation = glmToBullet(getObjectRotation());
float alignmentDot = fabsf(worldTrans.getRotation().dot(newRotation));
if (alignmentDot < ACTIVATION_ALIGNMENT_DOT) {
flags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
worldTrans.setRotation(newRotation);
}
worldTrans.setOrigin(glmToBullet(getObjectPosition()));
_body->setWorldTransform(worldTrans);
} else if (flags & Simulation::DIRTY_ROTATION) {
btTransform worldTrans = _body->getWorldTransform();
worldTrans.setRotation(glmToBullet(getObjectRotation()));
btQuaternion newRotation = glmToBullet(getObjectRotation());
float alignmentDot = fabsf(worldTrans.getRotation().dot(newRotation));
if (alignmentDot < ACTIVATION_ALIGNMENT_DOT) {
flags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
worldTrans.setRotation(newRotation);
_body->setWorldTransform(worldTrans);
}
if (flags & Simulation::DIRTY_LINEAR_VELOCITY) {
_body->setLinearVelocity(glmToBullet(getObjectLinearVelocity()));
_body->setGravity(glmToBullet(getObjectGravity()));
btVector3 newLinearVelocity = glmToBullet(getObjectLinearVelocity());
if (!(flags & Simulation::DIRTY_PHYSICS_ACTIVATION)) {
float delta = (newLinearVelocity - _body->getLinearVelocity()).length();
if (delta > ACTIVATION_LINEAR_VELOCITY_DELTA) {
flags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
}
_body->setLinearVelocity(newLinearVelocity);
btVector3 newGravity = glmToBullet(getObjectGravity());
if (!(flags & Simulation::DIRTY_PHYSICS_ACTIVATION)) {
float delta = (newGravity - _body->getGravity()).length();
if (delta > ACTIVATION_GRAVITY_DELTA ||
(delta > 0.0f && _body->getGravity().length2() == 0.0f)) {
flags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
}
_body->setGravity(newGravity);
}
if (flags & Simulation::DIRTY_ANGULAR_VELOCITY) {
_body->setAngularVelocity(glmToBullet(getObjectAngularVelocity()));
btVector3 newAngularVelocity = glmToBullet(getObjectAngularVelocity());
if (!(flags & Simulation::DIRTY_PHYSICS_ACTIVATION)) {
float delta = (newAngularVelocity - _body->getAngularVelocity()).length();
if (delta > ACTIVATION_ANGULAR_VELOCITY_DELTA) {
flags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
}
}
_body->setAngularVelocity(newAngularVelocity);
}
if (flags & Simulation::DIRTY_MATERIAL) {
@ -163,7 +234,7 @@ bool ObjectMotionState::handleEasyChanges(uint32_t flags, PhysicsEngine* engine)
return true;
}
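// The blocks above all repeat one pattern: measure how far the incoming value is
// from the body's current state and request activation only past a threshold.
// A hypothetical helper (not in this commit) capturing that pattern:
inline void flagActivation(float delta, float threshold, uint32_t& flags) {
    if (delta > threshold) {
        flags |= Simulation::DIRTY_PHYSICS_ACTIVATION;
    }
}
// e.g. flagActivation((newLinearVelocity - _body->getLinearVelocity()).length(),
//                     ACTIVATION_LINEAR_VELOCITY_DELTA, flags);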
bool ObjectMotionState::handleHardAndEasyChanges(uint32_t flags, PhysicsEngine* engine) {
bool ObjectMotionState::handleHardAndEasyChanges(uint32_t& flags, PhysicsEngine* engine) {
if (flags & Simulation::DIRTY_SHAPE) {
// make sure the new shape is valid
if (!isReadyToComputeShape()) {
@ -187,6 +258,8 @@ bool ObjectMotionState::handleHardAndEasyChanges(uint32_t flags, PhysicsEngine*
if (_shape != newShape) {
_shape = newShape;
_body->setCollisionShape(_shape);
updateCCDConfiguration();
} else {
// huh... the shape didn't actually change, so we clear the DIRTY_SHAPE flag
flags &= ~Simulation::DIRTY_SHAPE;
@ -195,8 +268,8 @@ bool ObjectMotionState::handleHardAndEasyChanges(uint32_t flags, PhysicsEngine*
if (flags & EASY_DIRTY_PHYSICS_FLAGS) {
handleEasyChanges(flags, engine);
}
// it is possible that there are no HARD flags at this point (if DIRTY_SHAPE was removed)
// so we check again befoe we reinsert:
// it is possible there are no HARD flags at this point (if DIRTY_SHAPE was removed)
// so we check again before we reinsert:
if (flags & HARD_DIRTY_PHYSICS_FLAGS) {
engine->reinsertObject(this);
}

View file

@ -80,8 +80,8 @@ public:
ObjectMotionState(btCollisionShape* shape);
~ObjectMotionState();
virtual bool handleEasyChanges(uint32_t flags, PhysicsEngine* engine);
virtual bool handleHardAndEasyChanges(uint32_t flags, PhysicsEngine* engine);
virtual bool handleEasyChanges(uint32_t& flags, PhysicsEngine* engine);
virtual bool handleHardAndEasyChanges(uint32_t& flags, PhysicsEngine* engine);
void updateBodyMaterialProperties();
void updateBodyVelocities();
@ -151,6 +151,7 @@ protected:
virtual bool isReadyToComputeShape() = 0;
virtual btCollisionShape* computeNewShape() = 0;
void setMotionType(MotionType motionType);
void updateCCDConfiguration();
// clearObjectBackPointer() overrides should call the base method, then actually clear the object back pointer.
virtual void clearObjectBackPointer() { _type = MOTIONSTATE_TYPE_INVALID; }

View file

@ -244,14 +244,14 @@ void PhysicsEngine::stepSimulation() {
float timeStep = btMin(dt, MAX_TIMESTEP);
if (_myAvatarController) {
// ADEBUG TODO: move this stuff outside and in front of stepSimulation, because
// TODO: move this stuff outside and in front of stepSimulation, because
// the updateShapeIfNecessary() call needs info from MyAvatar and should
// be done on the main thread during the pre-simulation stuff
if (_myAvatarController->needsRemoval()) {
_myAvatarController->setDynamicsWorld(nullptr);
// We must remove any existing contacts for the avatar so that any new contacts will have
// valid data. MyAvatar's RigidBody is the ONLY one in the simulation that does not yet
// valid data. MyAvatar's RigidBody is the ONLY one in the simulation that does not yet
// have a MotionState so we pass nullptr to removeContacts().
removeContacts(nullptr);
}

View file

@ -56,10 +56,12 @@ bool RecordingScriptingInterface::loadRecording(const QString& url) {
using namespace recording;
auto loader = ClipCache::instance().getClipLoader(url);
QEventLoop loop;
QObject::connect(loader.data(), &Resource::loaded, &loop, &QEventLoop::quit);
QObject::connect(loader.data(), &Resource::failed, &loop, &QEventLoop::quit);
loop.exec();
if (!loader->isLoaded()) {
QEventLoop loop;
QObject::connect(loader.data(), &Resource::loaded, &loop, &QEventLoop::quit);
QObject::connect(loader.data(), &Resource::failed, &loop, &QEventLoop::quit);
loop.exec();
}
if (!loader->isLoaded()) {
qWarning() << "Clip failed to load from " << url;

View file

@ -124,14 +124,9 @@ static bool hadUncaughtExceptions(QScriptEngine& engine, const QString& fileName
ScriptEngine::ScriptEngine(const QString& scriptContents, const QString& fileNameString, bool wantSignals) :
_scriptContents(scriptContents),
_isFinished(false),
_isRunning(false),
_isInitialized(false),
_timerFunctionMap(),
_wantSignals(wantSignals),
_fileNameString(fileNameString),
_isUserLoaded(false),
_isReloading(false),
_arrayBufferClass(new ArrayBufferClass(this))
{
_allScriptsMutex.lock();
@ -140,6 +135,8 @@ ScriptEngine::ScriptEngine(const QString& scriptContents, const QString& fileNam
}
ScriptEngine::~ScriptEngine() {
qCDebug(scriptengine) << "Script Engine shutting down (destructor) for script:" << getFilename();
// If we're not already in the middle of stopping all scripts, then we should remove ourselves
// from the list of running scripts. We don't do this if we're in the process of stopping all scripts
// because that method removes scripts from its list as it iterates them
@ -150,11 +147,21 @@ ScriptEngine::~ScriptEngine() {
}
}
void ScriptEngine::disconnectNonEssentialSignals() {
disconnect();
connect(this, &ScriptEngine::doneRunning, thread(), &QThread::quit);
}
void ScriptEngine::runInThread() {
_isThreaded = true;
QThread* workerThread = new QThread(); // thread is not owned, so we need to manage the delete
QString scriptEngineName = QString("Script Thread:") + getFilename();
workerThread->setObjectName(scriptEngineName);
// NOTE: If you connect any essential signals for proper shutdown or cleanup of
// the script engine, make sure to add code that re-establishes those connections
// inside the disconnectNonEssentialSignals() method
// when the worker thread is started, call our engine's run..
connect(workerThread, &QThread::started, this, &ScriptEngine::run);
@ -176,12 +183,13 @@ void ScriptEngine::runInThread() {
QSet<ScriptEngine*> ScriptEngine::_allKnownScriptEngines;
QMutex ScriptEngine::_allScriptsMutex;
bool ScriptEngine::_stoppingAllScripts = false;
bool ScriptEngine::_doneRunningThisScript = false;
void ScriptEngine::stopAllScripts(QObject* application) {
_allScriptsMutex.lock();
_stoppingAllScripts = true;
qCDebug(scriptengine) << "Stopping all scripts.... currently known scripts:" << _allKnownScriptEngines.size();
QMutableSetIterator<ScriptEngine*> i(_allKnownScriptEngines);
while (i.hasNext()) {
ScriptEngine* scriptEngine = i.next();
@ -219,7 +227,9 @@ void ScriptEngine::stopAllScripts(QObject* application) {
// We need to wait for the engine to be done running before we proceed, because we don't
// want any of the scripts' final "scriptEnding()" or pending "update()" methods accessing
// any application state after we leave this stopAllScripts() method
qCDebug(scriptengine) << "waiting on script:" << scriptName;
scriptEngine->waitTillDoneRunning();
qCDebug(scriptengine) << "done waiting on script:" << scriptName;
// If the script is stopped, we can remove it from our set
i.remove();
@ -227,21 +237,19 @@ void ScriptEngine::stopAllScripts(QObject* application) {
}
_stoppingAllScripts = false;
_allScriptsMutex.unlock();
qCDebug(scriptengine) << "DONE Stopping all scripts....";
}
void ScriptEngine::waitTillDoneRunning() {
// If the script never started running or finished running before we got here, we don't need to wait for it
if (_isRunning) {
_doneRunningThisScript = false; // NOTE: this is static, we serialize our waiting for scripts to finish
if (_isRunning && _isThreaded) {
// NOTE: waitTillDoneRunning() will be called on the main Application thread, inside of stopAllScripts()
// we want the application thread to continue to process events, because the scripts will likely need to
// marshal messages across to the main thread. For example, if they access Settings or Menu in any of their
// shutdown code.
while (!_doneRunningThisScript) {
while (thread()->isRunning()) {
// process events for the main application thread, allowing invokeMethod calls to pass between threads
QCoreApplication::processEvents();
}
@ -752,8 +760,6 @@ void ScriptEngine::run() {
emit runningStateChanged();
emit doneRunning();
}
_doneRunningThisScript = true;
}
// NOTE: This is private because it must be called on the same thread that created the timers, which is why
@ -1168,7 +1174,7 @@ void ScriptEngine::refreshFileScript(const EntityItemID& entityID) {
QString filePath = QUrl(details.scriptText).toLocalFile();
auto lastModified = QFileInfo(filePath).lastModified().toMSecsSinceEpoch();
if (lastModified > details.lastModified) {
qDebug() << "Reloading modified script " << details.scriptText;
qCDebug(scriptengine) << "Reloading modified script " << details.scriptText;
QFile file(filePath);
file.open(QIODevice::ReadOnly);

View file

@ -128,6 +128,7 @@ public:
bool isFinished() const { return _isFinished; } // used by Application and ScriptWidget
bool isRunning() const { return _isRunning; } // used by ScriptWidget
void disconnectNonEssentialSignals();
static void stopAllScripts(QObject* application); // used by Application on shutdown
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -165,15 +166,16 @@ signals:
protected:
QString _scriptContents;
QString _parentURL;
bool _isFinished;
bool _isRunning;
int _evaluatesPending = 0;
bool _isInitialized;
bool _isFinished { false };
bool _isRunning { false };
int _evaluatesPending { 0 };
bool _isInitialized { false };
QHash<QTimer*, QScriptValue> _timerFunctionMap;
QSet<QUrl> _includedURLs;
bool _wantSignals = true;
bool _wantSignals { true };
QHash<EntityItemID, EntityScriptDetails> _entityScripts;
private:
bool _isThreaded { false };
void init();
QString getFilename() const;
void waitTillDoneRunning();
@ -191,8 +193,8 @@ private:
Quat _quatLibrary;
Vec3 _vec3Library;
ScriptUUID _uuidLibrary;
bool _isUserLoaded;
bool _isReloading;
bool _isUserLoaded { false };
bool _isReloading { false };
ArrayBufferClass* _arrayBufferClass;
@ -205,8 +207,6 @@ private:
static QSet<ScriptEngine*> _allKnownScriptEngines;
static QMutex _allScriptsMutex;
static bool _stoppingAllScripts;
static bool _doneRunningThisScript;
};
#endif // hifi_ScriptEngine_h

View file

@ -0,0 +1,78 @@
//
// PIDController.cpp
// libraries/shared/src
//
// Created by Howard Stearns 11/13/15.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <glm/glm.hpp>
#include <QDebug>
#include "SharedLogging.h"
#include "PIDController.h"
float PIDController::update(float measuredValue, float dt, bool resetAccumulator) {
const float error = getMeasuredValueSetpoint() - measuredValue; // Sign is the direction we want measuredValue to go. Positive means go higher.
const float p = getKP() * error; // term is Proportional to error
const float accumulatedError = glm::clamp(error * dt + (resetAccumulator ? 0 : _lastAccumulation), // integrate error
getAccumulatedValueLowLimit(), // but clamp by anti-windup limits
getAccumulatedValueHighLimit());
const float i = getKI() * accumulatedError; // term is Integral of error
const float changeInError = (error - _lastError) / dt; // positive value denotes increasing deficit
const float d = getKD() * changeInError; // term is Derivative of Error
const float computedValue = glm::clamp(p + i + d,
getControlledValueLowLimit(),
getControlledValueHighLimit());
if (getIsLogging()) { // if logging/reporting
updateHistory(measuredValue, dt, error, accumulatedError, changeInError, p, i, d, computedValue);
}
Q_ASSERT(!isnan(computedValue));
// update state for next time
_lastError = error;
_lastAccumulation = accumulatedError;
return computedValue;
}
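// In discrete form, update() above computes:
//     e[n]   = setpoint - measured[n]
//     acc[n] = clamp(acc[n-1] + e[n] * dt, accLow, accHigh)                  (anti-windup)
//     out[n] = clamp(kp * e[n] + ki * acc[n] + kd * (e[n] - e[n-1]) / dt,
//                    controlledLow, controlledHigh)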
// Just for logging/reporting. Used when picking/verifying the operational parameters.
void PIDController::updateHistory(float measuredValue, float dt, float error, float accumulatedError, float changeInError, float p, float i, float d, float computedValue) {
// Don't report each update(), as the I/O messes with the results a lot.
// Instead, add to history, and then dump out at once when full.
// Typically, the first few values reported in each batch should be ignored.
const int n = _history.size();
_history.resize(n + 1);
Row& next = _history[n];
next.measured = measuredValue;
next.dt = dt;
next.error = error;
next.accumulated = accumulatedError;
next.changed = changeInError;
next.p = p;
next.i = i;
next.d = d;
next.computed = computedValue;
if (_history.size() == _history.capacity()) { // report when buffer is full
reportHistory();
_history.resize(0);
}
}
void PIDController::reportHistory() {
qCDebug(shared) << _label << "measured dt FIXME || error accumulated changed || p i d controlled";
for (int i = 0; i < _history.size(); i++) {
Row& row = _history[i];
qCDebug(shared) << row.measured << row.dt <<
"||" << row.error << row.accumulated << row.changed <<
"||" << row.p << row.i << row.d << row.computed << 1.0f/row.computed;
}
qCDebug(shared) << "Limits: setpoint" << getMeasuredValueSetpoint() << "accumulate" << getAccumulatedValueLowLimit() << getAccumulatedValueHighLimit() <<
"controlled" << getControlledValueLowLimit() << getControlledValueHighLimit() <<
"kp/ki/kd" << getKP() << getKI() << getKD();
}

View file

@ -0,0 +1,89 @@
//
// PIDController.h
// libraries/shared/src
//
// Given a measure of system performance (such as frame rate, where bigger denotes more system work),
// compute a value that the system can take as input to control the amount of work done (such as a 1/LOD-distance,
// where bigger tends to give a higher measured system performance value). The controller's job is to compute a
// controlled value such that the measured value stays near the specified setpoint, even as system load changes.
// See http://www.wetmachine.com/inventing-the-future/mostly-reliable-performance-of-software-processes-by-dynamic-control-of-quality-parameters/
//
// Created by Howard Stearns 11/13/15.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_PIDController_h
#define hifi_PIDController_h
#include <limits>
#include <QVector>
// Although our coding standard shuns abbreviations, the control systems literature uniformly uses p, i, d, and dt rather than
// proportionalTerm, integralTerm, derivativeTerm, and deltaTime. Here we will be consistent with the literature.
class PIDController {
public:
// These are the main interfaces:
void setMeasuredValueSetpoint(float newValue) { _measuredValueSetpoint = newValue; }
float update(float measuredValue, float dt, bool resetAccumulator = false); // returns the new computedValue
void setHistorySize(QString label = QString(""), int size = 0) { _history.reserve(size); _history.resize(0); _label = label; } // non-empty does logging
bool getIsLogging() { return _history.capacity(); }
float getMeasuredValueSetpoint() const { return _measuredValueSetpoint; }
// In normal operation (where we can easily reach the setpoint), controlledValue is typically pinned at max.
// Defaults to [0, max float], but for 1/LOD-distance, it might be, say, [0, 0.2] or [0, 0.1].
float getControlledValueLowLimit() const { return _controlledValueLowLimit; }
float getControlledValueHighLimit() const { return _controlledValueHighLimit; }
float getAntiWindupFactor() const { return _antiWindupFactor; } // default 10
float getKP() const { return _kp; } // proportional to error. See comment above class.
float getKI() const { return _ki; } // to time integral of error
float getKD() const { return _kd; } // to time derivative of error
float getAccumulatedValueHighLimit() const { return getAntiWindupFactor() * getMeasuredValueSetpoint(); }
float getAccumulatedValueLowLimit() const { return -getAntiWindupFactor() * getMeasuredValueSetpoint(); }
// There are several values that rarely change and might be thought of as "constants", but which do change during tuning, debugging, or other
// special-but-expected circumstances. Thus the instance vars are not const.
void setControlledValueLowLimit(float newValue) { _controlledValueLowLimit = newValue; }
void setControlledValueHighLimit(float newValue) { _controlledValueHighLimit = newValue; }
void setAntiWindupFactor(float newValue) { _antiWindupFactor = newValue; }
void setKP(float newValue) { _kp = newValue; }
void setKI(float newValue) { _ki = newValue; }
void setKD(float newValue) { _kd = newValue; }
class Row { // one row of accumulated history, used only for logging (if at all)
public:
float measured;
float dt;
float error;
float accumulated;
float changed;
float p;
float i;
float d;
float computed;
};
protected:
void reportHistory();
void updateHistory(float measured, float dt, float error, float accumulatedError, float changeInError, float p, float i, float d, float computedValue);
float _measuredValueSetpoint { 0.0f };
float _controlledValueLowLimit { 0.0f };
float _controlledValueHighLimit { std::numeric_limits<float>::max() };
float _antiWindupFactor { 10.0f };
float _kp { 0.0f };
float _ki { 0.0f };
float _kd { 0.0f };
// Controller operating state
float _lastError{ 0.0f };
float _lastAccumulation{ 0.0f };
// reporting
QVector<Row> _history{};
QString _label{ "" };
};
#endif // hifi_PIDController_h
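// A minimal usage sketch of the class above; the setpoint, gains, and limit below
// are illustrative placeholders, not tuned values from this commit:
//
//     PIDController fpsController;
//     fpsController.setMeasuredValueSetpoint(60.0f);    // aim for 60 fps
//     fpsController.setControlledValueHighLimit(0.2f);  // e.g. a cap on 1/LOD-distance
//     fpsController.setKP(0.01f);
//     fpsController.setKI(0.001f);
//     fpsController.setKD(0.0f);
//
//     // once per frame:
//     float control = fpsController.update(measuredFps, deltaTime);
//     // feed 'control' back into the system, e.g. as the new 1/LOD-distance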

View file

@ -128,17 +128,25 @@ QJsonObject Transform::toJson(const Transform& transform) {
}
QJsonObject result;
auto json = toJsonValue(transform.getTranslation());
if (!json.isNull()) {
result[JSON_TRANSLATION] = json;
if (transform.getTranslation() != vec3()) {
auto json = toJsonValue(transform.getTranslation());
if (!json.isNull()) {
result[JSON_TRANSLATION] = json;
}
}
json = toJsonValue(transform.getRotation());
if (!json.isNull()) {
result[JSON_ROTATION] = json;
if (transform.getRotation() != quat()) {
auto json = toJsonValue(transform.getRotation());
if (!json.isNull()) {
result[JSON_ROTATION] = json;
}
}
json = toJsonValue(transform.getScale());
if (!json.isNull()) {
result[JSON_SCALE] = json;
if (transform.getScale() != vec3(1.0f)) {
auto json = toJsonValue(transform.getScale());
if (!json.isNull()) {
result[JSON_SCALE] = json;
}
}
return result;
}
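// With the guards above, a default-constructed Transform (zero translation,
// identity rotation, unit scale) now serializes to an empty JSON object. A sketch:
//     Transform t;
//     QJsonObject json = Transform::toJson(t); // json.isEmpty() == true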