Merge branch 'master' of https://github.com/highfidelity/hifi into editVoxels_zFighting

Conflicts:
    examples/editVoxels.js

Commit 1849ec8e07: 45 changed files with 789 additions and 368 deletions
@@ -229,7 +229,8 @@ void Agent::run() {
     // setup an Avatar for the script to use
     AvatarData scriptedAvatar;
 
+    scriptedAvatar.setForceFaceshiftConnected(true);
 
     // call model URL setters with empty URLs so our avatar, if user, will have the default models
     scriptedAvatar.setFaceModelURL(QUrl());
     scriptedAvatar.setSkeletonModelURL(QUrl());
@@ -237,14 +237,6 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
         _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayBufferSample[1];
     }
 
-    // The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
-    // too many conditionals in handling the delay samples at the beginning of _clientSamples.
-    // Basically we try to take the samples in batches of four, and then handle the remainder
-    // conditionally to get rid of the rest.
-
-    const int DOUBLE_STEREO_OFFSET = 4;
-    const int TRIPLE_STEREO_OFFSET = 6;
-
     if (numSamplesDelay > 0) {
         // if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
         // to stick at the beginning
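Editor's note: the comment block removed above described handling samples in batches of four, then clearing the remainder conditionally. A minimal standalone sketch of that batching pattern (plain JavaScript, hypothetical mixInBatchesOfFour helper, not the mixer's actual code):

// Process samples four at a time, then mop up the 0-3 leftovers,
// mirroring the pattern the deleted comment described.
function mixInBatchesOfFour(dest, src) {
    var i = 0;
    var whole = src.length - (src.length % 4);
    for (; i < whole; i += 4) {   // main loop: four samples per pass
        dest[i] += src[i];
        dest[i + 1] += src[i + 1];
        dest[i + 2] += src[i + 2];
        dest[i + 3] += src[i + 3];
    }
    for (; i < src.length; i++) { // remainder: at most three samples
        dest[i] += src[i];
    }
}

var dest = [0, 0, 0, 0, 0, 0];
mixInBatchesOfFour(dest, [1, 2, 3, 4, 5, 6]);
console.log(dest); // [1, 2, 3, 4, 5, 6]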
@@ -90,12 +90,12 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
     switch (packetArrivalInfo._status) {
         case SequenceNumberStats::Early: {
             int packetsLost = packetArrivalInfo._seqDiffFromExpected;
-            avatarRingBuffer->parseData(packet, packetsLost);
+            avatarRingBuffer->parseDataAndHandleDroppedPackets(packet, packetsLost);
             break;
         }
         case SequenceNumberStats::OnTime: {
             // ask the AvatarAudioRingBuffer instance to parse the data
-            avatarRingBuffer->parseData(packet);
+            avatarRingBuffer->parseDataAndHandleDroppedPackets(packet, 0);
             break;
         }
         default: {
@@ -134,12 +134,12 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
     switch (packetArrivalInfo._status) {
         case SequenceNumberStats::Early: {
             int packetsLost = packetArrivalInfo._seqDiffFromExpected;
-            matchingInjectedRingBuffer->parseData(packet, packetsLost);
+            matchingInjectedRingBuffer->parseDataAndHandleDroppedPackets(packet, packetsLost);
             break;
         }
         case SequenceNumberStats::OnTime: {
             // ask the AvatarAudioRingBuffer instance to parse the data
-            matchingInjectedRingBuffer->parseData(packet);
+            matchingInjectedRingBuffer->parseDataAndHandleDroppedPackets(packet, 0);
             break;
         }
         default: {
@@ -18,7 +18,7 @@ AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBu
 }
 
-int AvatarAudioRingBuffer::parseData(const QByteArray& packet, int packetsSkipped) {
+int AvatarAudioRingBuffer::parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) {
     frameReceivedUpdateTimingStats();
 
     _shouldLoopbackForNode = (packetTypeForPacket(packet) == PacketTypeMicrophoneAudioWithEcho);
@@ -20,7 +20,7 @@ class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
 public:
     AvatarAudioRingBuffer(bool isStereo = false, bool dynamicJitterBuffer = false);
 
-    int parseData(const QByteArray& packet, int packetsSkipped = 0);
+    int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped);
 private:
     // disallow copying of AvatarAudioRingBuffer objects
    AvatarAudioRingBuffer(const AvatarAudioRingBuffer&);
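Editor's note: the rename makes the contract explicit, since the parser is now also told how many packets the sequence-number stats say were skipped. A rough standalone sketch of one plausible policy (silence substitution per lost packet); hypothetical names, the engine's actual handling may differ:

// When packetsSkipped > 0, pad with silent frames before queueing the
// packet that actually arrived, so playback timing stays aligned.
function parseDataAndHandleDroppedPackets(buffer, packet, packetsSkipped) {
    for (var i = 0; i < packetsSkipped; i++) {
        buffer.frames.push(new Array(buffer.frameSize).fill(0)); // one silent frame per lost packet
    }
    buffer.frames.push(packet); // then queue the real audio frame
    return packet.length;
}

var ringBuffer = { frames: [], frameSize: 4 };
parseDataAndHandleDroppedPackets(ringBuffer, [9, 9, 9, 9], 2);
console.log(ringBuffer.frames.length); // 3: two silent frames, then the real one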
examples/bot_randomExpression.js (new file, 144 lines)
@@ -0,0 +1,144 @@
//
//  bot_randomExpression.js
//  examples
//
//  Created by Ben Arnold on 7/23/14.
//  Copyright 2014 High Fidelity, Inc.
//
//  This is an example script that demonstrates an NPC avatar with
//  random facial expressions.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

function getRandomFloat(min, max) {
    return Math.random() * (max - min) + min;
}

function getRandomInt(min, max) {
    return Math.floor(Math.random() * (max - min + 1)) + min;
}

function printVector(string, vector) {
    print(string + " " + vector.x + ", " + vector.y + ", " + vector.z);
}

var timePassed = 0.0;
var updateSpeed = 3.0;

var X_MIN = 5.0;
var X_MAX = 15.0;
var Z_MIN = 5.0;
var Z_MAX = 15.0;
var Y_PELVIS = 1.0;

// pick an integer between 1 and 100 for the body model for this bot
botNumber = getRandomInt(1, 100);

newFaceFilePrefix = "ron";

newBodyFilePrefix = "bot" + botNumber;

// set the face model fst using the bot number
// there is no need to change the body model - we're using the default
Avatar.faceModelURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/" + newFaceFilePrefix + ".fst";
Avatar.skeletonModelURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/" + newBodyFilePrefix + ".fst";
Avatar.billboardURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/billboards/bot" + botNumber + ".png";

Agent.isAvatar = true;
Agent.isListeningToAudioStream = true;

// change the avatar's position to the random one
Avatar.position = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) };
printVector("New bot, position = ", Avatar.position);

var allBlendShapes = [];
var targetBlendCoefficient = [];
var currentBlendCoefficient = [];

function addBlendShape(s) {
    allBlendShapes[allBlendShapes.length] = s;
}

// It is imperative that the following blendshapes are all present and are in the correct order
addBlendShape("EyeBlink_L");
addBlendShape("EyeBlink_R");
addBlendShape("EyeSquint_L");
addBlendShape("EyeSquint_R");
addBlendShape("EyeDown_L");
addBlendShape("EyeDown_R");
addBlendShape("EyeIn_L");
addBlendShape("EyeIn_R");
addBlendShape("EyeOpen_L");
addBlendShape("EyeOpen_R");
addBlendShape("EyeOut_L");
addBlendShape("EyeOut_R");
addBlendShape("EyeUp_L");
addBlendShape("EyeUp_R");
addBlendShape("BrowsD_L");
addBlendShape("BrowsD_R");
addBlendShape("BrowsU_C");
addBlendShape("BrowsU_L");
addBlendShape("BrowsU_R");
addBlendShape("JawFwd");
addBlendShape("JawLeft");
addBlendShape("JawOpen");
addBlendShape("JawChew");
addBlendShape("JawRight");
addBlendShape("MouthLeft");
addBlendShape("MouthRight");
addBlendShape("MouthFrown_L");
addBlendShape("MouthFrown_R");
addBlendShape("MouthSmile_L");
addBlendShape("MouthSmile_R");
addBlendShape("MouthDimple_L");
addBlendShape("MouthDimple_R");
addBlendShape("LipsStretch_L");
addBlendShape("LipsStretch_R");
addBlendShape("LipsUpperClose");
addBlendShape("LipsLowerClose");
addBlendShape("LipsUpperUp");
addBlendShape("LipsLowerDown");
addBlendShape("LipsUpperOpen");
addBlendShape("LipsLowerOpen");
addBlendShape("LipsFunnel");
addBlendShape("LipsPucker");
addBlendShape("ChinLowerRaise");
addBlendShape("ChinUpperRaise");
addBlendShape("Sneer");
addBlendShape("Puff");
addBlendShape("CheekSquint_L");
addBlendShape("CheekSquint_R");

for (var i = 0; i < allBlendShapes.length; i++) {
    targetBlendCoefficient[i] = 0;
    currentBlendCoefficient[i] = 0;
}

function setRandomExpression() {
    for (var i = 0; i < allBlendShapes.length; i++) {
        targetBlendCoefficient[i] = Math.random();
    }
}

var expressionChangeSpeed = 0.1;

function updateBlendShapes(deltaTime) {
    for (var i = 0; i < allBlendShapes.length; i++) {
        currentBlendCoefficient[i] += (targetBlendCoefficient[i] - currentBlendCoefficient[i]) * expressionChangeSpeed;
        Avatar.setBlendshape(allBlendShapes[i], currentBlendCoefficient[i]);
    }
}

function update(deltaTime) {
    timePassed += deltaTime;
    if (timePassed > updateSpeed) {
        timePassed = 0;
        setRandomExpression();
    }
    updateBlendShapes(deltaTime);
}

Script.update.connect(update);
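Editor's note: updateBlendShapes eases each coefficient toward its target by a fixed 10% per call, so the easing rate depends on frame rate (its deltaTime argument is unused). A frame-rate-independent variant, sketched standalone with an assumed smoothing time constant:

// Exponential easing with a time constant: the fraction moved per frame
// is derived from deltaTime, so fast and slow frame rates converge alike.
var SMOOTHING_TIME_CONSTANT = 0.3; // seconds to close ~63% of the gap (assumed value)

function easeToward(current, target, deltaTime) {
    var fraction = 1.0 - Math.exp(-deltaTime / SMOOTHING_TIME_CONSTANT);
    return current + (target - current) * fraction;
}

var value = 0.0;
for (var i = 0; i < 30; i++) {   // simulate one second at 30 fps
    value = easeToward(value, 1.0, 1 / 30);
}
console.log(value.toFixed(3));   // ~0.964, the same for any frame rate over one second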
@@ -1168,7 +1168,6 @@ function handeMenuEvent(menuItem) {
             angles.z = array[7].value;
             selectedModelProperties.modelRotation = Quat.fromVec3Degrees(angles);
             selectedModelProperties.radius = array[8].value / 2;
-            print(selectedModelProperties.radius);
 
             Models.editModel(selectedModelID, selectedModelProperties);
         }
@@ -51,7 +51,6 @@ var lastVoxelScale = 0;
 var dragStart = { x: 0, y: 0 };
 var wheelPixelsMoved = 0;
 
-
 var mouseX = 0;
 var mouseY = 0;
 
@@ -168,7 +167,16 @@ var voxelPreview = Overlays.addOverlay("cube", {
     lineWidth: 4
 });
 
-var linePreviewTop = Overlays.addOverlay("line3d", {
+var linePreviewTop = [];
+var linePreviewBottom = [];
+var linePreviewLeft = [];
+var linePreviewRight = [];
+
+// Current cursor index
+var currentCursor = 0;
+
+function addLineOverlay() {
+    return Overlays.addOverlay("line3d", {
     position: { x: 0, y: 0, z: 0},
     end: { x: 0, y: 0, z: 0},
     color: { red: 255, green: 255, blue: 255},
@@ -176,34 +184,24 @@ var linePreviewTop = Overlays.addOverlay("line3d", {
     visible: false,
     lineWidth: previewLineWidth
 });
+}
 
+// Cursor line previews for up to three cursors
+linePreviewTop[0] = addLineOverlay();
+linePreviewTop[1] = addLineOverlay();
+linePreviewTop[2] = addLineOverlay();
 
-var linePreviewBottom = Overlays.addOverlay("line3d", {
-    position: { x: 0, y: 0, z: 0},
-    end: { x: 0, y: 0, z: 0},
-    color: { red: 255, green: 255, blue: 255},
-    alpha: 1,
-    visible: false,
-    lineWidth: previewLineWidth
-});
-
-var linePreviewLeft = Overlays.addOverlay("line3d", {
-    position: { x: 0, y: 0, z: 0},
-    end: { x: 0, y: 0, z: 0},
-    color: { red: 255, green: 255, blue: 255},
-    alpha: 1,
-    visible: false,
-    lineWidth: previewLineWidth
-});
-
-var linePreviewRight = Overlays.addOverlay("line3d", {
-    position: { x: 0, y: 0, z: 0},
-    end: { x: 0, y: 0, z: 0},
-    color: { red: 255, green: 255, blue: 255},
-    alpha: 1,
-    visible: false,
-    lineWidth: previewLineWidth
-});
-
+linePreviewBottom[0] = addLineOverlay();
+linePreviewBottom[1] = addLineOverlay();
+linePreviewBottom[2] = addLineOverlay();
+
+linePreviewLeft[0] = addLineOverlay();
+linePreviewLeft[1] = addLineOverlay();
+linePreviewLeft[2] = addLineOverlay();
+
+linePreviewRight[0] = addLineOverlay();
+linePreviewRight[1] = addLineOverlay();
+linePreviewRight[2] = addLineOverlay();
 
 // these will be used below
 var scaleSelectorWidth = 144;
@@ -829,21 +827,21 @@ function showPreviewLines() {
         var pasteVoxel = getNewPasteVoxel(pickRay);
 
         // X axis
-        Overlays.editOverlay(linePreviewBottom, {
+        Overlays.editOverlay(linePreviewBottom[currentCursor], {
             position: pasteVoxel.origin,
             end: {x: pasteVoxel.origin.x + pasteVoxel.voxelSize, y: pasteVoxel.origin.y, z: pasteVoxel.origin.z },
             visible: true
         });
 
         // Y axis
-        Overlays.editOverlay(linePreviewRight, {
+        Overlays.editOverlay(linePreviewRight[currentCursor], {
             position: pasteVoxel.origin,
             end: {x: pasteVoxel.origin.x, y: pasteVoxel.origin.y + pasteVoxel.voxelSize, z: pasteVoxel.origin.z },
             visible: true
         });
 
         // Z axis
-        Overlays.editOverlay(linePreviewTop, {
+        Overlays.editOverlay(linePreviewTop[currentCursor], {
             position: pasteVoxel.origin,
             end: {x: pasteVoxel.origin.x, y: pasteVoxel.origin.y, z: pasteVoxel.origin.z - pasteVoxel.voxelSize },
             visible: true
@@ -857,10 +855,10 @@ function showPreviewLines() {
     if (intersection.intersects) {
         resultVoxel = calculateVoxelFromIntersection(intersection,"");
         Overlays.editOverlay(voxelPreview, { visible: false });
-        Overlays.editOverlay(linePreviewTop, { position: resultVoxel.topLeft, end: resultVoxel.topRight, visible: true });
-        Overlays.editOverlay(linePreviewBottom, { position: resultVoxel.bottomLeft, end: resultVoxel.bottomRight, visible: true });
-        Overlays.editOverlay(linePreviewLeft, { position: resultVoxel.topLeft, end: resultVoxel.bottomLeft, visible: true });
-        Overlays.editOverlay(linePreviewRight, { position: resultVoxel.topRight, end: resultVoxel.bottomRight, visible: true });
+        Overlays.editOverlay(linePreviewTop[currentCursor], { position: resultVoxel.topLeft, end: resultVoxel.topRight, visible: true });
+        Overlays.editOverlay(linePreviewBottom[currentCursor], { position: resultVoxel.bottomLeft, end: resultVoxel.bottomRight, visible: true });
+        Overlays.editOverlay(linePreviewLeft[currentCursor], { position: resultVoxel.topLeft, end: resultVoxel.bottomLeft, visible: true });
+        Overlays.editOverlay(linePreviewRight[currentCursor], { position: resultVoxel.topRight, end: resultVoxel.bottomRight, visible: true });
         colors[0] = {red: intersection.voxel.red, green: intersection.voxel.green , blue: intersection.voxel.blue };
 
         if (copyScale) {
@@ -869,10 +867,10 @@ function showPreviewLines() {
         moveTools();
     } else if (intersection.accurate) {
         Overlays.editOverlay(voxelPreview, { visible: false });
-        Overlays.editOverlay(linePreviewTop, { visible: false });
-        Overlays.editOverlay(linePreviewBottom, { visible: false });
-        Overlays.editOverlay(linePreviewLeft, { visible: false });
-        Overlays.editOverlay(linePreviewRight, { visible: false });
+        Overlays.editOverlay(linePreviewTop[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewBottom[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewLeft[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewRight[currentCursor], { visible: false });
     }
 }
 
@@ -882,20 +880,20 @@ function showPreviewGuides() {
             showPreviewVoxel();
 
             // make sure alternative is hidden
-            Overlays.editOverlay(linePreviewTop, { visible: false });
-            Overlays.editOverlay(linePreviewBottom, { visible: false });
-            Overlays.editOverlay(linePreviewLeft, { visible: false });
-            Overlays.editOverlay(linePreviewRight, { visible: false });
+            Overlays.editOverlay(linePreviewTop[currentCursor], { visible: false });
+            Overlays.editOverlay(linePreviewBottom[currentCursor], { visible: false });
+            Overlays.editOverlay(linePreviewLeft[currentCursor], { visible: false });
+            Overlays.editOverlay(linePreviewRight[currentCursor], { visible: false });
         } else {
             showPreviewLines();
         }
     } else {
        // make sure all previews are off
        Overlays.editOverlay(voxelPreview, { visible: false });
-        Overlays.editOverlay(linePreviewTop, { visible: false });
-        Overlays.editOverlay(linePreviewBottom, { visible: false });
-        Overlays.editOverlay(linePreviewLeft, { visible: false });
-        Overlays.editOverlay(linePreviewRight, { visible: false });
+        Overlays.editOverlay(linePreviewTop[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewBottom[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewLeft[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewRight[currentCursor], { visible: false });
     }
 }
 
@@ -988,6 +986,14 @@ function mousePressEvent(event) {
         return;
     }
 
+    if (event.deviceID == 1500) { // Left Hydra Controller
+        currentCursor = 0;
+    } else if (event.deviceID == 1501) { // Right Hydra Controller
+        currentCursor = 1;
+    } else {
+        currentCursor = 2;
+    }
+
     var clickedOnSomething = false;
     var clickedOverlay = Overlays.getOverlayAtPoint({x: event.x, y: event.y});
 
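Editor's note: the same deviceID-to-cursor mapping is repeated in mouseMoveEvent below. A tiny standalone helper shows the intent (1500 and 1501 are the Hydra controller IDs the script checks; everything else, including the desktop mouse, shares the third cursor slot):

// Map an input deviceID onto one of the three cursor slots the script allocates.
function cursorIndexForDevice(deviceID) {
    if (deviceID == 1500) {        // Left Hydra Controller
        return 0;
    } else if (deviceID == 1501) { // Right Hydra Controller
        return 1;
    }
    return 2;                      // mouse and anything else
}

console.log(cursorIndexForDevice(1500)); // 0
console.log(cursorIndexForDevice(1501)); // 1
console.log(cursorIndexForDevice(0));    // 2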
@@ -1239,9 +1245,17 @@ function menuItemEvent(menuItem) {
     }
 }
 
-function mouseMoveEvent(event, deviceID) {
-    if (deviceID != 0 || !editToolsOn || inspectJsIsRunning) {
-        return;
+function mouseMoveEvent(event) {
+    if (!editToolsOn || inspectJsIsRunning) {
+        return;
     }
 
+    if (event.deviceID == 1500) { // Left Hydra Controller
+        currentCursor = 0;
+    } else if (event.deviceID == 1501) { // Right Hydra Controller
+        currentCursor = 1;
+    } else {
+        currentCursor = 2;
+    }
+
     // Move Import Preview
@@ -1492,10 +1506,12 @@ Controller.captureKeyEvents({ text: "-" });
 
 function scriptEnding() {
     Overlays.deleteOverlay(voxelPreview);
-    Overlays.deleteOverlay(linePreviewTop);
-    Overlays.deleteOverlay(linePreviewBottom);
-    Overlays.deleteOverlay(linePreviewLeft);
-    Overlays.deleteOverlay(linePreviewRight);
+    for (var i = 0; i < linePreviewTop.length; i++) {
+        Overlays.deleteOverlay(linePreviewTop[i]);
+        Overlays.deleteOverlay(linePreviewBottom[i]);
+        Overlays.deleteOverlay(linePreviewLeft[i]);
+        Overlays.deleteOverlay(linePreviewRight[i]);
+    }
     for (s = 0; s < numColors; s++) {
         Overlays.deleteOverlay(swatches[s]);
     }
examples/laserPointer.js (new file, 23 lines)
@@ -0,0 +1,23 @@
//
//  laserPointer.js
//  examples
//
//  Created by Clément Brisset on 7/18/14.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

var LEFT = 0;
var RIGHT = 1;
var LEFT_HAND_FLAG = 1;
var RIGHT_HAND_FLAG = 2;

function update() {
    var state = ((Controller.getTriggerValue(LEFT) > 0.9) ? LEFT_HAND_FLAG : 0) +
                ((Controller.getTriggerValue(RIGHT) > 0.9) ? RIGHT_HAND_FLAG : 0);
    MyAvatar.setHandState(state);
}

Script.update.connect(update);
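Editor's note: a quick standalone check of the flag arithmetic in update(). The two constants are powers of two, so the sum is a bit mask whose four values distinguish every combination of squeezed triggers:

var LEFT_HAND_FLAG = 1;
var RIGHT_HAND_FLAG = 2;

function handState(leftTrigger, rightTrigger) {
    return ((leftTrigger > 0.9) ? LEFT_HAND_FLAG : 0) +
           ((rightTrigger > 0.9) ? RIGHT_HAND_FLAG : 0);
}

console.log(handState(0.0, 0.0)); // 0: neither trigger squeezed
console.log(handState(1.0, 0.0)); // 1: left only
console.log(handState(0.0, 1.0)); // 2: right only
console.log(handState(1.0, 1.0)); // 3: both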
@@ -592,7 +592,7 @@ void Application::paintGL() {
 
     if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON) {
         _myCamera.setTightness(0.0f); // In first person, camera follows (untweaked) head exactly without delay
-        _myCamera.setTargetPosition(_myAvatar->getHead()->calculateAverageEyePosition());
+        _myCamera.setTargetPosition(_myAvatar->getHead()->getFilteredEyePosition());
         _myCamera.setTargetRotation(_myAvatar->getHead()->getCameraOrientation());
 
     } else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
@@ -611,10 +611,10 @@ void Application::paintGL() {
         if (OculusManager::isConnected()) {
             _myCamera.setDistance(MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
             _myCamera.setTargetRotation(_myAvatar->getWorldAlignedOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
-            _myCamera.setTargetPosition(_myAvatar->getHead()->calculateAverageEyePosition() + glm::vec3(0, _raiseMirror * _myAvatar->getScale(), 0));
+            _myCamera.setTargetPosition(_myAvatar->getHead()->getEyePosition() + glm::vec3(0, _raiseMirror * _myAvatar->getScale(), 0));
         } else {
             _myCamera.setTightness(0.0f);
-            glm::vec3 eyePosition = _myAvatar->getHead()->calculateAverageEyePosition();
+            glm::vec3 eyePosition = _myAvatar->getHead()->getFilteredEyePosition();
             float headHeight = eyePosition.y - _myAvatar->getPosition().y;
             _myCamera.setDistance(MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
             _myCamera.setTargetPosition(_myAvatar->getPosition() + glm::vec3(0, headHeight + (_raiseMirror * _myAvatar->getScale()), 0));
@@ -1911,17 +1911,9 @@ void Application::updateMyAvatarLookAtPosition() {
         }
     } else {
         // I am not looking at anyone else, so just look forward
-        lookAtSpot = _myAvatar->getHead()->calculateAverageEyePosition() +
+        lookAtSpot = _myAvatar->getHead()->getEyePosition() +
             (_myAvatar->getHead()->getFinalOrientationInWorldFrame() * glm::vec3(0.f, 0.f, -TREE_SCALE));
     }
-    // TODO: Add saccade to mouse pointer when stable, IF not looking at someone (since we know we are looking at it)
-    /*
-    const float FIXED_MIN_EYE_DISTANCE = 0.3f;
-    float minEyeDistance = FIXED_MIN_EYE_DISTANCE + (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON ? 0.0f :
-        glm::distance(_mouseRayOrigin, _myAvatar->getHead()->calculateAverageEyePosition()));
-    lookAtSpot = _mouseRayOrigin + _mouseRayDirection * qMax(minEyeDistance, distance);
-    */
-
     }
     //
     // Deflect the eyes a bit to match the detected Gaze from 3D camera if active
@@ -1931,7 +1923,7 @@ void Application::updateMyAvatarLookAtPosition() {
         float eyeYaw = tracker->getEstimatedEyeYaw();
         const float GAZE_DEFLECTION_REDUCTION_DURING_EYE_CONTACT = 0.1f;
         // deflect using Faceshift gaze data
-        glm::vec3 origin = _myAvatar->getHead()->calculateAverageEyePosition();
+        glm::vec3 origin = _myAvatar->getHead()->getEyePosition();
         float pitchSign = (_myCamera.getMode() == CAMERA_MODE_MIRROR) ? -1.0f : 1.0f;
         float deflection = Menu::getInstance()->getFaceshiftEyeDeflection();
         if (isLookingAtSomeone) {
@@ -2935,7 +2927,7 @@ void Application::renderRearViewMirror(const QRect& region, bool billboard) {
             _mirrorCamera.setTargetPosition(glm::vec3());
 
         } else {
-            _mirrorCamera.setTargetPosition(_myAvatar->getHead()->calculateAverageEyePosition());
+            _mirrorCamera.setTargetPosition(_myAvatar->getHead()->getEyePosition());
         }
     }
     _mirrorCamera.setAspectRatio((float)region.width() / region.height());
@@ -2964,7 +2956,7 @@ void Application::renderRearViewMirror(const QRect& region, bool billboard) {
         _myAvatar->getSkeletonModel().getNeckPosition(neckPosition);
 
         // get the eye position relative to the body
-        glm::vec3 eyePosition = _myAvatar->getHead()->calculateAverageEyePosition();
+        glm::vec3 eyePosition = _myAvatar->getHead()->getEyePosition();
         float eyeHeight = eyePosition.y - _myAvatar->getPosition().y;
 
         // set the translation of the face relative to the neck position
@@ -3355,7 +3347,7 @@ void Application::nodeKilled(SharedNodePointer node) {
     _modelEditSender.nodeKilled(node);
 
     if (node->getType() == NodeType::AudioMixer) {
-        QMetaObject::invokeMethod(&_audio, "resetIncomingMixedAudioSequenceNumberStats");
+        QMetaObject::invokeMethod(&_audio, "audioMixerKilled");
     }
 
     if (node->getType() == NodeType::VoxelServer) {
@@ -3641,7 +3633,7 @@ ScriptEngine* Application::loadScript(const QString& scriptName, bool loadScript
     scriptEngine->getModelsScriptingInterface()->setModelTree(_models.getTree());
 
     // model has some custom types
-    Model::registerMetaTypes(scriptEngine->getEngine());
+    Model::registerMetaTypes(scriptEngine);
 
     // hook our avatar object into this script engine
     scriptEngine->setAvatarData(_myAvatar, "MyAvatar"); // leave it as a MyAvatar class to expose thrust features
@@ -43,6 +43,7 @@
 #include "Audio.h"
 #include "Menu.h"
 #include "Util.h"
+#include "AudioRingBuffer.h"
 
 static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
 
@@ -125,14 +126,16 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _scopeInput(0),
     _scopeOutputLeft(0),
     _scopeOutputRight(0),
+    _statsEnabled(false),
+    _starveCount(0),
+    _consecutiveNotMixedCount(0),
     _outgoingAvatarAudioSequenceNumber(0),
     _incomingMixedAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH),
     _interframeTimeGapStats(TIME_GAPS_STATS_INTERVAL_SAMPLES, TIME_GAP_STATS_WINDOW_INTERVALS),
-    _inputRingBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
+    _audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AUDIO_CALLBACK_MSECS * CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
+    _inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _outputRingBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
-    _audioOutputBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS)
+    _audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS)
 {
     // clear the array of locally injected samples
     memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
@@ -148,15 +151,34 @@ void Audio::init(QGLWidget *parent) {
 
 void Audio::reset() {
     _ringBuffer.reset();
 
     // we don't want to reset seq numbers when space-bar reset occurs.
     //_outgoingAvatarAudioSequenceNumber = 0;
 
+    resetStats();
 }
 
+void Audio::resetStats() {
+    _starveCount = 0;
+    _consecutiveNotMixedCount = 0;
+
+    _audioMixerAvatarStreamAudioStats = AudioStreamStats();
+    _audioMixerInjectedStreamAudioStatsMap.clear();
+
+    //_outgoingAvatarAudioSequenceNumber = 0;
+    _incomingMixedAudioSequenceNumberStats.reset();
+
+    _interframeTimeGapStats.reset();
+
+    _audioInputMsecsReadStats.reset();
+    _inputRingBufferMsecsAvailableStats.reset();
+
+    _outputRingBufferFramesAvailableStats.reset();
+    _audioOutputMsecsUnplayedStats.reset();
+}
+
+void Audio::audioMixerKilled() {
+    _outgoingAvatarAudioSequenceNumber = 0;
+    resetStats();
+}
 
 QAudioDeviceInfo getNamedAudioDeviceForMode(QAudio::Mode mode, const QString& deviceName) {
@@ -499,8 +521,11 @@ void Audio::handleAudioInput() {
     }
 
     _inputRingBuffer.writeData(inputByteArray.data(), inputByteArray.size());
 
+    float audioInputMsecsRead = inputByteArray.size() / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
+    _audioInputMsecsReadStats.update(audioInputMsecsRead);
+
-    while (_inputRingBuffer.samplesAvailable() > inputSamplesRequired) {
+    while (_inputRingBuffer.samplesAvailable() >= inputSamplesRequired) {
 
         int16_t* inputAudioSamples = new int16_t[inputSamplesRequired];
         _inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);
@@ -811,11 +836,12 @@ AudioStreamStats Audio::getDownstreamAudioStreamStats() const {
 
 void Audio::sendDownstreamAudioStatsPacket() {
 
-    _inputRingBufferFramesAvailableStats.update(getInputRingBufferFramesAvailable());
+    // since this function is called every second, we'll sample some of our stats here
+    _inputRingBufferMsecsAvailableStats.update(getInputRingBufferMsecsAvailable());
 
-    // since this function is called every second, we'll sample the number of audio frames available here.
     _outputRingBufferFramesAvailableStats.update(_ringBuffer.framesAvailable());
-    _audioOutputBufferFramesAvailableStats.update(getOutputRingBufferFramesAvailable());
+    _audioOutputMsecsUnplayedStats.update(getAudioOutputMsecsUnplayed());
 
     // push the current seq number stats into history, which moves the history window forward 1s
     // (since that's how often pushStatsToHistory() is called)
@@ -1286,6 +1312,10 @@ void Audio::toggleScopePause() {
     _scopeEnabledPause = !_scopeEnabledPause;
 }
 
+void Audio::toggleStats() {
+    _statsEnabled = !_statsEnabled;
+}
+
 void Audio::selectAudioScopeFiveFrames() {
     if (Menu::getInstance()->isOptionChecked(MenuOption::AudioScopeFiveFrames)) {
         reallocateScope(5);
@@ -1365,6 +1395,174 @@ void Audio::addBufferToScope(
     }
 }
 
+void Audio::renderStats(const float* color, int width, int height) {
+    if (!_statsEnabled) {
+        return;
+    }
+
+    const int LINES_WHEN_CENTERED = 30;
+    const int CENTERED_BACKGROUND_HEIGHT = STATS_HEIGHT_PER_LINE * LINES_WHEN_CENTERED;
+
+    int lines = _audioMixerInjectedStreamAudioStatsMap.size() * 7 + 23;
+    int statsHeight = STATS_HEIGHT_PER_LINE * lines;
+
+    static const float backgroundColor[4] = { 0.2f, 0.2f, 0.2f, 0.6f };
+
+    int x = std::max((width - (int)STATS_WIDTH) / 2, 0);
+    int y = std::max((height - CENTERED_BACKGROUND_HEIGHT) / 2, 0);
+    int w = STATS_WIDTH;
+    int h = statsHeight;
+    renderBackground(backgroundColor, x, y, w, h);
+
+    int horizontalOffset = x + 5;
+    int verticalOffset = y;
+
+    float scale = 0.10f;
+    float rotation = 0.0f;
+    int font = 2;
+
+    char latencyStatString[512];
+
+    const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
+
+    float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f, mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f;
+
+    SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer);
+    if (!audioMixerNodePointer.isNull()) {
+        audioInputBufferLatency = _audioInputMsecsReadStats.getWindowAverage();
+        inputRingBufferLatency = getInputRingBufferAverageMsecsAvailable();
+        networkRoundtripLatency = audioMixerNodePointer->getPingMs();
+        mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
+        outputRingBufferLatency = _outputRingBufferFramesAvailableStats.getWindowAverage() * BUFFER_SEND_INTERVAL_MSECS;
+        audioOutputBufferLatency = _audioOutputMsecsUnplayedStats.getWindowAverage();
+    }
+    float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency;
+
+    sprintf(latencyStatString, " Audio input buffer: %7.2fms - avg msecs of samples read to the input ring buffer in last 10s", audioInputBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " Input ring buffer: %7.2fms - avg msecs of samples in input ring buffer in last 10s", inputRingBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " Network to mixer: %7.2fms - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " AudioMixer ring buffer: %7.2fms - avg msecs of samples in audio mixer's ring buffer in last 10s", mixerRingBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " Network to client: %7.2fms - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " Output ring buffer: %7.2fms - avg msecs of samples in output ring buffer in last 10s", outputRingBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " Audio output buffer: %7.2fms - avg msecs of samples in audio output buffer in last 10s", audioOutputBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " TOTAL: %7.2fms\n", totalLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
+
+    char downstreamLabelString[] = "Downstream mixed audio stats:";
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
+
+    renderAudioStreamStats(getDownstreamAudioStreamStats(), horizontalOffset, verticalOffset, scale, rotation, font, color, true);
+
+    verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
+
+    char upstreamMicLabelString[] = "Upstream mic audio stats:";
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamMicLabelString, color);
+
+    renderAudioStreamStats(_audioMixerAvatarStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
+
+    foreach(const AudioStreamStats& injectedStreamAudioStats, _audioMixerInjectedStreamAudioStatsMap) {
+
+        verticalOffset += STATS_HEIGHT_PER_LINE; // blank line
+
+        char upstreamInjectedLabelString[512];
+        sprintf(upstreamInjectedLabelString, "Upstream injected audio stats: stream ID: %s",
+            injectedStreamAudioStats._streamIdentifier.toString().toLatin1().data());
+        verticalOffset += STATS_HEIGHT_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamInjectedLabelString, color);
+
+        renderAudioStreamStats(injectedStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
+    }
+}
+
+void Audio::renderAudioStreamStats(const AudioStreamStats& streamStats, int horizontalOffset, int& verticalOffset,
+    float scale, float rotation, int font, const float* color, bool isDownstreamStats) {
+
+    char stringBuffer[512];
+
+    sprintf(stringBuffer, " Packet loss | overall: %5.2f%% (%d lost), last_30s: %5.2f%% (%d lost)",
+        streamStats._packetStreamStats.getLostRate() * 100.0f,
+        streamStats._packetStreamStats._numLost,
+        streamStats._packetStreamWindowStats.getLostRate() * 100.0f,
+        streamStats._packetStreamWindowStats._numLost);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    if (isDownstreamStats) {
+
+        const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
+        sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u+%d, available: %u+%d",
+            streamStats._ringBufferDesiredJitterBufferFrames,
+            streamStats._ringBufferFramesAvailableAverage,
+            (int)(getAudioOutputAverageMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS),
+            streamStats._ringBufferFramesAvailable,
+            (int)(getAudioOutputMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS));
+    } else {
+        sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u",
+            streamStats._ringBufferDesiredJitterBufferFrames,
+            streamStats._ringBufferFramesAvailableAverage,
+            streamStats._ringBufferFramesAvailable);
+    }
+
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    sprintf(stringBuffer, " Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u",
+        streamStats._ringBufferStarveCount,
+        streamStats._ringBufferConsecutiveNotMixedCount,
+        streamStats._ringBufferSilentFramesDropped,
+        streamStats._ringBufferOverflowCount);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    sprintf(stringBuffer, " Inter-packet timegaps (overall) | min: %9s, max: %9s, avg: %9s",
+        formatUsecTime(streamStats._timeGapMin).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapMax).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapAverage).toLatin1().data());
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    sprintf(stringBuffer, " Inter-packet timegaps (last 30s) | min: %9s, max: %9s, avg: %9s",
+        formatUsecTime(streamStats._timeGapWindowMin).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapWindowMax).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapWindowAverage).toLatin1().data());
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+}
+
 void Audio::renderScope(int width, int height) {
 
     if (!_scopeEnabled)
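Editor's note: the total in renderStats is a straight sum of six pipeline stages, with the round-trip ping displayed as two halves (one per network leg). A standalone sketch of that arithmetic with invented sample values in milliseconds:

// Worked example of the latency accounting; every number here is made up.
var audioInputBuffer = 11.6;   // avg msecs read into the input ring buffer
var inputRingBuffer = 5.2;     // avg msecs sitting in the input ring buffer
var pingMsecs = 40.0;          // round-trip ping to the audio mixer
var mixerRingBuffer = 23.0;    // frames in the mixer's ring buffer, as msecs
var outputRingBuffer = 12.0;   // msecs waiting in the output ring buffer
var audioOutputBuffer = 8.4;   // msecs queued in the device output buffer

var total = audioInputBuffer + inputRingBuffer +
            pingMsecs / 2 +    // network to mixer
            mixerRingBuffer +
            pingMsecs / 2 +    // network back to client
            outputRingBuffer + audioOutputBuffer;
console.log(total.toFixed(2) + "ms"); // 100.20ms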
@@ -1622,15 +1820,14 @@ int Audio::calculateNumberOfFrameSamples(int numBytes) const {
     return frameSamples;
 }
 
-int Audio::getOutputRingBufferFramesAvailable() const {
-    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
-        * (_desiredOutputFormat.channelCount() / (float)_outputFormat.channelCount());
-
-    return (_audioOutput->bufferSize() - _audioOutput->bytesFree()) * networkOutputToOutputRatio
-        / (sizeof(int16_t) * _ringBuffer.getNumFrameSamples());
+float Audio::getAudioOutputMsecsUnplayed() const {
+    int bytesAudioOutputUnplayed = _audioOutput->bufferSize() - _audioOutput->bytesFree();
+    float msecsAudioOutputUnplayed = bytesAudioOutputUnplayed / (float)_outputFormat.bytesForDuration(USECS_PER_MSEC);
+    return msecsAudioOutputUnplayed;
 }
 
-int Audio::getInputRingBufferFramesAvailable() const {
-    float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
-    return _inputRingBuffer.samplesAvailable() / inputToNetworkInputRatio / _inputRingBuffer.getNumFrameSamples();
+float Audio::getInputRingBufferMsecsAvailable() const {
+    int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * sizeof(int16_t);
+    float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
+    return msecsInInputRingBuffer;
 }
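Editor's note: both new getters reduce to the same conversion, bytes pending divided by bytes-per-millisecond for the relevant PCM format. A standalone sketch of that conversion (the bytes-per-msec figure plays the role of Qt's bytesForDuration, derived here from sample rate, channels, and sample size):

// msecs of audio represented by a byte count, given a PCM format.
function bytesPerMsec(sampleRate, channels, bytesPerSample) {
    return (sampleRate * channels * bytesPerSample) / 1000.0;
}

function msecsOfAudio(bytes, sampleRate, channels, bytesPerSample) {
    return bytes / bytesPerMsec(sampleRate, channels, bytesPerSample);
}

// e.g. 9600 bytes of 48kHz stereo int16 audio:
console.log(msecsOfAudio(9600, 48000, 2, 2)); // 50 (ms)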
@@ -71,6 +71,7 @@ public:
 
     void renderToolBox(int x, int y, bool boxed);
     void renderScope(int width, int height);
+    void renderStats(const float* color, int width, int height);
 
     int getNetworkSampleRate() { return SAMPLE_RATE; }
     int getNetworkBufferLengthSamplesPerChannel() { return NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }
@@ -78,12 +79,12 @@ public:
     bool getProcessSpatialAudio() const { return _processSpatialAudio; }
 
     const SequenceNumberStats& getIncomingMixedAudioSequenceNumberStats() const { return _incomingMixedAudioSequenceNumberStats; }
 
-    int getInputRingBufferFramesAvailable() const;
-    int getInputRingBufferAverageFramesAvailable() const { return (int)_inputRingBufferFramesAvailableStats.getWindowAverage(); }
+    float getInputRingBufferMsecsAvailable() const;
+    float getInputRingBufferAverageMsecsAvailable() const { return (float)_inputRingBufferMsecsAvailableStats.getWindowAverage(); }
 
-    int getOutputRingBufferFramesAvailable() const;
-    int getOutputRingBufferAverageFramesAvailable() const { return (int)_audioOutputBufferFramesAvailableStats.getWindowAverage(); }
+    float getAudioOutputMsecsUnplayed() const;
+    float getAudioOutputAverageMsecsUnplayed() const { return (float)_audioOutputMsecsUnplayedStats.getWindowAverage(); }
 
 public slots:
     void start();
@@ -93,12 +94,14 @@ public slots:
     void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
     void handleAudioInput();
     void reset();
-    void resetIncomingMixedAudioSequenceNumberStats() { _incomingMixedAudioSequenceNumberStats.reset(); }
+    void resetStats();
+    void audioMixerKilled();
     void toggleMute();
     void toggleAudioNoiseReduction();
     void toggleToneInjection();
     void toggleScope();
     void toggleScopePause();
+    void toggleStats();
     void toggleAudioSpatialProcessing();
     void toggleStereoInput();
     void selectAudioScopeFiveFrames();
@@ -245,6 +248,10 @@ private:
     void renderGrid(const float* color, int x, int y, int width, int height, int rows, int cols);
     void renderLineStrip(const float* color, int x, int y, int n, int offset, const QByteArray* byteArray);
 
+    // audio stats methods for rendering
+    void renderAudioStreamStats(const AudioStreamStats& streamStats, int horizontalOffset, int& verticalOffset,
+        float scale, float rotation, int font, const float* color, bool isDownstreamStats = false);
+
     // Audio scope data
     static const unsigned int NETWORK_SAMPLES_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
     static const unsigned int DEFAULT_FRAMES_PER_SCOPE = 5;
@@ -261,6 +268,13 @@ private:
     QByteArray* _scopeInput;
     QByteArray* _scopeOutputLeft;
     QByteArray* _scopeOutputRight;
+#ifdef _WIN32
+    static const unsigned int STATS_WIDTH = 1500;
+#else
+    static const unsigned int STATS_WIDTH = 650;
+#endif
+    static const unsigned int STATS_HEIGHT_PER_LINE = 20;
+    bool _statsEnabled;
 
     int _starveCount;
     int _consecutiveNotMixedCount;
@@ -273,10 +287,11 @@ private:
 
     MovingMinMaxAvg<quint64> _interframeTimeGapStats;
 
-    MovingMinMaxAvg<int> _inputRingBufferFramesAvailableStats;
+    MovingMinMaxAvg<float> _audioInputMsecsReadStats;
+    MovingMinMaxAvg<float> _inputRingBufferMsecsAvailableStats;
 
     MovingMinMaxAvg<int> _outputRingBufferFramesAvailableStats;
-    MovingMinMaxAvg<int> _audioOutputBufferFramesAvailableStats;
+    MovingMinMaxAvg<float> _audioOutputMsecsUnplayedStats;
 };
 
@@ -593,6 +593,12 @@ Menu::Menu() :
                                            Qt::CTRL | Qt::SHIFT | Qt::Key_U,
                                            false);
 
+        addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioStats,
+                                               0,
+                                               false,
+                                               appInstance->getAudio(),
+                                               SLOT(toggleStats()));
+
         addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, true);
 
         addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
@@ -315,6 +315,7 @@ namespace MenuOption {
     const QString AudioScopeFrames = "Display Frames";
     const QString AudioScopePause = "Pause Audio Scope";
     const QString AudioScopeTwentyFrames = "Twenty";
+    const QString AudioStats = "Audio Stats";
     const QString AudioSpatialProcessingAlternateDistanceAttenuate = "Alternate distance attenuation";
     const QString AudioSpatialProcessing = "Audio Spatial Processing";
     const QString AudioSpatialProcessingDontDistanceAttenuate = "Don't calculate distance attenuation";
@@ -218,6 +218,52 @@ static TextRenderer* textRenderer(TextRendererType type) {
 }
 
 void Avatar::render(const glm::vec3& cameraPosition, RenderMode renderMode) {
+    if (glm::distance(Application::getInstance()->getAvatar()->getPosition(),
+                      _position) < 10.0f) {
+        // render pointing lasers
+        glm::vec3 laserColor = glm::vec3(1.0f, 0.0f, 1.0f);
+        float laserLength = 50.0f;
+        if (_handState == HAND_STATE_LEFT_POINTING ||
+            _handState == HAND_STATE_BOTH_POINTING) {
+            int leftIndex = _skeletonModel.getLeftHandJointIndex();
+            glm::vec3 leftPosition;
+            glm::quat leftRotation;
+            _skeletonModel.getJointPositionInWorldFrame(leftIndex, leftPosition);
+            _skeletonModel.getJointRotationInWorldFrame(leftIndex, leftRotation);
+            glPushMatrix(); {
+                glTranslatef(leftPosition.x, leftPosition.y, leftPosition.z);
+                float angle = glm::degrees(glm::angle(leftRotation));
+                glm::vec3 axis = glm::axis(leftRotation);
+                glRotatef(angle, axis.x, axis.y, axis.z);
+                glBegin(GL_LINES);
+                glColor3f(laserColor.x, laserColor.y, laserColor.z);
+                glVertex3f(0.0f, 0.0f, 0.0f);
+                glVertex3f(0.0f, laserLength, 0.0f);
+                glEnd();
+            } glPopMatrix();
+        }
+        if (_handState == HAND_STATE_RIGHT_POINTING ||
+            _handState == HAND_STATE_BOTH_POINTING) {
+            int rightIndex = _skeletonModel.getRightHandJointIndex();
+            glm::vec3 rightPosition;
+            glm::quat rightRotation;
+            _skeletonModel.getJointPositionInWorldFrame(rightIndex, rightPosition);
+            _skeletonModel.getJointRotationInWorldFrame(rightIndex, rightRotation);
+            glPushMatrix(); {
+                glTranslatef(rightPosition.x, rightPosition.y, rightPosition.z);
+                float angle = glm::degrees(glm::angle(rightRotation));
+                glm::vec3 axis = glm::axis(rightRotation);
+                glRotatef(angle, axis.x, axis.y, axis.z);
+                glBegin(GL_LINES);
+                glColor3f(laserColor.x, laserColor.y, laserColor.z);
+                glVertex3f(0.0f, 0.0f, 0.0f);
+                glVertex3f(0.0f, laserLength, 0.0f);
+                glEnd();
+            } glPopMatrix();
+        }
+    }
+
     // simple frustum check
     float boundingRadius = getBillboardSize();
     ViewFrustum* frustum = (renderMode == Avatar::SHADOW_RENDER_MODE) ?
|
@ -125,7 +125,7 @@ void Hand::render(bool isMine, Model::RenderMode renderMode) {
|
|||
|
||||
glEnable(GL_DEPTH_TEST);
|
||||
glEnable(GL_RESCALE_NORMAL);
|
||||
}
|
||||
}
|
||||
|
||||
void Hand::renderHandTargets(bool isMine) {
|
||||
glPushMatrix();
|
||||
|
|
|
@@ -159,6 +159,10 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
         }
     }
+    _eyePosition = calculateAverageEyePosition();
+
+    float velocityFilter = glm::clamp(1.0f - glm::length(_filteredEyePosition - _eyePosition), 0.0f, 1.0f);
+    _filteredEyePosition = velocityFilter * _filteredEyePosition + (1.0f - velocityFilter) * _eyePosition;
 }
 
 void Head::relaxLean(float deltaTime) {
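Editor's note: the new filter weights the smoothed position by how far it already lags the raw one. Nearly static eyes get heavy smoothing (the filter term is near 1), while fast motion collapses it toward 0 so the output snaps to the measurement. A one-dimensional standalone sketch of the same clamp-and-lerp:

// 1-D version of the velocity filter: smoothing strength falls off as the
// gap between filtered and raw values grows (the gap acts as a velocity proxy).
function clamp(x, lo, hi) { return Math.min(Math.max(x, lo), hi); }

function filterEye(filtered, raw) {
    var velocityFilter = clamp(1.0 - Math.abs(filtered - raw), 0.0, 1.0);
    return velocityFilter * filtered + (1.0 - velocityFilter) * raw;
}

console.log(filterEye(1.00, 1.01).toFixed(4)); // 1.0001: tiny motion, heavily smoothed
console.log(filterEye(1.00, 2.00).toFixed(4)); // 2.0000: a jump of 1 or more passes straight through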
@@ -88,8 +88,7 @@ public:
 
     const bool getReturnToCenter() const { return _returnHeadToCenter; } // Do you want head to try to return to center (depends on interface detected)
     float getAverageLoudness() const { return _averageLoudness; }
-    glm::vec3 calculateAverageEyePosition() const { return _leftEyePosition + (_rightEyePosition - _leftEyePosition ) * ONE_HALF; }
-
+    glm::vec3 getFilteredEyePosition() const { return _filteredEyePosition; }
     /// \return the point about which scaling occurs.
     glm::vec3 getScalePivot() const;
 
@@ -110,6 +109,8 @@ public:
     void addLeanDeltas(float sideways, float forward);
 
 private:
+    glm::vec3 calculateAverageEyePosition() const { return _leftEyePosition + (_rightEyePosition - _leftEyePosition ) * ONE_HALF; }
+
     // disallow copies of the Head, copy of owning Avatar is disallowed too
     Head(const Head&);
     Head& operator= (const Head&);
@@ -120,6 +121,8 @@ private:
     glm::vec3 _leftEyePosition;
     glm::vec3 _rightEyePosition;
+    glm::vec3 _eyePosition;
+    glm::vec3 _filteredEyePosition; // velocity filtered world space eye position
 
     float _scale;
     float _lastLoudness;
     float _audioAttack;
|
@ -137,9 +137,6 @@ void MyAvatar::simulate(float deltaTime) {
|
|||
}
|
||||
_skeletonModel.setShowTrueJointTransforms(! Menu::getInstance()->isOptionChecked(MenuOption::CollideAsRagdoll));
|
||||
|
||||
// no extra movement of the hand here any more ...
|
||||
_handState = HAND_STATE_NULL;
|
||||
|
||||
{
|
||||
PerformanceTimer perfTimer("transform");
|
||||
updateOrientation(deltaTime);
|
||||
|
@ -165,9 +162,16 @@ void MyAvatar::simulate(float deltaTime) {
|
|||
PerformanceTimer perfTimer("joints");
|
||||
// copy out the skeleton joints from the model
|
||||
_jointData.resize(_skeletonModel.getJointStateCount());
|
||||
for (int i = 0; i < _jointData.size(); i++) {
|
||||
JointData& data = _jointData[i];
|
||||
data.valid = _skeletonModel.getJointState(i, data.rotation);
|
||||
if (Menu::getInstance()->isOptionChecked(MenuOption::CollideAsRagdoll)) {
|
||||
for (int i = 0; i < _jointData.size(); i++) {
|
||||
JointData& data = _jointData[i];
|
||||
data.valid = _skeletonModel.getVisibleJointState(i, data.rotation);
|
||||
}
|
||||
} else {
|
||||
for (int i = 0; i < _jointData.size(); i++) {
|
||||
JointData& data = _jointData[i];
|
||||
data.valid = _skeletonModel.getJointState(i, data.rotation);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -908,7 +912,7 @@ const float RENDER_HEAD_CUTOFF_DISTANCE = 0.50f;
 bool MyAvatar::shouldRenderHead(const glm::vec3& cameraPosition, RenderMode renderMode) const {
     const Head* head = getHead();
     return (renderMode != NORMAL_RENDER_MODE) ||
-        (glm::length(cameraPosition - head->calculateAverageEyePosition()) > RENDER_HEAD_CUTOFF_DISTANCE * _scale);
+        (glm::length(cameraPosition - head->getEyePosition()) > RENDER_HEAD_CUTOFF_DISTANCE * _scale);
 }
 
 float MyAvatar::computeDistanceToFloor(const glm::vec3& startPoint) {
@@ -21,9 +21,9 @@
 enum AvatarHandState
 {
     HAND_STATE_NULL = 0,
     HAND_STATE_OPEN,
     HAND_STATE_GRASPING,
-    HAND_STATE_POINTING,
+    HAND_STATE_LEFT_POINTING,
+    HAND_STATE_RIGHT_POINTING,
+    HAND_STATE_BOTH_POINTING,
     NUM_HAND_STATES
 };
 
@@ -533,6 +533,7 @@ void SkeletonModel::buildRagdollConstraints() {
         _ragdollConstraints.push_back(anchor);
     } else {
         DistanceConstraint* bone = new DistanceConstraint(&(_ragdollPoints[i]), &(_ragdollPoints[parentIndex]));
+        bone->setDistance(state.getDistanceToParent());
         _ragdollConstraints.push_back(bone);
         families.insert(parentIndex, i);
     }
@@ -597,11 +598,7 @@ void SkeletonModel::updateVisibleJointStates() {
 
 // virtual
 void SkeletonModel::stepRagdollForward(float deltaTime) {
-    // NOTE: increasing this timescale reduces vibrations in the ragdoll solution and reduces tunneling
-    // but makes the shapes slower to follow the body (introduces lag).
-    const float RAGDOLL_FOLLOWS_JOINTS_TIMESCALE = 0.05f;
-    float fraction = glm::clamp(deltaTime / RAGDOLL_FOLLOWS_JOINTS_TIMESCALE, 0.0f, 1.0f);
-    moveShapesTowardJoints(fraction);
+    moveShapesTowardJoints(deltaTime);
 }
 
 float DENSITY_OF_WATER = 1000.0f; // kg/m^3
@@ -676,17 +673,52 @@ void SkeletonModel::buildShapes() {
     enforceRagdollConstraints();
 }
 
-void SkeletonModel::moveShapesTowardJoints(float fraction) {
+void SkeletonModel::moveShapesTowardJoints(float deltaTime) {
     const int numStates = _jointStates.size();
-    assert(_jointStates.size() == _ragdollPoints.size());
-    assert(fraction >= 0.0f && fraction <= 1.0f);
-    if (_ragdollPoints.size() == numStates) {
-        float oneMinusFraction = 1.0f - fraction;
-        int numJoints = _jointStates.size();
-        for (int i = 0; i < numJoints; ++i) {
-            _ragdollPoints[i]._lastPosition = _ragdollPoints[i]._position;
-            _ragdollPoints[i]._position = oneMinusFraction * _ragdollPoints[i]._position + fraction * _jointStates.at(i).getPosition();
+    if (_ragdollPoints.size() != numStates) {
+        return;
     }
+
+    // fraction = 0 means keep old position, = 1 means slave 100% to target position
+    const float RAGDOLL_FOLLOWS_JOINTS_TIMESCALE = 0.05f;
+    float fraction = glm::clamp(deltaTime / RAGDOLL_FOLLOWS_JOINTS_TIMESCALE, 0.0f, 1.0f);
+
+    // SIMPLE LINEAR SLAVING -- KEEP this implementation for reference
+    //float oneMinusFraction = 1.0f - fraction;
+    //for (int i = 0; i < numStates; ++i) {
+    //    _ragdollPoints[i]._lastPosition = _ragdollPoints[i]._position;
+    //    _ragdollPoints[i]._position = oneMinusFraction * _ragdollPoints[i]._position + fraction * _jointStates.at(i).getPosition();
+    //}
+    // SIMPLE LINEAR SLAVING -- KEEP
+
+    // parent-relative linear slaving
+    for (int i = 0; i < numStates; ++i) {
+        JointState& state = _jointStates[i];
+        _ragdollPoints[i]._lastPosition = _ragdollPoints.at(i)._position;
+
+        int p = state.getParentIndex();
+        if (p == -1) {
+            _ragdollPoints[i]._position = glm::vec3(0.0f);
+            continue;
+        }
+        if (state.getDistanceToParent() < EPSILON) {
+            _ragdollPoints[i]._position = _ragdollPoints.at(p)._position;
+            continue;
+        }
+
+        glm::vec3 bone = _ragdollPoints.at(i)._lastPosition - _ragdollPoints.at(p)._lastPosition;
+        const JointState& parentState = _jointStates.at(p);
+        glm::vec3 targetBone = state.getPosition() - parentState.getPosition();
+
+        glm::vec3 newBone = (1.0f - fraction) * bone + fraction * targetBone;
+        float boneLength = glm::length(newBone);
+        if (boneLength > EPSILON) {
+            // slam newBone's length to that of the joint helps maintain distance constraints
+            newBone *= state.getDistanceToParent() / boneLength;
+        }
+        // set the new position relative to parent's new position
+        _ragdollPoints[i]._position = _ragdollPoints.at(p)._position + newBone;
     }
 }
 
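Editor's note: the new parent-relative slaving lerps each bone vector (child position minus parent position) toward its target, then rescales it to the joint's rest length so the distance constraints start from a consistent state. A minimal standalone sketch in 2-D:

// Blend the current bone vector toward the target bone, then restore the rest length.
function sub(a, b) { return { x: a.x - b.x, y: a.y - b.y }; }
function add(a, b) { return { x: a.x + b.x, y: a.y + b.y }; }
function scale(v, s) { return { x: v.x * s, y: v.y * s }; }
function length(v) { return Math.sqrt(v.x * v.x + v.y * v.y); }

function slaveTowardParent(childPos, parentPos, targetBone, restLength, fraction) {
    var bone = sub(childPos, parentPos);
    var newBone = add(scale(bone, 1.0 - fraction), scale(targetBone, fraction));
    var len = length(newBone);
    if (len > 1e-6) {
        newBone = scale(newBone, restLength / len); // slam length back to the rest bone length
    }
    return add(parentPos, newBone);
}

var next = slaveTowardParent({ x: 1, y: 0 }, { x: 0, y: 0 }, { x: 0, y: 1 }, 1.0, 0.5);
console.log(next); // { x: ~0.707, y: ~0.707 }: halfway rotated, length preserved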
@@ -27,6 +27,7 @@
 
 // Generic client side Octree renderer class.
 class ModelTreeRenderer : public OctreeRenderer, public ModelItemFBXService {
+    Q_OBJECT
 public:
     ModelTreeRenderer();
     virtual ~ModelTreeRenderer();
@@ -56,7 +57,7 @@ public:
 
 protected:
     void clearModelsCache();
-    Model* getModel(const ModelItem& modelItem);
+    Q_INVOKABLE Model* getModel(const ModelItem& modelItem);
     QMap<uint32_t, Model*> _knownModelsItemModels;
     QMap<uint32_t, Model*> _unknownModelsItemModels;
 };
|
@@ -20,6 +20,7 @@
JointState::JointState() :
    _animationPriority(0.0f),
    _positionInParentFrame(0.0f),
    _distanceToParent(0.0f),
    _fbxJoint(NULL),
    _constraint(NULL) {
}

@@ -29,6 +30,7 @@ JointState::JointState(const JointState& other) : _constraint(NULL) {
    _rotation = other._rotation;
    _rotationInConstrainedFrame = other._rotationInConstrainedFrame;
    _positionInParentFrame = other._positionInParentFrame;
    _distanceToParent = other._distanceToParent;
    _animationPriority = other._animationPriority;
    _fbxJoint = other._fbxJoint;
    // DO NOT copy _constraint

@@ -72,6 +74,7 @@ void JointState::copyState(const JointState& state) {
    _rotation = extractRotation(_transform);
    _rotationInConstrainedFrame = state._rotationInConstrainedFrame;
    _positionInParentFrame = state._positionInParentFrame;
    _distanceToParent = state._distanceToParent;

    _visibleTransform = state._visibleTransform;
    _visibleRotation = extractRotation(_visibleTransform);

@@ -82,6 +85,7 @@ void JointState::copyState(const JointState& state) {
void JointState::initTransform(const glm::mat4& parentTransform) {
    computeTransform(parentTransform);
    _positionInParentFrame = glm::inverse(extractRotation(parentTransform)) * (extractTranslation(_transform) - extractTranslation(parentTransform));
    _distanceToParent = glm::length(_positionInParentFrame);
}

void JointState::computeTransform(const glm::mat4& parentTransform) {

@@ -214,6 +218,14 @@ void JointState::setVisibleRotationInConstrainedFrame(const glm::quat& targetRot
    _visibleRotation = parentRotation * _fbxJoint->preRotation * _visibleRotationInConstrainedFrame * _fbxJoint->postRotation;
}

const bool JointState::rotationIsDefault(const glm::quat& rotation, float tolerance) const {
    glm::quat defaultRotation = _fbxJoint->rotation;
    return glm::abs(rotation.x - defaultRotation.x) < tolerance &&
        glm::abs(rotation.y - defaultRotation.y) < tolerance &&
        glm::abs(rotation.z - defaultRotation.z) < tolerance &&
        glm::abs(rotation.w - defaultRotation.w) < tolerance;
}

const glm::vec3& JointState::getDefaultTranslationInConstrainedFrame() const {
    assert(_fbxJoint != NULL);
    return _fbxJoint->translation;

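The new rotationIsDefault above compares quaternion components one by one against a tolerance. A standalone sketch of the same idea follows; one caveat worth knowing is that q and -q represent the same rotation, so a componentwise test can report "different" for equivalent rotations, which is acceptable here since the values being compared come from the same pipeline.

    #include <cmath>
    #include <cstdio>
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Componentwise quaternion comparison with a tolerance, mirroring the
    // pattern used by JointState::rotationIsDefault.
    bool nearlyEqual(const glm::quat& a, const glm::quat& b, float tolerance) {
        return std::fabs(a.x - b.x) < tolerance &&
               std::fabs(a.y - b.y) < tolerance &&
               std::fabs(a.z - b.z) < tolerance &&
               std::fabs(a.w - b.w) < tolerance;
    }

    int main() {
        glm::quat identity(1.0f, 0.0f, 0.0f, 0.0f); // (w, x, y, z)
        glm::quat wiggle = glm::angleAxis(0.0001f, glm::vec3(0.0f, 1.0f, 0.0f));
        std::printf("%d\n", nearlyEqual(identity, wiggle, 0.001f)); // 1: within tolerance
        return 0;
    }
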
@@ -51,6 +51,7 @@ public:
    glm::quat getRotationInParentFrame() const;
    glm::quat getVisibleRotationInParentFrame() const;
    const glm::vec3& getPositionInParentFrame() const { return _positionInParentFrame; }
    float getDistanceToParent() const { return _distanceToParent; }

    int getParentIndex() const { return _fbxJoint->parentIndex; }

@@ -81,6 +82,9 @@ public:
    void setRotationInConstrainedFrame(const glm::quat& targetRotation);
    void setVisibleRotationInConstrainedFrame(const glm::quat& targetRotation);
    const glm::quat& getRotationInConstrainedFrame() const { return _rotationInConstrainedFrame; }
    const glm::quat& getVisibleRotationInConstrainedFrame() const { return _visibleRotationInConstrainedFrame; }

    const bool rotationIsDefault(const glm::quat& rotation, float tolerance = EPSILON) const;

    const glm::vec3& getDefaultTranslationInConstrainedFrame() const;

@@ -104,6 +108,7 @@ private:
    glm::quat _rotation; // joint- to model-frame
    glm::quat _rotationInConstrainedFrame; // rotation in frame where angular constraints would be applied
    glm::vec3 _positionInParentFrame; // only changes when the Model is scaled
    float _distanceToParent;

    glm::mat4 _visibleTransform;
    glm::quat _visibleRotation;

@@ -676,12 +676,18 @@ bool Model::getJointState(int index, glm::quat& rotation) const {
    if (index == -1 || index >= _jointStates.size()) {
        return false;
    }
    rotation = _jointStates.at(index).getRotationInConstrainedFrame();
    const glm::quat& defaultRotation = _geometry->getFBXGeometry().joints.at(index).rotation;
    return glm::abs(rotation.x - defaultRotation.x) >= EPSILON ||
        glm::abs(rotation.y - defaultRotation.y) >= EPSILON ||
        glm::abs(rotation.z - defaultRotation.z) >= EPSILON ||
        glm::abs(rotation.w - defaultRotation.w) >= EPSILON;
    const JointState& state = _jointStates.at(index);
    rotation = state.getRotationInConstrainedFrame();
    return !state.rotationIsDefault(rotation);
}

bool Model::getVisibleJointState(int index, glm::quat& rotation) const {
    if (index == -1 || index >= _jointStates.size()) {
        return false;
    }
    const JointState& state = _jointStates.at(index);
    rotation = state.getVisibleRotationInConstrainedFrame();
    return !state.rotationIsDefault(rotation);
}

void Model::setJointState(int index, bool valid, const glm::quat& rotation, float priority) {

@@ -113,6 +113,10 @@ public:
    /// Fetches the joint state at the specified index.
    /// \return whether or not the joint state is "valid" (that is, non-default)
    bool getJointState(int index, glm::quat& rotation) const;

    /// Fetches the visible joint state at the specified index.
    /// \return whether or not the joint state is "valid" (that is, non-default)
    bool getVisibleJointState(int index, glm::quat& rotation) const;

    /// Sets the joint state at the specified index.
    void setJointState(int index, bool valid, const glm::quat& rotation = glm::quat(), float priority = 1.0f);

@@ -178,7 +178,7 @@ void ApplicationOverlay::computeOculusPickRay(float x, float y, glm::vec3& direc
    float dist = sqrt(x * x + y * y);
    float z = -sqrt(1.0f - dist * dist);

    glm::vec3 relativePosition = myAvatar->getHead()->calculateAverageEyePosition() +
    glm::vec3 relativePosition = myAvatar->getHead()->getEyePosition() +
        glm::normalize(myAvatar->getOrientation() * glm::vec3(x, y, z));

    //Rotate the UI pick ray by the avatar orientation

@@ -274,7 +274,7 @@ QPoint ApplicationOverlay::getPalmClickLocation(const PalmData *palm) const {
    MyAvatar* myAvatar = application->getAvatar();

    glm::vec3 tip = myAvatar->getLaserPointerTipPosition(palm);
    glm::vec3 eyePos = myAvatar->getHead()->calculateAverageEyePosition();
    glm::vec3 eyePos = myAvatar->getHead()->getEyePosition();
    glm::quat orientation = glm::inverse(myAvatar->getOrientation());
    glm::vec3 dir = orientation * glm::normalize(application->getCamera()->getPosition() - tip); //direction of ray goes towards camera
    glm::vec3 tipPos = orientation * (tip - eyePos);

@@ -331,7 +331,7 @@ bool ApplicationOverlay::calculateRayUICollisionPoint(const glm::vec3& position,

    glm::quat orientation = myAvatar->getOrientation();

    glm::vec3 relativePosition = orientation * (position - myAvatar->getHead()->calculateAverageEyePosition());
    glm::vec3 relativePosition = orientation * (position - myAvatar->getHead()->getEyePosition());
    glm::vec3 relativeDirection = orientation * direction;

    float t;

@@ -375,7 +375,7 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {

    glPushMatrix();
    const glm::quat& orientation = myAvatar->getOrientation();
    const glm::vec3& position = myAvatar->getHead()->calculateAverageEyePosition();
    const glm::vec3& position = myAvatar->getHead()->getEyePosition();

    glm::mat4 rotation = glm::toMat4(orientation);

@@ -1022,6 +1022,8 @@ void ApplicationOverlay::renderAudioMeter() {

    audio->renderScope(glWidget->width(), glWidget->height());

    audio->renderStats(WHITE_TEXT, glWidget->width(), glWidget->height());

    glBegin(GL_QUADS);
    if (isClipping) {
        glColor3f(1, 0, 0);

@@ -1210,7 +1212,7 @@ void ApplicationOverlay::renderTexturedHemisphere() {
    Application* application = Application::getInstance();
    MyAvatar* myAvatar = application->getAvatar();
    const glm::quat& orientation = myAvatar->getOrientation();
    const glm::vec3& position = myAvatar->getHead()->calculateAverageEyePosition();
    const glm::vec3& position = myAvatar->getHead()->getEyePosition();

    glm::mat4 rotation = glm::toMat4(orientation);

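For reference, the pick-ray math in computeOculusPickRay above maps a 2D overlay coordinate onto a hemisphere in front of the eye: with dist = sqrt(x*x + y*y), the depth is z = -sqrt(1 - dist*dist), giving a unit vector that is then rotated by the avatar's orientation. A minimal sketch, assuming x and y are already normalized to lie inside the unit disc:

    #include <cmath>
    #include <cstdio>
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Map a 2D point on the overlay disc to a unit direction on the hemisphere
    // in front of the viewer (forward is -z), then rotate into world space.
    glm::vec3 pickRayDirection(float x, float y, const glm::quat& orientation) {
        float dist = std::sqrt(x * x + y * y);    // radial distance in the disc
        float z = -std::sqrt(1.0f - dist * dist); // hemisphere depth
        return glm::normalize(orientation * glm::vec3(x, y, z));
    }

    int main() {
        glm::quat identity(1.0f, 0.0f, 0.0f, 0.0f);
        glm::vec3 dir = pickRayDirection(0.0f, 0.0f, identity);
        std::printf("%.1f %.1f %.1f\n", dir.x, dir.y, dir.z); // 0.0 0.0 -1.0: straight ahead
        return 0;
    }
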
@@ -23,7 +23,6 @@
#include "Menu.h"
#include "ScriptsModel.h"


RunningScriptsWidget::RunningScriptsWidget(QWidget* parent) :
    FramelessDialog(parent, 0, POSITION_LEFT),
    ui(new Ui::RunningScriptsWidget),

@@ -104,13 +103,21 @@ void RunningScriptsWidget::setRunningScripts(const QStringList& list) {
        delete widget->widget();
        delete widget;
    }
    QHash<QString, int> hash;
    const int CLOSE_ICON_HEIGHT = 12;
    for (int i = 0; i < list.size(); i++) {
        if (!hash.contains(list.at(i))) {
            hash.insert(list.at(i), 1);
        }
        QWidget* row = new QWidget(ui->scrollAreaWidgetContents);
        row->setLayout(new QHBoxLayout(row));

        QUrl url = QUrl(list.at(i));
        QLabel* name = new QLabel(url.fileName(), row);
        if (hash.find(list.at(i)).value() != 1) {
            name->setText(name->text() + "(" + QString::number(hash.find(list.at(i)).value()) + ")");
        }
        ++hash[list.at(i)];
        QPushButton* closeButton = new QPushButton(row);
        closeButton->setFlat(true);
        closeButton->setIcon(

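The widget code above uses a QHash<QString, int> to track how many times each script URL appears so repeated entries can be labeled with a count. A compact sketch of that counting pattern, in a simplified two-pass variant that labels duplicates with their total count rather than a running index:

    #include <QHash>
    #include <QStringList>
    #include <QDebug>

    int main() {
        QStringList scripts = { "a.js", "b.js", "a.js" };
        // First pass: count occurrences per name.
        QHash<QString, int> counts;
        for (const QString& s : scripts) {
            ++counts[s]; // operator[] value-initializes the int to 0 on first use
        }
        // Second pass: label duplicates with their count.
        for (const QString& s : scripts) {
            QString label = s;
            if (counts.value(s) > 1) {
                label += " (" + QString::number(counts.value(s)) + ")";
            }
            qDebug().noquote() << label; // a.js (2), b.js, a.js (2)
        }
        return 0;
    }
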
@@ -278,9 +278,8 @@ void Stats::display(

    Audio* audio = Application::getInstance()->getAudio();
    const QHash<QUuid, AudioStreamStats>& audioMixerInjectedStreamAudioStatsMap = audio->getAudioMixerInjectedStreamAudioStatsMap();

    lines = _expanded ? 13 + (audioMixerInjectedStreamAudioStatsMap.size() + 2) * 3 : 3;
    lines = _expanded ? 4 : 3;
    drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
    horizontalOffset += 5;

@@ -313,128 +312,6 @@ void Stats::display(

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, voxelMaxPing, color);

        char inputAudioLabelString[] = "Input: avail_avg_10s/avail";

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, inputAudioLabelString, color);

        char inputAudioStatsString[512];
        sprintf(inputAudioStatsString, " %d/%d", audio->getInputRingBufferAverageFramesAvailable(),
            audio->getInputRingBufferFramesAvailable());

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, inputAudioStatsString, color);

        char audioMixerStatsLabelString[] = "AudioMixer stats:";
        char streamStatsFormatLabelString[] = "lost%/lost_30s%";
        char streamStatsFormatLabelString2[] = "desired/avail_avg_10s/avail";
        char streamStatsFormatLabelString3[] = "gaps: min/max/avg, starv/ovfl";
        char streamStatsFormatLabelString4[] = "gaps_30s: (same), notmix/sdrop";

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, audioMixerStatsLabelString, color);
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString, color);
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString2, color);
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString3, color);
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString4, color);

        char downstreamLabelString[] = " Downstream:";
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);

        char downstreamAudioStatsString[512];

        AudioStreamStats downstreamAudioStreamStats = audio->getDownstreamAudioStreamStats();

        sprintf(downstreamAudioStatsString, " mix: %.2f%%/%.2f%%, %u/%u+%d/%u+%d", downstreamAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
            downstreamAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
            downstreamAudioStreamStats._ringBufferDesiredJitterBufferFrames, downstreamAudioStreamStats._ringBufferFramesAvailableAverage,
            audio->getOutputRingBufferAverageFramesAvailable(),
            downstreamAudioStreamStats._ringBufferFramesAvailable, audio->getOutputRingBufferFramesAvailable());

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);

        sprintf(downstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(downstreamAudioStreamStats._timeGapMin).toLatin1().data(),
            formatUsecTime(downstreamAudioStreamStats._timeGapMax).toLatin1().data(),
            formatUsecTime(downstreamAudioStreamStats._timeGapAverage).toLatin1().data(),
            downstreamAudioStreamStats._ringBufferStarveCount, downstreamAudioStreamStats._ringBufferOverflowCount);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);

        sprintf(downstreamAudioStatsString, " %s/%s/%s, %u/?", formatUsecTime(downstreamAudioStreamStats._timeGapWindowMin).toLatin1().data(),
            formatUsecTime(downstreamAudioStreamStats._timeGapWindowMax).toLatin1().data(),
            formatUsecTime(downstreamAudioStreamStats._timeGapWindowAverage).toLatin1().data(),
            downstreamAudioStreamStats._ringBufferConsecutiveNotMixedCount);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);


        char upstreamLabelString[] = " Upstream:";
        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamLabelString, color);

        char upstreamAudioStatsString[512];

        const AudioStreamStats& audioMixerAvatarAudioStreamStats = audio->getAudioMixerAvatarStreamAudioStats();

        sprintf(upstreamAudioStatsString, " mic: %.2f%%/%.2f%%, %u/%u/%u", audioMixerAvatarAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
            audioMixerAvatarAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
            audioMixerAvatarAudioStreamStats._ringBufferDesiredJitterBufferFrames, audioMixerAvatarAudioStreamStats._ringBufferFramesAvailableAverage,
            audioMixerAvatarAudioStreamStats._ringBufferFramesAvailable);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

        sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapMin).toLatin1().data(),
            formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapMax).toLatin1().data(),
            formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapAverage).toLatin1().data(),
            audioMixerAvatarAudioStreamStats._ringBufferStarveCount, audioMixerAvatarAudioStreamStats._ringBufferOverflowCount);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

        sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowMin).toLatin1().data(),
            formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowMax).toLatin1().data(),
            formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowAverage).toLatin1().data(),
            audioMixerAvatarAudioStreamStats._ringBufferConsecutiveNotMixedCount, audioMixerAvatarAudioStreamStats._ringBufferSilentFramesDropped);

        verticalOffset += STATS_PELS_PER_LINE;
        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

        foreach(const AudioStreamStats& injectedStreamAudioStats, audioMixerInjectedStreamAudioStatsMap) {

            sprintf(upstreamAudioStatsString, " inj: %.2f%%/%.2f%%, %u/%u/%u", injectedStreamAudioStats._packetStreamStats.getLostRate()*100.0f,
                injectedStreamAudioStats._packetStreamWindowStats.getLostRate() * 100.0f,
                injectedStreamAudioStats._ringBufferDesiredJitterBufferFrames, injectedStreamAudioStats._ringBufferFramesAvailableAverage,
                injectedStreamAudioStats._ringBufferFramesAvailable);

            verticalOffset += STATS_PELS_PER_LINE;
            drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

            sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(injectedStreamAudioStats._timeGapMin).toLatin1().data(),
                formatUsecTime(injectedStreamAudioStats._timeGapMax).toLatin1().data(),
                formatUsecTime(injectedStreamAudioStats._timeGapAverage).toLatin1().data(),
                injectedStreamAudioStats._ringBufferStarveCount, injectedStreamAudioStats._ringBufferOverflowCount);

            verticalOffset += STATS_PELS_PER_LINE;
            drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);

            sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(injectedStreamAudioStats._timeGapWindowMin).toLatin1().data(),
                formatUsecTime(injectedStreamAudioStats._timeGapWindowMax).toLatin1().data(),
                formatUsecTime(injectedStreamAudioStats._timeGapWindowAverage).toLatin1().data(),
                injectedStreamAudioStats._ringBufferConsecutiveNotMixedCount, injectedStreamAudioStats._ringBufferSilentFramesDropped);

            verticalOffset += STATS_PELS_PER_LINE;
            drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
        }
    }

    verticalOffset = 0;

@@ -30,7 +30,7 @@ InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier,

const uchar MAX_INJECTOR_VOLUME = 255;

int InjectedAudioRingBuffer::parseData(const QByteArray& packet, int packetsSkipped) {
int InjectedAudioRingBuffer::parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) {
    frameReceivedUpdateTimingStats();

    // setup a data stream to read from this packet

@@ -20,7 +20,7 @@ class InjectedAudioRingBuffer : public PositionalAudioRingBuffer {
public:
    InjectedAudioRingBuffer(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false);

    int parseData(const QByteArray& packet, int packetsSkipped = 0);
    int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped);

    const QUuid& getStreamIdentifier() const { return _streamIdentifier; }
    float getRadius() const { return _radius; }

@@ -45,7 +45,7 @@ public:

    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);

    virtual int parseData(const QByteArray& packet, int packetsSkipped = 0) = 0;
    virtual int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) = 0;

    int parsePositionalData(const QByteArray& positionalByteArray);
    int parseListenModeData(const QByteArray& listenModeByteArray);

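The rename to parseDataAndHandleDroppedPackets makes the contract explicit: the caller passes how many packets the sequence numbers say were skipped, and the buffer can compensate before parsing the packet that did arrive. A minimal sketch of that idea with a hypothetical ring-buffer API (the real buffers are more elaborate; silent-frame fill is one plausible compensation strategy):

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for an audio ring buffer: each "frame" is one block
    // of samples; dropped packets are filled with silence to keep timing intact.
    struct RingBuffer {
        std::vector<float> samples;
        void writeFrame(const float* data, int count) {
            samples.insert(samples.end(), data, data + count);
        }
        void writeSilentFrames(int frames, int samplesPerFrame) {
            samples.insert(samples.end(), frames * samplesPerFrame, 0.0f);
        }
    };

    const int SAMPLES_PER_FRAME = 4; // tiny for the example

    int parseDataAndHandleDroppedPackets(RingBuffer& buffer, const float* frame, int packetsSkipped) {
        // Compensate for lost packets first so the playback position stays aligned.
        if (packetsSkipped > 0) {
            buffer.writeSilentFrames(packetsSkipped, SAMPLES_PER_FRAME);
        }
        buffer.writeFrame(frame, SAMPLES_PER_FRAME);
        return SAMPLES_PER_FRAME;
    }

    int main() {
        RingBuffer buffer;
        float frame[SAMPLES_PER_FRAME] = { 0.1f, 0.2f, 0.3f, 0.4f };
        parseDataAndHandleDroppedPackets(buffer, frame, 2); // two packets were lost
        std::printf("%zu samples buffered\n", buffer.samples.size()); // 12: 8 silent + 4 real
        return 0;
    }
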
@@ -43,6 +43,7 @@ AvatarData::AvatarData() :
    _handState(0),
    _keyState(NO_KEY_DOWN),
    _isChatCirclingEnabled(false),
    _forceFaceshiftConnected(false),
    _hasNewJointRotations(true),
    _headData(NULL),
    _handData(NULL),

@@ -80,6 +81,9 @@ QByteArray AvatarData::toByteArray() {
    // lazily allocate memory for HeadData in case we're not an Avatar instance
    if (!_headData) {
        _headData = new HeadData(this);
        if (_forceFaceshiftConnected) {
            _headData->_isFaceshiftConnected = true;
        }
    }

    QByteArray avatarDataByteArray;

@@ -185,8 +185,8 @@ public:
    void setClampedTargetScale(float targetScale);

    // Hand State
    void setHandState(char s) { _handState = s; }
    char getHandState() const { return _handState; }
    Q_INVOKABLE void setHandState(char s) { _handState = s; }
    Q_INVOKABLE char getHandState() const { return _handState; }

    const QVector<JointData>& getJointData() const { return _jointData; }
    void setJointData(const QVector<JointData>& jointData) { _jointData = jointData; }

@@ -206,6 +206,10 @@ public:

    Q_INVOKABLE virtual QStringList getJointNames() const { return _jointNames; }

    Q_INVOKABLE void setBlendshape(QString name, float val) { _headData->setBlendshape(name, val); }

    void setForceFaceshiftConnected(bool connected) { _forceFaceshiftConnected = connected; }

    // key state
    void setKeyState(KeyState s) { _keyState = s; }
    KeyState keyState() const { return _keyState; }

@@ -300,7 +304,7 @@ protected:
    std::string _chatMessage;

    bool _isChatCirclingEnabled;

    bool _forceFaceshiftConnected;
    bool _hasNewJointRotations; // set in AvatarData, cleared in Avatar

    HeadData* _headData;

@@ -16,6 +16,8 @@
#include "AvatarData.h"
#include "HeadData.h"

#include "../fbx/src/FBXReader.h"

HeadData::HeadData(AvatarData* owningAvatar) :
    _baseYaw(0.0f),
    _basePitch(0.0f),

@@ -52,6 +54,26 @@ void HeadData::setOrientation(const glm::quat& orientation) {
    _baseRoll = eulers.z;
}

void HeadData::setBlendshape(QString name, float val) {
    static bool hasInitializedLookupMap = false;
    static QMap<QString, int> blendshapeLookupMap;
    //Lazily construct a lookup map from the blendshapes
    if (!hasInitializedLookupMap) {
        for (int i = 0; i < NUM_FACESHIFT_BLENDSHAPES; i++) {
            blendshapeLookupMap[FACESHIFT_BLENDSHAPES[i]] = i;
        }
    }

    //Check to see if the named blendshape exists, and then set its value if it does
    QMap<QString, int>::iterator it = blendshapeLookupMap.find(name);
    if (it != blendshapeLookupMap.end()) {
        if (_blendshapeCoefficients.size() <= it.value()) {
            _blendshapeCoefficients.resize(it.value() + 1);
        }
        _blendshapeCoefficients[it.value()] = val;
    }
}

void HeadData::addYaw(float yaw) {
    setBaseYaw(_baseYaw + yaw);
}

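setBlendshape above builds a name-to-index lookup map lazily on first use. A standalone sketch of that pattern with a hypothetical name table follows; note that the guard flag has to be set once the map is filled, otherwise the map is rebuilt on every call (the flag in the diff above is declared but never flipped, so the rebuild happens each time there).

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // Hypothetical name table, ""-terminated like FACESHIFT_BLENDSHAPES.
    const char* NAMES[] = { "BrowsU_L", "BrowsU_R", "JawOpen", "" };
    const int NUM_NAMES = 3;

    void setCoefficient(std::vector<float>& coefficients, const std::string& name, float value) {
        static bool initialized = false;
        static std::map<std::string, int> lookup;
        // Lazily construct the lookup map on the first call only.
        if (!initialized) {
            for (int i = 0; i < NUM_NAMES; ++i) {
                lookup[NAMES[i]] = i;
            }
            initialized = true; // without this the map would be rebuilt every call
        }
        std::map<std::string, int>::const_iterator it = lookup.find(name);
        if (it != lookup.end()) {
            if ((int)coefficients.size() <= it->second) {
                coefficients.resize(it->second + 1); // grow on demand, zero-filled
            }
            coefficients[it->second] = value;
        } // unknown names are silently ignored, as in the code above
    }

    int main() {
        std::vector<float> coefficients;
        setCoefficient(coefficients, "JawOpen", 0.8f);
        std::printf("%zu %.1f\n", coefficients.size(), coefficients[2]); // 3 0.8
        return 0;
    }
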
@@ -54,6 +54,7 @@ public:
    float getAudioAverageLoudness() const { return _audioAverageLoudness; }
    void setAudioAverageLoudness(float audioAverageLoudness) { _audioAverageLoudness = audioAverageLoudness; }

    void setBlendshape(QString name, float val);
    const QVector<float>& getBlendshapeCoefficients() const { return _blendshapeCoefficients; }

    float getPupilDilation() const { return _pupilDilation; }

@@ -577,6 +577,8 @@ const char* FACESHIFT_BLENDSHAPES[] = {
    ""
};

const int NUM_FACESHIFT_BLENDSHAPES = sizeof(FACESHIFT_BLENDSHAPES) / sizeof(char*);

const char* HUMANIK_JOINTS[] = {
    "RightHand",
    "RightForeArm",

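NUM_FACESHIFT_BLENDSHAPES above uses the classic sizeof(array) / sizeof(element) idiom to derive an array's length at compile time. One wrinkle worth noting: applied to a sentinel-terminated table like this one, the count includes the trailing "" entry. A tiny sketch:

    #include <cstdio>

    // Sentinel-terminated table, mirroring the FACESHIFT_BLENDSHAPES layout.
    const char* WORDS[] = { "alpha", "beta", "gamma", "" };

    // The idiom: total bytes divided by bytes per element gives element count.
    // It only works on true arrays, not on pointers the array has decayed to.
    const int NUM_WORDS = sizeof(WORDS) / sizeof(WORDS[0]);

    int main() {
        std::printf("%d entries (including the \"\" sentinel)\n", NUM_WORDS); // 4
        return 0;
    }
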
@@ -29,6 +29,8 @@ typedef QList<FBXNode> FBXNodeList;

/// The names of the blendshapes expected by Faceshift, terminated with an empty string.
extern const char* FACESHIFT_BLENDSHAPES[];
/// The size of FACESHIFT_BLENDSHAPES
extern const int NUM_FACESHIFT_BLENDSHAPES;

/// The names of the joints in the Maya HumanIK rig, terminated with an empty string.
extern const char* HUMANIK_JOINTS[];

@@ -70,7 +70,7 @@ ModelItemProperties ModelsScriptingInterface::getModelProperties(ModelItemID mod
    if (_modelTree) {
        _modelTree->lockForRead();
        ModelItem* model = const_cast<ModelItem*>(_modelTree->findModelByID(identity.id, true));
        if (model) {
        if (model && _modelTree->getGeometryForModel(*model)) {
            model->setSittingPoints(_modelTree->getGeometryForModel(*model)->sittingPoints);
            results.copyFromModelItem(*model);
        } else {

@@ -23,8 +23,8 @@ static const QString CLASS_NAME = "ArrayBuffer";
Q_DECLARE_METATYPE(QByteArray*)

ArrayBufferClass::ArrayBufferClass(ScriptEngine* scriptEngine) :
    QObject(scriptEngine->getEngine()),
    QScriptClass(scriptEngine->getEngine()),
    QObject(scriptEngine),
    QScriptClass(scriptEngine),
    _scriptEngine(scriptEngine) {
    qScriptRegisterMetaType<QByteArray>(engine(), toScriptValue, fromScriptValue);
    QScriptValue global = engine()->globalObject();

@@ -14,8 +14,8 @@
Q_DECLARE_METATYPE(QByteArray*)

ArrayBufferViewClass::ArrayBufferViewClass(ScriptEngine* scriptEngine) :
    QObject(scriptEngine->getEngine()),
    QScriptClass(scriptEngine->getEngine()),
    QObject(scriptEngine),
    QScriptClass(scriptEngine),
    _scriptEngine(scriptEngine) {
    // Save string handles for quick lookup
    _bufferName = engine()->toStringHandle(BUFFER_PROPERTY_NAME.toLatin1());

@@ -87,7 +87,6 @@ ScriptEngine::ScriptEngine(const QString& scriptContents, const QString& fileNam
    _isFinished(false),
    _isRunning(false),
    _isInitialized(false),
    _engine(),
    _isAvatar(false),
    _avatarIdentityTimer(NULL),
    _avatarBillboardTimer(NULL),

@@ -113,7 +112,6 @@ ScriptEngine::ScriptEngine(const QUrl& scriptURL,
    _isFinished(false),
    _isRunning(false),
    _isInitialized(false),
    _engine(),
    _isAvatar(false),
    _avatarIdentityTimer(NULL),
    _avatarBillboardTimer(NULL),

@@ -194,7 +192,7 @@ void ScriptEngine::setAvatarData(AvatarData* avatarData, const QString& objectNa
    _avatarData = avatarData;

    // remove the old Avatar property, if it exists
    _engine.globalObject().setProperty(objectName, QScriptValue());
    globalObject().setProperty(objectName, QScriptValue());

    // give the script engine the new Avatar script property
    registerGlobalObject(objectName, _avatarData);

@@ -202,7 +200,7 @@ void ScriptEngine::setAvatarData(AvatarData* avatarData, const QString& objectNa

void ScriptEngine::setAvatarHashMap(AvatarHashMap* avatarHashMap, const QString& objectName) {
    // remove the old Avatar property, if it exists
    _engine.globalObject().setProperty(objectName, QScriptValue());
    globalObject().setProperty(objectName, QScriptValue());

    // give the script engine the new avatar hash map
    registerGlobalObject(objectName, avatarHashMap);

@@ -231,48 +229,48 @@ void ScriptEngine::init() {
    _particlesScriptingInterface.init();

    // register various meta-types
    registerMetaTypes(&_engine);
    registerMIDIMetaTypes(&_engine);
    registerVoxelMetaTypes(&_engine);
    registerEventTypes(&_engine);
    registerMenuItemProperties(&_engine);
    registerAnimationTypes(&_engine);
    registerAvatarTypes(&_engine);
    Bitstream::registerTypes(&_engine);
    registerMetaTypes(this);
    registerMIDIMetaTypes(this);
    registerVoxelMetaTypes(this);
    registerEventTypes(this);
    registerMenuItemProperties(this);
    registerAnimationTypes(this);
    registerAvatarTypes(this);
    Bitstream::registerTypes(this);

    qScriptRegisterMetaType(&_engine, ParticlePropertiesToScriptValue, ParticlePropertiesFromScriptValue);
    qScriptRegisterMetaType(&_engine, ParticleIDtoScriptValue, ParticleIDfromScriptValue);
    qScriptRegisterSequenceMetaType<QVector<ParticleID> >(&_engine);
    qScriptRegisterMetaType(this, ParticlePropertiesToScriptValue, ParticlePropertiesFromScriptValue);
    qScriptRegisterMetaType(this, ParticleIDtoScriptValue, ParticleIDfromScriptValue);
    qScriptRegisterSequenceMetaType<QVector<ParticleID> >(this);

    qScriptRegisterMetaType(&_engine, ModelItemPropertiesToScriptValue, ModelItemPropertiesFromScriptValue);
    qScriptRegisterMetaType(&_engine, ModelItemIDtoScriptValue, ModelItemIDfromScriptValue);
    qScriptRegisterMetaType(&_engine, RayToModelIntersectionResultToScriptValue, RayToModelIntersectionResultFromScriptValue);
    qScriptRegisterSequenceMetaType<QVector<ModelItemID> >(&_engine);
    qScriptRegisterMetaType(this, ModelItemPropertiesToScriptValue, ModelItemPropertiesFromScriptValue);
    qScriptRegisterMetaType(this, ModelItemIDtoScriptValue, ModelItemIDfromScriptValue);
    qScriptRegisterMetaType(this, RayToModelIntersectionResultToScriptValue, RayToModelIntersectionResultFromScriptValue);
    qScriptRegisterSequenceMetaType<QVector<ModelItemID> >(this);

    qScriptRegisterSequenceMetaType<QVector<glm::vec2> >(&_engine);
    qScriptRegisterSequenceMetaType<QVector<glm::quat> >(&_engine);
    qScriptRegisterSequenceMetaType<QVector<QString> >(&_engine);
    qScriptRegisterSequenceMetaType<QVector<glm::vec2> >(this);
    qScriptRegisterSequenceMetaType<QVector<glm::quat> >(this);
    qScriptRegisterSequenceMetaType<QVector<QString> >(this);

    QScriptValue xmlHttpRequestConstructorValue = _engine.newFunction(XMLHttpRequestClass::constructor);
    _engine.globalObject().setProperty("XMLHttpRequest", xmlHttpRequestConstructorValue);
    QScriptValue xmlHttpRequestConstructorValue = newFunction(XMLHttpRequestClass::constructor);
    globalObject().setProperty("XMLHttpRequest", xmlHttpRequestConstructorValue);

    QScriptValue printConstructorValue = _engine.newFunction(debugPrint);
    _engine.globalObject().setProperty("print", printConstructorValue);
    QScriptValue printConstructorValue = newFunction(debugPrint);
    globalObject().setProperty("print", printConstructorValue);

    QScriptValue soundConstructorValue = _engine.newFunction(soundConstructor);
    QScriptValue soundMetaObject = _engine.newQMetaObject(&Sound::staticMetaObject, soundConstructorValue);
    _engine.globalObject().setProperty("Sound", soundMetaObject);
    QScriptValue soundConstructorValue = newFunction(soundConstructor);
    QScriptValue soundMetaObject = newQMetaObject(&Sound::staticMetaObject, soundConstructorValue);
    globalObject().setProperty("Sound", soundMetaObject);

    QScriptValue injectionOptionValue = _engine.scriptValueFromQMetaObject<AudioInjectorOptions>();
    _engine.globalObject().setProperty("AudioInjectionOptions", injectionOptionValue);
    QScriptValue injectionOptionValue = scriptValueFromQMetaObject<AudioInjectorOptions>();
    globalObject().setProperty("AudioInjectionOptions", injectionOptionValue);

    QScriptValue localVoxelsValue = _engine.scriptValueFromQMetaObject<LocalVoxels>();
    _engine.globalObject().setProperty("LocalVoxels", localVoxelsValue);
    QScriptValue localVoxelsValue = scriptValueFromQMetaObject<LocalVoxels>();
    globalObject().setProperty("LocalVoxels", localVoxelsValue);

    qScriptRegisterMetaType(&_engine, injectorToScriptValue, injectorFromScriptValue);
    qScriptRegisterMetaType(&_engine, injectorToScriptValueInputController, injectorFromScriptValueInputController);
    qScriptRegisterMetaType(this, injectorToScriptValue, injectorFromScriptValue);
    qScriptRegisterMetaType(this, injectorToScriptValueInputController, injectorFromScriptValueInputController);

    qScriptRegisterMetaType(&_engine, animationDetailsToScriptValue, animationDetailsFromScriptValue);
    qScriptRegisterMetaType(this, animationDetailsToScriptValue, animationDetailsFromScriptValue);

    registerGlobalObject("Script", this);
    registerGlobalObject("Audio", &_audioScriptingInterface);

@@ -287,15 +285,14 @@ void ScriptEngine::init() {
    registerGlobalObject("Voxels", &_voxelsScriptingInterface);

    // constants
    QScriptValue globalObject = _engine.globalObject();
    globalObject.setProperty("TREE_SCALE", _engine.newVariant(QVariant(TREE_SCALE)));
    globalObject.setProperty("COLLISION_GROUP_ENVIRONMENT", _engine.newVariant(QVariant(COLLISION_GROUP_ENVIRONMENT)));
    globalObject.setProperty("COLLISION_GROUP_AVATARS", _engine.newVariant(QVariant(COLLISION_GROUP_AVATARS)));
    globalObject.setProperty("COLLISION_GROUP_VOXELS", _engine.newVariant(QVariant(COLLISION_GROUP_VOXELS)));
    globalObject.setProperty("COLLISION_GROUP_PARTICLES", _engine.newVariant(QVariant(COLLISION_GROUP_PARTICLES)));
    globalObject().setProperty("TREE_SCALE", newVariant(QVariant(TREE_SCALE)));
    globalObject().setProperty("COLLISION_GROUP_ENVIRONMENT", newVariant(QVariant(COLLISION_GROUP_ENVIRONMENT)));
    globalObject().setProperty("COLLISION_GROUP_AVATARS", newVariant(QVariant(COLLISION_GROUP_AVATARS)));
    globalObject().setProperty("COLLISION_GROUP_VOXELS", newVariant(QVariant(COLLISION_GROUP_VOXELS)));
    globalObject().setProperty("COLLISION_GROUP_PARTICLES", newVariant(QVariant(COLLISION_GROUP_PARTICLES)));

    globalObject.setProperty("AVATAR_MOTION_OBEY_LOCAL_GRAVITY", _engine.newVariant(QVariant(AVATAR_MOTION_OBEY_LOCAL_GRAVITY)));
    globalObject.setProperty("AVATAR_MOTION_OBEY_ENVIRONMENTAL_GRAVITY", _engine.newVariant(QVariant(AVATAR_MOTION_OBEY_ENVIRONMENTAL_GRAVITY)));
    globalObject().setProperty("AVATAR_MOTION_OBEY_LOCAL_GRAVITY", newVariant(QVariant(AVATAR_MOTION_OBEY_LOCAL_GRAVITY)));
    globalObject().setProperty("AVATAR_MOTION_OBEY_ENVIRONMENTAL_GRAVITY", newVariant(QVariant(AVATAR_MOTION_OBEY_ENVIRONMENTAL_GRAVITY)));

    // let the VoxelPacketSender know how frequently we plan to call it
    _voxelsScriptingInterface.getVoxelPacketSender()->setProcessCallIntervalHint(SCRIPT_DATA_CALLBACK_USECS);

@@ -304,8 +301,8 @@ void ScriptEngine::init() {

QScriptValue ScriptEngine::registerGlobalObject(const QString& name, QObject* object) {
    if (object) {
        QScriptValue value = _engine.newQObject(object);
        _engine.globalObject().setProperty(name, value);
        QScriptValue value = newQObject(object);
        globalObject().setProperty(name, value);
        return value;
    }
    return QScriptValue::NullValue;

@@ -313,15 +310,15 @@ QScriptValue ScriptEngine::registerGlobalObject(const QString& name, QObject* ob

void ScriptEngine::registerGetterSetter(const QString& name, QScriptEngine::FunctionSignature getter,
                                        QScriptEngine::FunctionSignature setter, QScriptValue object) {
    QScriptValue setterFunction = _engine.newFunction(setter, 1);
    QScriptValue getterFunction = _engine.newFunction(getter);
    QScriptValue setterFunction = newFunction(setter, 1);
    QScriptValue getterFunction = newFunction(getter);

    if (!object.isNull()) {
        object.setProperty(name, setterFunction, QScriptValue::PropertySetter);
        object.setProperty(name, getterFunction, QScriptValue::PropertyGetter);
    } else {
        _engine.globalObject().setProperty(name, setterFunction, QScriptValue::PropertySetter);
        _engine.globalObject().setProperty(name, getterFunction, QScriptValue::PropertyGetter);
        globalObject().setProperty(name, setterFunction, QScriptValue::PropertySetter);
        globalObject().setProperty(name, getterFunction, QScriptValue::PropertyGetter);
    }
}

@@ -330,25 +327,24 @@ void ScriptEngine::evaluate() {
        init();
    }

    QScriptValue result = _engine.evaluate(_scriptContents);
    QScriptValue result = evaluate(_scriptContents);

    if (_engine.hasUncaughtException()) {
        int line = _engine.uncaughtExceptionLineNumber();
    if (hasUncaughtException()) {
        int line = uncaughtExceptionLineNumber();
        qDebug() << "Uncaught exception at (" << _fileNameString << ") line" << line << ":" << result.toString();
        emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + result.toString());
        _engine.clearExceptions();
        clearExceptions();
    }
}

QScriptValue ScriptEngine::evaluate(const QString& program, const QString& fileName, int lineNumber) {
    QScriptValue result = _engine.evaluate(program, fileName, lineNumber);
    bool hasUncaughtException = _engine.hasUncaughtException();
    if (hasUncaughtException) {
        int line = _engine.uncaughtExceptionLineNumber();
    QScriptValue result = QScriptEngine::evaluate(program, fileName, lineNumber);
    if (hasUncaughtException()) {
        int line = uncaughtExceptionLineNumber();
        qDebug() << "Uncaught exception at (" << _fileNameString << ") line" << line << ": " << result.toString();
    }
    emit evaluationFinished(result, hasUncaughtException);
    _engine.clearExceptions();
    emit evaluationFinished(result, hasUncaughtException());
    clearExceptions();
    return result;
}

@@ -372,12 +368,12 @@ void ScriptEngine::run() {
    _isFinished = false;
    emit runningStateChanged();

    QScriptValue result = _engine.evaluate(_scriptContents);
    if (_engine.hasUncaughtException()) {
        int line = _engine.uncaughtExceptionLineNumber();
    QScriptValue result = evaluate(_scriptContents);
    if (hasUncaughtException()) {
        int line = uncaughtExceptionLineNumber();
        qDebug() << "Uncaught exception at (" << _fileNameString << ") line" << line << ":" << result.toString();
        emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + result.toString());
        _engine.clearExceptions();
        clearExceptions();
    }

    QElapsedTimer startTime;

@@ -532,11 +528,11 @@ void ScriptEngine::run() {
        qint64 now = usecTimestampNow();
        float deltaTime = (float) (now - lastUpdate) / (float) USECS_PER_SECOND;

        if (_engine.hasUncaughtException()) {
            int line = _engine.uncaughtExceptionLineNumber();
            qDebug() << "Uncaught exception at (" << _fileNameString << ") line" << line << ":" << _engine.uncaughtException().toString();
            emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + _engine.uncaughtException().toString());
            _engine.clearExceptions();
        if (hasUncaughtException()) {
            int line = uncaughtExceptionLineNumber();
            qDebug() << "Uncaught exception at (" << _fileNameString << ") line" << line << ":" << uncaughtException().toString();
            emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + uncaughtException().toString());
            clearExceptions();
        }

        emit update(deltaTime);

@@ -694,12 +690,12 @@ void ScriptEngine::include(const QString& includeFile) {
        }
    }

    QScriptValue result = _engine.evaluate(includeContents);
    if (_engine.hasUncaughtException()) {
        int line = _engine.uncaughtExceptionLineNumber();
    QScriptValue result = evaluate(includeContents);
    if (hasUncaughtException()) {
        int line = uncaughtExceptionLineNumber();
        qDebug() << "Uncaught exception at (" << includeFile << ") line" << line << ":" << result.toString();
        emit errorMessage("Uncaught exception at (" + includeFile + ") line" + QString::number(line) + ":" + result.toString());
        _engine.clearExceptions();
        clearExceptions();
    }
}

@@ -38,7 +38,7 @@ const QString NO_SCRIPT("");

const unsigned int SCRIPT_DATA_CALLBACK_USECS = floor(((1.0 / 60.0f) * 1000 * 1000) + 0.5);

class ScriptEngine : public QObject {
class ScriptEngine : public QScriptEngine {
    Q_OBJECT
public:
    ScriptEngine(const QUrl& scriptURL,

@@ -57,7 +57,6 @@ public:
    /// Access the ModelsScriptingInterface in order to initialize it with a custom packet sender and jurisdiction listener
    static ModelsScriptingInterface* getModelsScriptingInterface() { return &_modelsScriptingInterface; }

    QScriptEngine* getEngine() { return &_engine; }
    ArrayBufferClass* getArrayBufferClass() { return _arrayBufferClass; }

    /// sets the script contents, will return false if failed, will fail if script is already running

@@ -121,7 +120,6 @@ protected:
    bool _isFinished;
    bool _isRunning;
    bool _isInitialized;
    QScriptEngine _engine;
    bool _isAvatar;
    QTimer* _avatarIdentityTimer;
    QTimer* _avatarBillboardTimer;

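The header change above is the heart of this refactor: ScriptEngine stops owning a QScriptEngine member (_engine) and instead derives from QScriptEngine, so every _engine.evaluate(...) collapses to an inherited evaluate(...), getEngine() becomes unnecessary, and the object can be passed wherever a QScriptEngine* is expected. A minimal sketch of the wrap-by-inheritance pattern, with hypothetical names:

    #include <QCoreApplication>
    #include <QScriptEngine>
    #include <QDebug>

    // Deriving instead of wrapping: the subclass IS-A QScriptEngine, so helper
    // methods use the inherited API directly and callers need no getEngine().
    class LoggingScriptEngine : public QScriptEngine {
    public:
        QScriptValue evaluateWithLogging(const QString& program) {
            QScriptValue result = evaluate(program); // inherited, not _engine.evaluate
            if (hasUncaughtException()) {
                qDebug() << "exception on line" << uncaughtExceptionLineNumber();
                clearExceptions();
            }
            return result;
        }
    };

    int main(int argc, char** argv) {
        QCoreApplication app(argc, argv);
        LoggingScriptEngine engine;
        qDebug() << engine.evaluateWithLogging("6 * 7").toNumber(); // 42
        return 0;
    }
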
@@ -28,11 +28,7 @@ public:
    void applyAccumulatedDelta();

    glm::vec3 getAccumulatedDelta() const {
        glm::vec3 foo(0.0f);
        if (_numDeltas > 0) {
            foo = _accumulatedDelta / (float)_numDeltas;
        }
        return foo;
        return (_numDeltas > 0) ? _accumulatedDelta / (float)_numDeltas : glm::vec3(0.0f);
    }

    glm::vec3 _position;

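The getAccumulatedDelta change above is a pure simplification of an accumulate-then-average pattern: several constraints each add a correction into _accumulatedDelta, and the point later moves by the average rather than the sum so that multiple constraints do not over-correct. A small sketch with hypothetical names:

    #include <cstdio>
    #include <glm/glm.hpp>

    // Accumulate corrections from several constraints, then move by their mean.
    struct VerletishPoint {
        glm::vec3 position{0.0f};
        glm::vec3 accumulatedDelta{0.0f};
        int numDeltas = 0;

        void addDelta(const glm::vec3& delta) {
            accumulatedDelta += delta;
            ++numDeltas;
        }
        // Same shape as the one-liner in the diff: average if any deltas exist.
        glm::vec3 averageDelta() const {
            return (numDeltas > 0) ? accumulatedDelta / (float)numDeltas : glm::vec3(0.0f);
        }
        void applyAccumulatedDelta() {
            position += averageDelta();
            accumulatedDelta = glm::vec3(0.0f);
            numDeltas = 0;
        }
    };

    int main() {
        VerletishPoint point;
        point.addDelta(glm::vec3(1.0f, 0.0f, 0.0f)); // constraint A pulls +x
        point.addDelta(glm::vec3(0.0f, 1.0f, 0.0f)); // constraint B pulls +y
        point.applyAccumulatedDelta();               // moves by the mean (0.5, 0.5, 0)
        std::printf("%.1f %.1f %.1f\n", point.position.x, point.position.y, point.position.z);
        return 0;
    }
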
@@ -733,6 +733,13 @@ glm::quat rotationBetween(const glm::vec3& v1, const glm::vec3& v2) {
        }
    } else {
        axis = glm::normalize(glm::cross(v1, v2));
        // It is possible for axis to be NaN even when angle is not less than EPSILON,
        // for example when angle is small but not tiny and v1 and v2 have very short lengths.
        if (glm::isnan(glm::dot(axis, axis))) {
            // set angle and axis to values that will generate an identity rotation
            angle = 0.0f;
            axis = glm::vec3(1.0f, 0.0f, 0.0f);
        }
    }
    return glm::angleAxis(angle, axis);
}

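The guard added above exists because glm::normalize(glm::cross(v1, v2)) divides by the cross product's length; for nearly parallel or very short vectors that length can underflow to zero, and 0/0 yields NaN, which the glm::isnan check on the axis's self dot product detects. A tiny reproduction under those assumptions (exact behavior can vary with compiler floating-point flags):

    #include <cstdio>
    #include <glm/glm.hpp>

    int main() {
        // Two very short, almost parallel vectors: their cross product underflows.
        glm::vec3 v1(1.0e-20f, 0.0f, 0.0f);
        glm::vec3 v2(1.0e-20f, 1.0e-30f, 0.0f);
        glm::vec3 axis = glm::normalize(glm::cross(v1, v2)); // 0 / 0 -> NaN components
        if (glm::isnan(glm::dot(axis, axis))) {
            // Fall back to a harmless identity-rotation axis, as the diff does.
            axis = glm::vec3(1.0f, 0.0f, 0.0f);
        }
        std::printf("%.1f %.1f %.1f\n", axis.x, axis.y, axis.z); // 1.0 0.0 0.0
        return 0;
    }
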