commit d3ba00770f
Merge upstream/master into andrew/ragdoll

38 changed files with 691 additions and 338 deletions
@@ -229,7 +229,8 @@ void Agent::run() {
     // setup an Avatar for the script to use
     AvatarData scriptedAvatar;
+    scriptedAvatar.setForceFaceshiftConnected(true);
 
     // call model URL setters with empty URLs so our avatar, if user, will have the default models
     scriptedAvatar.setFaceModelURL(QUrl());
     scriptedAvatar.setSkeletonModelURL(QUrl());
@@ -237,14 +237,6 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
             _clientSamples[delayedChannelIndex + SINGLE_STEREO_OFFSET] += delayBufferSample[1];
         }
 
-        // The following code is pretty gross and redundant, but AFAIK it's the best way to avoid
-        // too many conditionals in handling the delay samples at the beginning of _clientSamples.
-        // Basically we try to take the samples in batches of four, and then handle the remainder
-        // conditionally to get rid of the rest.
-
-        const int DOUBLE_STEREO_OFFSET = 4;
-        const int TRIPLE_STEREO_OFFSET = 6;
-
         if (numSamplesDelay > 0) {
             // if there was a sample delay for this buffer, we need to pull samples prior to the nextOutput
             // to stick at the beginning
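Note: the deleted comment block described a hand-unrolled copy of the delay samples. A minimal sketch of that "batches of four, then handle the remainder" pattern, with hypothetical names (this is an illustration, not the mixer's actual code):

    #include <cstdint>

    // Illustrative only: process four samples per iteration, then mop up
    // the remaining 0-3 samples; out/delayed/count are made-up names.
    static void accumulateDelaySamples(int16_t* out, const int16_t* delayed, int count) {
        int i = 0;
        for (; i + 4 <= count; i += 4) {   // bulk: four samples at a time
            out[i]     += delayed[i];
            out[i + 1] += delayed[i + 1];
            out[i + 2] += delayed[i + 2];
            out[i + 3] += delayed[i + 3];
        }
        for (; i < count; i++) {           // remainder, handled conditionally
            out[i] += delayed[i];
        }
    }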
@@ -90,12 +90,12 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
         switch (packetArrivalInfo._status) {
             case SequenceNumberStats::Early: {
                 int packetsLost = packetArrivalInfo._seqDiffFromExpected;
-                avatarRingBuffer->parseData(packet, packetsLost);
+                avatarRingBuffer->parseDataAndHandleDroppedPackets(packet, packetsLost);
                 break;
             }
             case SequenceNumberStats::OnTime: {
                 // ask the AvatarAudioRingBuffer instance to parse the data
-                avatarRingBuffer->parseData(packet);
+                avatarRingBuffer->parseDataAndHandleDroppedPackets(packet, 0);
                 break;
             }
             default: {
@@ -134,12 +134,12 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
         switch (packetArrivalInfo._status) {
             case SequenceNumberStats::Early: {
                 int packetsLost = packetArrivalInfo._seqDiffFromExpected;
-                matchingInjectedRingBuffer->parseData(packet, packetsLost);
+                matchingInjectedRingBuffer->parseDataAndHandleDroppedPackets(packet, packetsLost);
                 break;
             }
             case SequenceNumberStats::OnTime: {
                 // ask the AvatarAudioRingBuffer instance to parse the data
-                matchingInjectedRingBuffer->parseData(packet);
+                matchingInjectedRingBuffer->parseDataAndHandleDroppedPackets(packet, 0);
                 break;
             }
             default: {
@@ -18,7 +18,7 @@ AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo, bool dynamicJitterBu
 
 }
 
-int AvatarAudioRingBuffer::parseData(const QByteArray& packet, int packetsSkipped) {
+int AvatarAudioRingBuffer::parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) {
     frameReceivedUpdateTimingStats();
 
     _shouldLoopbackForNode = (packetTypeForPacket(packet) == PacketTypeMicrophoneAudioWithEcho);
@@ -20,7 +20,7 @@ class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
 public:
     AvatarAudioRingBuffer(bool isStereo = false, bool dynamicJitterBuffer = false);
 
-    int parseData(const QByteArray& packet, int packetsSkipped = 0);
+    int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped);
 private:
     // disallow copying of AvatarAudioRingBuffer objects
    AvatarAudioRingBuffer(const AvatarAudioRingBuffer&);
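Note: the rename makes packet-loss handling explicit at every call site: the old default argument (packetsSkipped = 0) is dropped, so each caller must state the count. Condensed from the two AudioMixerClientData hunks above (ringBuffer stands in for either avatarRingBuffer or matchingInjectedRingBuffer):

    // The arrival status decides how many packets were skipped before this one.
    switch (packetArrivalInfo._status) {
        case SequenceNumberStats::Early:   // sequence gap => packets were dropped
            ringBuffer->parseDataAndHandleDroppedPackets(packet, packetArrivalInfo._seqDiffFromExpected);
            break;
        case SequenceNumberStats::OnTime:  // no gap: zero packets skipped
            ringBuffer->parseDataAndHandleDroppedPackets(packet, 0);
            break;
        default:
            break;
    }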
examples/bot_randomExpression.js  (new file, +144 lines)
@@ -0,0 +1,144 @@
+//
+//  bot_randomExpression.js
+//  examples
+//
+//  Created by Ben Arnold on 7/23/14.
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  This is an example script that demonstrates an NPC avatar with
+//  random facial expressions.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+function getRandomFloat(min, max) {
+    return Math.random() * (max - min) + min;
+}
+
+function getRandomInt(min, max) {
+    return Math.floor(Math.random() * (max - min + 1)) + min;
+}
+
+function printVector(string, vector) {
+    print(string + " " + vector.x + ", " + vector.y + ", " + vector.z);
+}
+
+var timePassed = 0.0;
+var updateSpeed = 3.0;
+
+var X_MIN = 5.0;
+var X_MAX = 15.0;
+var Z_MIN = 5.0;
+var Z_MAX = 15.0;
+var Y_PELVIS = 1.0;
+
+// pick an integer between 1 and 100 for the body model for this bot
+botNumber = getRandomInt(1, 100);
+
+newFaceFilePrefix = "ron";
+
+newBodyFilePrefix = "bot" + botNumber;
+
+// set the face model fst using the bot number
+// there is no need to change the body model - we're using the default
+Avatar.faceModelURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/" + newFaceFilePrefix + ".fst";
+Avatar.skeletonModelURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/" + newBodyFilePrefix + ".fst";
+Avatar.billboardURL = "https://s3-us-west-1.amazonaws.com/highfidelity-public/meshes/billboards/bot" + botNumber + ".png";
+
+Agent.isAvatar = true;
+Agent.isListeningToAudioStream = true;
+
+// change the avatar's position to the random one
+Avatar.position = { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) };
+printVector("New bot, position = ", Avatar.position);
+
+var allBlendShapes = [];
+var targetBlendCoefficient = [];
+var currentBlendCoefficient = [];
+
+function addBlendShape(s) {
+    allBlendShapes[allBlendShapes.length] = s;
+}
+
+// It is imperative that the following blendshapes are all present and are in the correct order
+addBlendShape("EyeBlink_L");
+addBlendShape("EyeBlink_R");
+addBlendShape("EyeSquint_L");
+addBlendShape("EyeSquint_R");
+addBlendShape("EyeDown_L");
+addBlendShape("EyeDown_R");
+addBlendShape("EyeIn_L");
+addBlendShape("EyeIn_R");
+addBlendShape("EyeOpen_L");
+addBlendShape("EyeOpen_R");
+addBlendShape("EyeOut_L");
+addBlendShape("EyeOut_R");
+addBlendShape("EyeUp_L");
+addBlendShape("EyeUp_R");
+addBlendShape("BrowsD_L");
+addBlendShape("BrowsD_R");
+addBlendShape("BrowsU_C");
+addBlendShape("BrowsU_L");
+addBlendShape("BrowsU_R");
+addBlendShape("JawFwd");
+addBlendShape("JawLeft");
+addBlendShape("JawOpen");
+addBlendShape("JawChew");
+addBlendShape("JawRight");
+addBlendShape("MouthLeft");
+addBlendShape("MouthRight");
+addBlendShape("MouthFrown_L");
+addBlendShape("MouthFrown_R");
+addBlendShape("MouthSmile_L");
+addBlendShape("MouthSmile_R");
+addBlendShape("MouthDimple_L");
+addBlendShape("MouthDimple_R");
+addBlendShape("LipsStretch_L");
+addBlendShape("LipsStretch_R");
+addBlendShape("LipsUpperClose");
+addBlendShape("LipsLowerClose");
+addBlendShape("LipsUpperUp");
+addBlendShape("LipsLowerDown");
+addBlendShape("LipsUpperOpen");
+addBlendShape("LipsLowerOpen");
+addBlendShape("LipsFunnel");
+addBlendShape("LipsPucker");
+addBlendShape("ChinLowerRaise");
+addBlendShape("ChinUpperRaise");
+addBlendShape("Sneer");
+addBlendShape("Puff");
+addBlendShape("CheekSquint_L");
+addBlendShape("CheekSquint_R");
+
+for (var i = 0; i < allBlendShapes.length; i++) {
+    targetBlendCoefficient[i] = 0;
+    currentBlendCoefficient[i] = 0;
+}
+
+function setRandomExpression() {
+    for (var i = 0; i < allBlendShapes.length; i++) {
+        targetBlendCoefficient[i] = Math.random();
+    }
+}
+
+var expressionChangeSpeed = 0.1;
+
+function updateBlendShapes(deltaTime) {
+    for (var i = 0; i < allBlendShapes.length; i++) {
+        currentBlendCoefficient[i] += (targetBlendCoefficient[i] - currentBlendCoefficient[i]) * expressionChangeSpeed;
+        Avatar.setBlendshape(allBlendShapes[i], currentBlendCoefficient[i]);
+    }
+}
+
+function update(deltaTime) {
+    timePassed += deltaTime;
+    if (timePassed > updateSpeed) {
+        timePassed = 0;
+        setRandomExpression();
+    }
+    updateBlendShapes(deltaTime);
+}
+
+Script.update.connect(update);
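Note: updateBlendShapes() eases each coefficient toward its random target by a fixed fraction per frame, i.e. exponential smoothing. The step, isolated (restated in C++ to match the engine code in this commit):

    // Each call closes a fixed fraction of the remaining gap, so the value
    // converges geometrically toward the target.
    float approach(float current, float target, float rate) {   // rate ~ 0.1f
        return current + (target - current) * rate;
    }

With rate 0.1, the remaining gap after n updates is 0.9^n, so about 22 frames close roughly 90% of the gap (0.9^22 ~ 0.1).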
@@ -1168,7 +1168,6 @@ function handeMenuEvent(menuItem){
             angles.z = array[7].value;
             selectedModelProperties.modelRotation = Quat.fromVec3Degrees(angles);
             selectedModelProperties.radius = array[8].value / 2;
-            print(selectedModelProperties.radius);
 
             Models.editModel(selectedModelID, selectedModelProperties);
         }
@@ -51,7 +51,6 @@ var lastVoxelScale = 0;
 var dragStart = { x: 0, y: 0 };
 var wheelPixelsMoved = 0;
 
-
 var mouseX = 0;
 var mouseY = 0;
 
@@ -168,7 +167,16 @@ var voxelPreview = Overlays.addOverlay("cube", {
     lineWidth: 4
 });
 
-var linePreviewTop = Overlays.addOverlay("line3d", {
+var linePreviewTop = [];
+var linePreviewBottom = [];
+var linePreviewLeft = [];
+var linePreviewRight = [];
+
+// Current cursor index
+var currentCursor = 0;
+
+function addLineOverlay() {
+    return Overlays.addOverlay("line3d", {
     position: { x: 0, y: 0, z: 0},
     end: { x: 0, y: 0, z: 0},
     color: { red: 255, green: 255, blue: 255},
@@ -176,34 +184,24 @@ var linePreviewTop = Overlays.addOverlay("line3d", {
     visible: false,
     lineWidth: previewLineWidth
 });
+}
+
+//Cursor line previews for up to three cursors
+linePreviewTop[0] = addLineOverlay();
+linePreviewTop[1] = addLineOverlay();
+linePreviewTop[2] = addLineOverlay();
 
-var linePreviewBottom = Overlays.addOverlay("line3d", {
-    position: { x: 0, y: 0, z: 0},
-    end: { x: 0, y: 0, z: 0},
-    color: { red: 255, green: 255, blue: 255},
-    alpha: 1,
-    visible: false,
-    lineWidth: previewLineWidth
-});
+linePreviewBottom[0] = addLineOverlay();
+linePreviewBottom[1] = addLineOverlay();
+linePreviewBottom[2] = addLineOverlay();
 
-var linePreviewLeft = Overlays.addOverlay("line3d", {
-    position: { x: 0, y: 0, z: 0},
-    end: { x: 0, y: 0, z: 0},
-    color: { red: 255, green: 255, blue: 255},
-    alpha: 1,
-    visible: false,
-    lineWidth: previewLineWidth
-});
+linePreviewLeft[0] = addLineOverlay();
+linePreviewLeft[1] = addLineOverlay();
+linePreviewLeft[2] = addLineOverlay();
 
-var linePreviewRight = Overlays.addOverlay("line3d", {
-    position: { x: 0, y: 0, z: 0},
-    end: { x: 0, y: 0, z: 0},
-    color: { red: 255, green: 255, blue: 255},
-    alpha: 1,
-    visible: false,
-    lineWidth: previewLineWidth
-});
-
+linePreviewRight[0] = addLineOverlay();
+linePreviewRight[1] = addLineOverlay();
+linePreviewRight[2] = addLineOverlay();
 
 // these will be used below
 var scaleSelectorWidth = 144;
@@ -809,21 +807,21 @@ function showPreviewLines() {
         var pasteVoxel = getNewPasteVoxel(pickRay);
 
         // X axis
-        Overlays.editOverlay(linePreviewBottom, {
+        Overlays.editOverlay(linePreviewBottom[currentCursor], {
             position: pasteVoxel.origin,
             end: {x: pasteVoxel.origin.x + pasteVoxel.voxelSize, y: pasteVoxel.origin.y, z: pasteVoxel.origin.z },
             visible: true
         });
 
         // Y axis
-        Overlays.editOverlay(linePreviewRight, {
+        Overlays.editOverlay(linePreviewRight[currentCursor], {
             position: pasteVoxel.origin,
             end: {x: pasteVoxel.origin.x, y: pasteVoxel.origin.y + pasteVoxel.voxelSize, z: pasteVoxel.origin.z },
             visible: true
         });
 
         // Z axis
-        Overlays.editOverlay(linePreviewTop, {
+        Overlays.editOverlay(linePreviewTop[currentCursor], {
             position: pasteVoxel.origin,
             end: {x: pasteVoxel.origin.x, y: pasteVoxel.origin.y, z: pasteVoxel.origin.z - pasteVoxel.voxelSize },
             visible: true
@@ -837,10 +835,10 @@ function showPreviewLines() {
     if (intersection.intersects) {
         resultVoxel = calculateVoxelFromIntersection(intersection,"");
         Overlays.editOverlay(voxelPreview, { visible: false });
-        Overlays.editOverlay(linePreviewTop, { position: resultVoxel.topLeft, end: resultVoxel.topRight, visible: true });
-        Overlays.editOverlay(linePreviewBottom, { position: resultVoxel.bottomLeft, end: resultVoxel.bottomRight, visible: true });
-        Overlays.editOverlay(linePreviewLeft, { position: resultVoxel.topLeft, end: resultVoxel.bottomLeft, visible: true });
-        Overlays.editOverlay(linePreviewRight, { position: resultVoxel.topRight, end: resultVoxel.bottomRight, visible: true });
+        Overlays.editOverlay(linePreviewTop[currentCursor], { position: resultVoxel.topLeft, end: resultVoxel.topRight, visible: true });
+        Overlays.editOverlay(linePreviewBottom[currentCursor], { position: resultVoxel.bottomLeft, end: resultVoxel.bottomRight, visible: true });
+        Overlays.editOverlay(linePreviewLeft[currentCursor], { position: resultVoxel.topLeft, end: resultVoxel.bottomLeft, visible: true });
+        Overlays.editOverlay(linePreviewRight[currentCursor], { position: resultVoxel.topRight, end: resultVoxel.bottomRight, visible: true });
         colors[0] = {red: intersection.voxel.red, green: intersection.voxel.green , blue: intersection.voxel.blue };
 
         if (copyScale) {
@@ -849,10 +847,10 @@ function showPreviewLines() {
         moveTools();
     } else {
         Overlays.editOverlay(voxelPreview, { visible: false });
-        Overlays.editOverlay(linePreviewTop, { visible: false });
-        Overlays.editOverlay(linePreviewBottom, { visible: false });
-        Overlays.editOverlay(linePreviewLeft, { visible: false });
-        Overlays.editOverlay(linePreviewRight, { visible: false });
+        Overlays.editOverlay(linePreviewTop[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewBottom[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewLeft[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewRight[currentCursor], { visible: false });
     }
 }
 
@@ -862,20 +860,20 @@ function showPreviewGuides() {
             showPreviewVoxel();
 
             // make sure alternative is hidden
-            Overlays.editOverlay(linePreviewTop, { visible: false });
-            Overlays.editOverlay(linePreviewBottom, { visible: false });
-            Overlays.editOverlay(linePreviewLeft, { visible: false });
-            Overlays.editOverlay(linePreviewRight, { visible: false });
+            Overlays.editOverlay(linePreviewTop[currentCursor], { visible: false });
+            Overlays.editOverlay(linePreviewBottom[currentCursor], { visible: false });
+            Overlays.editOverlay(linePreviewLeft[currentCursor], { visible: false });
+            Overlays.editOverlay(linePreviewRight[currentCursor], { visible: false });
         } else {
             showPreviewLines();
         }
     } else {
         // make sure all previews are off
         Overlays.editOverlay(voxelPreview, { visible: false });
-        Overlays.editOverlay(linePreviewTop, { visible: false });
-        Overlays.editOverlay(linePreviewBottom, { visible: false });
-        Overlays.editOverlay(linePreviewLeft, { visible: false });
-        Overlays.editOverlay(linePreviewRight, { visible: false });
+        Overlays.editOverlay(linePreviewTop[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewBottom[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewLeft[currentCursor], { visible: false });
+        Overlays.editOverlay(linePreviewRight[currentCursor], { visible: false });
     }
 }
 
@@ -968,6 +966,14 @@ function mousePressEvent(event) {
         return;
     }
 
+    if (event.deviceID == 1500) { // Left Hydra Controller
+        currentCursor = 0;
+    } else if (event.deviceID == 1501) { // Right Hydra Controller
+        currentCursor = 1;
+    } else {
+        currentCursor = 2;
+    }
+
     var clickedOnSomething = false;
     var clickedOverlay = Overlays.getOverlayAtPoint({x: event.x, y: event.y});
 
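Note: device IDs 1500 and 1501 are the script's hard-coded markers for the left and right Hydra controllers; anything else (the mouse) falls through to the third cursor slot. The mapping added to both mouse event handlers, factored out:

    // Map an input deviceID to one of the three cursor slots used to index
    // the linePreview* overlay arrays (IDs as hard-coded by the script).
    int cursorForDevice(int deviceID) {
        if (deviceID == 1500) { return 0; }   // left Hydra controller
        if (deviceID == 1501) { return 1; }   // right Hydra controller
        return 2;                             // mouse / any other device
    }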
@@ -1220,6 +1226,7 @@ function menuItemEvent(menuItem) {
 }
 
+
 function mouseMoveEvent(event) {
 
     if (!editToolsOn) {
         return;
     }
@@ -1227,6 +1234,14 @@ function mouseMoveEvent(event) {
         return;
     }
 
+    if (event.deviceID == 1500) { // Left Hydra Controller
+        currentCursor = 0;
+    } else if (event.deviceID == 1501) { // Right Hydra Controller
+        currentCursor = 1;
+    } else {
+        currentCursor = 2;
+    }
+
     // Move Import Preview
     if (isImporting) {
         var pickRay = Camera.computePickRay(event.x, event.y);
@@ -1475,10 +1490,12 @@ Controller.captureKeyEvents({ text: "-" });
 
 function scriptEnding() {
     Overlays.deleteOverlay(voxelPreview);
-    Overlays.deleteOverlay(linePreviewTop);
-    Overlays.deleteOverlay(linePreviewBottom);
-    Overlays.deleteOverlay(linePreviewLeft);
-    Overlays.deleteOverlay(linePreviewRight);
+    for (var i = 0; i < linePreviewTop.length; i++) {
+        Overlays.deleteOverlay(linePreviewTop[i]);
+        Overlays.deleteOverlay(linePreviewBottom[i]);
+        Overlays.deleteOverlay(linePreviewLeft[i]);
+        Overlays.deleteOverlay(linePreviewRight[i]);
+    }
     for (s = 0; s < numColors; s++) {
         Overlays.deleteOverlay(swatches[s]);
     }
examples/laserPointer.js  (new file, +23 lines)
@@ -0,0 +1,23 @@
+//
+//  laserPointer.js
+//  examples
+//
+//  Created by Clément Brisset on 7/18/14.
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+var LEFT = 0;
+var RIGHT = 1;
+var LEFT_HAND_FLAG = 1;
+var RIGHT_HAND_FLAG = 2;
+
+function update() {
+    var state = ((Controller.getTriggerValue(LEFT) > 0.9) ? LEFT_HAND_FLAG : 0) +
+                ((Controller.getTriggerValue(RIGHT) > 0.9) ? RIGHT_HAND_FLAG : 0);
+    MyAvatar.setHandState(state);
+}
+
+Script.update.connect(update);
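Note: the two flags form a 2-bit mask, so setHandState() receives a value in 0..3. That encoding presumably lines up with the HAND_STATE_*_POINTING values consumed by the Avatar::render() hunk near the end of this diff; the exact numeric mapping below is an assumption, not taken from the diff:

    // Assumed decoding of the 2-bit hand state produced by laserPointer.js.
    enum HandState {
        HAND_STATE_NULL = 0,             // neither trigger squeezed
        HAND_STATE_LEFT_POINTING = 1,    // LEFT_HAND_FLAG
        HAND_STATE_RIGHT_POINTING = 2,   // RIGHT_HAND_FLAG
        HAND_STATE_BOTH_POINTING = 3     // both flags summed
    };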
@@ -592,7 +592,7 @@ void Application::paintGL() {
 
     if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON) {
         _myCamera.setTightness(0.0f);  //  In first person, camera follows (untweaked) head exactly without delay
-        _myCamera.setTargetPosition(_myAvatar->getHead()->calculateAverageEyePosition());
+        _myCamera.setTargetPosition(_myAvatar->getHead()->getFilteredEyePosition());
         _myCamera.setTargetRotation(_myAvatar->getHead()->getCameraOrientation());
 
     } else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
@@ -611,10 +611,10 @@ void Application::paintGL() {
         if (OculusManager::isConnected()) {
             _myCamera.setDistance(MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
             _myCamera.setTargetRotation(_myAvatar->getWorldAlignedOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
-            _myCamera.setTargetPosition(_myAvatar->getHead()->calculateAverageEyePosition() + glm::vec3(0, _raiseMirror * _myAvatar->getScale(), 0));
+            _myCamera.setTargetPosition(_myAvatar->getHead()->getEyePosition() + glm::vec3(0, _raiseMirror * _myAvatar->getScale(), 0));
         } else {
             _myCamera.setTightness(0.0f);
-            glm::vec3 eyePosition = _myAvatar->getHead()->calculateAverageEyePosition();
+            glm::vec3 eyePosition = _myAvatar->getHead()->getFilteredEyePosition();
             float headHeight = eyePosition.y - _myAvatar->getPosition().y;
             _myCamera.setDistance(MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
             _myCamera.setTargetPosition(_myAvatar->getPosition() + glm::vec3(0, headHeight + (_raiseMirror * _myAvatar->getScale()), 0));
@@ -1911,17 +1911,9 @@ void Application::updateMyAvatarLookAtPosition() {
             }
         } else {
             // I am not looking at anyone else, so just look forward
-            lookAtSpot = _myAvatar->getHead()->calculateAverageEyePosition() +
+            lookAtSpot = _myAvatar->getHead()->getEyePosition() +
                 (_myAvatar->getHead()->getFinalOrientationInWorldFrame() * glm::vec3(0.f, 0.f, -TREE_SCALE));
         }
-        // TODO: Add saccade to mouse pointer when stable, IF not looking at someone (since we know we are looking at it)
-        /*
-        const float FIXED_MIN_EYE_DISTANCE = 0.3f;
-        float minEyeDistance = FIXED_MIN_EYE_DISTANCE + (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON ? 0.0f :
-            glm::distance(_mouseRayOrigin, _myAvatar->getHead()->calculateAverageEyePosition()));
-        lookAtSpot = _mouseRayOrigin + _mouseRayDirection * qMax(minEyeDistance, distance);
-        */
-
     }
     //
     //  Deflect the eyes a bit to match the detected Gaze from 3D camera if active
@@ -1931,7 +1923,7 @@ void Application::updateMyAvatarLookAtPosition() {
         float eyeYaw = tracker->getEstimatedEyeYaw();
         const float GAZE_DEFLECTION_REDUCTION_DURING_EYE_CONTACT = 0.1f;
         // deflect using Faceshift gaze data
-        glm::vec3 origin = _myAvatar->getHead()->calculateAverageEyePosition();
+        glm::vec3 origin = _myAvatar->getHead()->getEyePosition();
         float pitchSign = (_myCamera.getMode() == CAMERA_MODE_MIRROR) ? -1.0f : 1.0f;
         float deflection = Menu::getInstance()->getFaceshiftEyeDeflection();
         if (isLookingAtSomeone) {
@@ -2935,7 +2927,7 @@ void Application::renderRearViewMirror(const QRect& region, bool billboard) {
             _mirrorCamera.setTargetPosition(glm::vec3());
 
         } else {
-            _mirrorCamera.setTargetPosition(_myAvatar->getHead()->calculateAverageEyePosition());
+            _mirrorCamera.setTargetPosition(_myAvatar->getHead()->getEyePosition());
         }
     }
     _mirrorCamera.setAspectRatio((float)region.width() / region.height());
@@ -2964,7 +2956,7 @@ void Application::renderRearViewMirror(const QRect& region, bool billboard) {
         _myAvatar->getSkeletonModel().getNeckPosition(neckPosition);
 
         // get the eye position relative to the body
-        glm::vec3 eyePosition = _myAvatar->getHead()->calculateAverageEyePosition();
+        glm::vec3 eyePosition = _myAvatar->getHead()->getEyePosition();
         float eyeHeight = eyePosition.y - _myAvatar->getPosition().y;
 
         // set the translation of the face relative to the neck position
@@ -3355,7 +3347,7 @@ void Application::nodeKilled(SharedNodePointer node) {
     _modelEditSender.nodeKilled(node);
 
     if (node->getType() == NodeType::AudioMixer) {
-        QMetaObject::invokeMethod(&_audio, "resetIncomingMixedAudioSequenceNumberStats");
+        QMetaObject::invokeMethod(&_audio, "audioMixerKilled");
    }
 
    if (node->getType() == NodeType::VoxelServer) {
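Note: QMetaObject::invokeMethod names the slot as a string so Qt can dispatch the call onto _audio's own thread when the caller lives on a different one. The slot itself, audioMixerKilled() (defined in the Audio.cpp hunk further down), zeroes the outgoing sequence number and clears the stats, since a replacement mixer expects a fresh stream. Equivalent explicit form (standard Qt, shown for clarity):

    // Qt::AutoConnection queues the call if the target is on another thread.
    QMetaObject::invokeMethod(&_audio, "audioMixerKilled", Qt::AutoConnection);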
@@ -3641,7 +3633,7 @@ ScriptEngine* Application::loadScript(const QString& scriptName, bool loadScript
     scriptEngine->getModelsScriptingInterface()->setModelTree(_models.getTree());
 
     // model has some custom types
-    Model::registerMetaTypes(scriptEngine->getEngine());
+    Model::registerMetaTypes(scriptEngine);
 
     // hook our avatar object into this script engine
     scriptEngine->setAvatarData(_myAvatar, "MyAvatar"); // leave it as a MyAvatar class to expose thrust features
@@ -43,6 +43,7 @@
 #include "Audio.h"
 #include "Menu.h"
 #include "Util.h"
+#include "AudioRingBuffer.h"
 
 static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
 
@@ -125,14 +126,16 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _scopeInput(0),
     _scopeOutputLeft(0),
     _scopeOutputRight(0),
+    _statsEnabled(false),
     _starveCount(0),
     _consecutiveNotMixedCount(0),
     _outgoingAvatarAudioSequenceNumber(0),
     _incomingMixedAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH),
     _interframeTimeGapStats(TIME_GAPS_STATS_INTERVAL_SAMPLES, TIME_GAP_STATS_WINDOW_INTERVALS),
-    _inputRingBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
+    _audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AUDIO_CALLBACK_MSECS * CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
+    _inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _outputRingBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
-    _audioOutputBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS)
+    _audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS)
 {
     // clear the array of locally injected samples
     memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
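Note: judging by the argument names in this hunk, the two MovingMinMaxAvg constructor arguments appear to be (samples per aggregation interval, intervals per sliding window); that reading is an inference from this diff, not a documented signature. Under it, MSECS_PER_SECOND / AUDIO_CALLBACK_MSECS * CALLBACK_ACCELERATOR_RATIO is roughly the number of input callbacks per second, making each interval about one second of samples:

    // Assumed semantics of MovingMinMaxAvg<T>(samplesPerInterval, windowIntervals):
    MovingMinMaxAvg<float> stats(100, 10);  // 1 interval = 100 samples; window = 10 intervals
    stats.update(3.5f);                     // feed one measurement
    float avg = stats.getWindowAverage();   // min/max/avg over the whole window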
@@ -148,15 +151,34 @@ void Audio::init(QGLWidget *parent) {
 
 void Audio::reset() {
     _ringBuffer.reset();
 
+    // we don't want to reset seq numbers when space-bar reset occurs.
+    //_outgoingAvatarAudioSequenceNumber = 0;
+
+    resetStats();
+}
+
+void Audio::resetStats() {
     _starveCount = 0;
     _consecutiveNotMixedCount = 0;
 
     _audioMixerAvatarStreamAudioStats = AudioStreamStats();
     _audioMixerInjectedStreamAudioStatsMap.clear();
 
-    //_outgoingAvatarAudioSequenceNumber = 0;
     _incomingMixedAudioSequenceNumberStats.reset();
+
+    _interframeTimeGapStats.reset();
+
+    _audioInputMsecsReadStats.reset();
+    _inputRingBufferMsecsAvailableStats.reset();
+
+    _outputRingBufferFramesAvailableStats.reset();
+    _audioOutputMsecsUnplayedStats.reset();
+}
+
+void Audio::audioMixerKilled() {
+    _outgoingAvatarAudioSequenceNumber = 0;
+    resetStats();
 }
 
 QAudioDeviceInfo getNamedAudioDeviceForMode(QAudio::Mode mode, const QString& deviceName) {
@@ -499,8 +521,11 @@ void Audio::handleAudioInput() {
     }
 
     _inputRingBuffer.writeData(inputByteArray.data(), inputByteArray.size());
+
+    float audioInputMsecsRead = inputByteArray.size() / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
+    _audioInputMsecsReadStats.update(audioInputMsecsRead);
 
-    while (_inputRingBuffer.samplesAvailable() > inputSamplesRequired) {
+    while (_inputRingBuffer.samplesAvailable() >= inputSamplesRequired) {
 
         int16_t* inputAudioSamples = new int16_t[inputSamplesRequired];
         _inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);
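Note: the new statistic converts a byte count into wall-clock milliseconds through the real Qt API QAudioFormat::bytesForDuration(), which takes its duration in microseconds, hence the USECS_PER_MSEC argument. The conversion, isolated as a sketch:

    #include <QAudioFormat>

    // Bytes -> milliseconds for a given device format. The helper name is
    // ours; the bytesForDuration(qint64 microseconds) call is standard Qt.
    float bytesToMsecs(int bytes, const QAudioFormat& format) {
        const qint64 USECS_PER_MSEC = 1000;
        return bytes / (float)format.bytesForDuration(USECS_PER_MSEC);
    }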
@@ -811,11 +836,12 @@ AudioStreamStats Audio::getDownstreamAudioStreamStats() const {
 
 void Audio::sendDownstreamAudioStatsPacket() {
 
-    _inputRingBufferFramesAvailableStats.update(getInputRingBufferFramesAvailable());
+    // since this function is called every second, we'll sample some of our stats here
+
+    _inputRingBufferMsecsAvailableStats.update(getInputRingBufferMsecsAvailable());
 
-    // since this function is called every second, we'll sample the number of audio frames available here.
     _outputRingBufferFramesAvailableStats.update(_ringBuffer.framesAvailable());
-    _audioOutputBufferFramesAvailableStats.update(getOutputRingBufferFramesAvailable());
+    _audioOutputMsecsUnplayedStats.update(getAudioOutputMsecsUnplayed());
 
     // push the current seq number stats into history, which moves the history window forward 1s
     // (since that's how often pushStatsToHistory() is called)
@@ -1286,6 +1312,10 @@ void Audio::toggleScopePause() {
     _scopeEnabledPause = !_scopeEnabledPause;
 }
 
+void Audio::toggleStats() {
+    _statsEnabled = !_statsEnabled;
+}
+
 void Audio::selectAudioScopeFiveFrames() {
     if (Menu::getInstance()->isOptionChecked(MenuOption::AudioScopeFiveFrames)) {
         reallocateScope(5);
@@ -1365,6 +1395,174 @@ void Audio::addBufferToScope(
     }
 }
 
+void Audio::renderStats(const float* color, int width, int height) {
+    if (!_statsEnabled) {
+        return;
+    }
+
+    const int LINES_WHEN_CENTERED = 30;
+    const int CENTERED_BACKGROUND_HEIGHT = STATS_HEIGHT_PER_LINE * LINES_WHEN_CENTERED;
+
+    int lines = _audioMixerInjectedStreamAudioStatsMap.size() * 7 + 23;
+    int statsHeight = STATS_HEIGHT_PER_LINE * lines;
+
+    static const float backgroundColor[4] = { 0.2f, 0.2f, 0.2f, 0.6f };
+
+    int x = std::max((width - (int)STATS_WIDTH) / 2, 0);
+    int y = std::max((height - CENTERED_BACKGROUND_HEIGHT) / 2, 0);
+    int w = STATS_WIDTH;
+    int h = statsHeight;
+    renderBackground(backgroundColor, x, y, w, h);
+
+    int horizontalOffset = x + 5;
+    int verticalOffset = y;
+
+    float scale = 0.10f;
+    float rotation = 0.0f;
+    int font = 2;
+
+    char latencyStatString[512];
+
+    const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
+
+    float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f, mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f;
+
+    SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer);
+    if (!audioMixerNodePointer.isNull()) {
+        audioInputBufferLatency = _audioInputMsecsReadStats.getWindowAverage();
+        inputRingBufferLatency = getInputRingBufferAverageMsecsAvailable();
+        networkRoundtripLatency = audioMixerNodePointer->getPingMs();
+        mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._ringBufferFramesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
+        outputRingBufferLatency = _outputRingBufferFramesAvailableStats.getWindowAverage() * BUFFER_SEND_INTERVAL_MSECS;
+        audioOutputBufferLatency = _audioOutputMsecsUnplayedStats.getWindowAverage();
+    }
+    float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency;
+
+    sprintf(latencyStatString, "     Audio input buffer: %7.2fms  - avg msecs of samples read to the input ring buffer in last 10s", audioInputBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, "      Input ring buffer: %7.2fms  - avg msecs of samples in input ring buffer in last 10s", inputRingBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, "       Network to mixer: %7.2fms  - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, " AudioMixer ring buffer: %7.2fms  - avg msecs of samples in audio mixer's ring buffer in last 10s", mixerRingBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, "      Network to client: %7.2fms  - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, "     Output ring buffer: %7.2fms  - avg msecs of samples in output ring buffer in last 10s", outputRingBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, "    Audio output buffer: %7.2fms  - avg msecs of samples in audio output buffer in last 10s", audioOutputBufferLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    sprintf(latencyStatString, "                  TOTAL: %7.2fms\n", totalLatency);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);
+
+    verticalOffset += STATS_HEIGHT_PER_LINE;    // blank line
+
+    char downstreamLabelString[] = "Downstream mixed audio stats:";
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
+
+    renderAudioStreamStats(getDownstreamAudioStreamStats(), horizontalOffset, verticalOffset, scale, rotation, font, color, true);
+
+    verticalOffset += STATS_HEIGHT_PER_LINE;    // blank line
+
+    char upstreamMicLabelString[] = "Upstream mic audio stats:";
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamMicLabelString, color);
+
+    renderAudioStreamStats(_audioMixerAvatarStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
+
+    foreach(const AudioStreamStats& injectedStreamAudioStats, _audioMixerInjectedStreamAudioStatsMap) {
+
+        verticalOffset += STATS_HEIGHT_PER_LINE;    // blank line
+
+        char upstreamInjectedLabelString[512];
+        sprintf(upstreamInjectedLabelString, "Upstream injected audio stats: stream ID: %s",
+            injectedStreamAudioStats._streamIdentifier.toString().toLatin1().data());
+        verticalOffset += STATS_HEIGHT_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamInjectedLabelString, color);
+
+        renderAudioStreamStats(injectedStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
+    }
+}
+
+void Audio::renderAudioStreamStats(const AudioStreamStats& streamStats, int horizontalOffset, int& verticalOffset,
+    float scale, float rotation, int font, const float* color, bool isDownstreamStats) {
+
+    char stringBuffer[512];
+
+    sprintf(stringBuffer, "                     Packet loss | overall: %5.2f%% (%d lost), last_30s: %5.2f%% (%d lost)",
+        streamStats._packetStreamStats.getLostRate() * 100.0f,
+        streamStats._packetStreamStats._numLost,
+        streamStats._packetStreamWindowStats.getLostRate() * 100.0f,
+        streamStats._packetStreamWindowStats._numLost);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    if (isDownstreamStats) {
+        const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
+        sprintf(stringBuffer, "               Ringbuffer frames | desired: %u, avg_available(10s): %u+%d, available: %u+%d",
+            streamStats._ringBufferDesiredJitterBufferFrames,
+            streamStats._ringBufferFramesAvailableAverage,
+            (int)(getAudioOutputAverageMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS),
+            streamStats._ringBufferFramesAvailable,
+            (int)(getAudioOutputMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS));
+    } else {
+        sprintf(stringBuffer, "               Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u",
+            streamStats._ringBufferDesiredJitterBufferFrames,
+            streamStats._ringBufferFramesAvailableAverage,
+            streamStats._ringBufferFramesAvailable);
+    }
+
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    sprintf(stringBuffer, "                Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u",
+        streamStats._ringBufferStarveCount,
+        streamStats._ringBufferConsecutiveNotMixedCount,
+        streamStats._ringBufferSilentFramesDropped,
+        streamStats._ringBufferOverflowCount);
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    sprintf(stringBuffer, " Inter-packet timegaps (overall) | min: %9s, max: %9s, avg: %9s",
+        formatUsecTime(streamStats._timeGapMin).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapMax).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapAverage).toLatin1().data());
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+
+    sprintf(stringBuffer, "Inter-packet timegaps (last 30s) | min: %9s, max: %9s, avg: %9s",
+        formatUsecTime(streamStats._timeGapWindowMin).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapWindowMax).toLatin1().data(),
+        formatUsecTime(streamStats._timeGapWindowAverage).toLatin1().data());
+    verticalOffset += STATS_HEIGHT_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
+}
+
 void Audio::renderScope(int width, int height) {
 
     if (!_scopeEnabled)
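Note: the latency readout is a straight sum of the six pipeline stages sampled above. The display prints the measured round trip as two halves ("Network to mixer" and "Network to client"), but the total adds the full ping exactly once:

    t_total = t_inputBuffer + t_inputRing + t_ping + t_mixerRing + t_outputRing + t_outputBuffer

The two ring-buffer terms arrive as frame counts and are converted to milliseconds by multiplying with BUFFER_SEND_INTERVAL_MSECS; the other terms are tracked in milliseconds directly.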
@@ -1622,15 +1820,14 @@ int Audio::calculateNumberOfFrameSamples(int numBytes) const {
     return frameSamples;
 }
 
-int Audio::getOutputRingBufferFramesAvailable() const {
-    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
-        * (_desiredOutputFormat.channelCount() / (float)_outputFormat.channelCount());
-
-    return (_audioOutput->bufferSize() - _audioOutput->bytesFree()) * networkOutputToOutputRatio
-        / (sizeof(int16_t) * _ringBuffer.getNumFrameSamples());
+float Audio::getAudioOutputMsecsUnplayed() const {
+    int bytesAudioOutputUnplayed = _audioOutput->bufferSize() - _audioOutput->bytesFree();
+    float msecsAudioOutputUnplayed = bytesAudioOutputUnplayed / (float)_outputFormat.bytesForDuration(USECS_PER_MSEC);
+    return msecsAudioOutputUnplayed;
 }
 
-int Audio::getInputRingBufferFramesAvailable() const {
-    float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
-    return _inputRingBuffer.samplesAvailable() / inputToNetworkInputRatio / _inputRingBuffer.getNumFrameSamples();
+float Audio::getInputRingBufferMsecsAvailable() const {
+    int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * sizeof(int16_t);
+    float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
+    return msecsInInputRingBuffer;
 }
 
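Note: reporting milliseconds instead of frame counts sidesteps the device-to-network format ratios the old code needed, because bytesForDuration() already accounts for the device format. A worked example using standard PCM arithmetic (the numbers are illustrative, not from the diff): for 16-bit stereo at 48 kHz,

    48000 samples/s x 2 channels x 2 bytes = 192000 bytes/s = 192 bytes/ms
    => bytesForDuration(1 ms) = 192, so a 9600-byte backlog reads as 9600 / 192 = 50 ms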
@@ -71,6 +71,7 @@ public:
 
     void renderToolBox(int x, int y, bool boxed);
     void renderScope(int width, int height);
+    void renderStats(const float* color, int width, int height);
 
     int getNetworkSampleRate() { return SAMPLE_RATE; }
     int getNetworkBufferLengthSamplesPerChannel() { return NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }
@@ -78,12 +79,12 @@ public:
     bool getProcessSpatialAudio() const { return _processSpatialAudio; }
 
     const SequenceNumberStats& getIncomingMixedAudioSequenceNumberStats() const { return _incomingMixedAudioSequenceNumberStats; }
 
-    int getInputRingBufferFramesAvailable() const;
-    int getInputRingBufferAverageFramesAvailable() const { return (int)_inputRingBufferFramesAvailableStats.getWindowAverage(); }
-
-    int getOutputRingBufferFramesAvailable() const;
-    int getOutputRingBufferAverageFramesAvailable() const { return (int)_audioOutputBufferFramesAvailableStats.getWindowAverage(); }
+    float getInputRingBufferMsecsAvailable() const;
+    float getInputRingBufferAverageMsecsAvailable() const { return (float)_inputRingBufferMsecsAvailableStats.getWindowAverage(); }
+
+    float getAudioOutputMsecsUnplayed() const;
+    float getAudioOutputAverageMsecsUnplayed() const { return (float)_audioOutputMsecsUnplayedStats.getWindowAverage(); }
 
 public slots:
     void start();
@@ -93,12 +94,14 @@ public slots:
     void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
     void handleAudioInput();
     void reset();
-    void resetIncomingMixedAudioSequenceNumberStats() { _incomingMixedAudioSequenceNumberStats.reset(); }
+    void resetStats();
+    void audioMixerKilled();
     void toggleMute();
     void toggleAudioNoiseReduction();
     void toggleToneInjection();
     void toggleScope();
     void toggleScopePause();
+    void toggleStats();
     void toggleAudioSpatialProcessing();
     void toggleStereoInput();
     void selectAudioScopeFiveFrames();
@@ -245,6 +248,10 @@ private:
     void renderGrid(const float* color, int x, int y, int width, int height, int rows, int cols);
     void renderLineStrip(const float* color, int x, int y, int n, int offset, const QByteArray* byteArray);
 
+    // audio stats methods for rendering
+    void renderAudioStreamStats(const AudioStreamStats& streamStats, int horizontalOffset, int& verticalOffset,
+        float scale, float rotation, int font, const float* color, bool isDownstreamStats = false);
+
     // Audio scope data
     static const unsigned int NETWORK_SAMPLES_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
     static const unsigned int DEFAULT_FRAMES_PER_SCOPE = 5;
@@ -261,6 +268,13 @@ private:
     QByteArray* _scopeInput;
     QByteArray* _scopeOutputLeft;
     QByteArray* _scopeOutputRight;
+#ifdef _WIN32
+    static const unsigned int STATS_WIDTH = 1500;
+#else
+    static const unsigned int STATS_WIDTH = 650;
+#endif
+    static const unsigned int STATS_HEIGHT_PER_LINE = 20;
+    bool _statsEnabled;
 
     int _starveCount;
     int _consecutiveNotMixedCount;
@@ -273,10 +287,11 @@ private:
 
     MovingMinMaxAvg<quint64> _interframeTimeGapStats;
 
-    MovingMinMaxAvg<int> _inputRingBufferFramesAvailableStats;
+    MovingMinMaxAvg<float> _audioInputMsecsReadStats;
+    MovingMinMaxAvg<float> _inputRingBufferMsecsAvailableStats;
 
     MovingMinMaxAvg<int> _outputRingBufferFramesAvailableStats;
-    MovingMinMaxAvg<int> _audioOutputBufferFramesAvailableStats;
+    MovingMinMaxAvg<float> _audioOutputMsecsUnplayedStats;
 };
 
@@ -593,6 +593,12 @@ Menu::Menu() :
                                            Qt::CTRL | Qt::SHIFT | Qt::Key_U,
                                            false);
 
+    addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioStats,
+                                           0,
+                                           false,
+                                           appInstance->getAudio(),
+                                           SLOT(toggleStats()));
+
     addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, true);
 
     addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
@@ -315,6 +315,7 @@ namespace MenuOption {
     const QString AudioScopeFrames = "Display Frames";
     const QString AudioScopePause = "Pause Audio Scope";
     const QString AudioScopeTwentyFrames = "Twenty";
+    const QString AudioStats = "Audio Stats";
     const QString AudioSpatialProcessingAlternateDistanceAttenuate = "Alternate distance attenuation";
     const QString AudioSpatialProcessing = "Audio Spatial Processing";
     const QString AudioSpatialProcessingDontDistanceAttenuate = "Don't calculate distance attenuation";
|
|
@@ -218,6 +218,52 @@ static TextRenderer* textRenderer(TextRendererType type) {
 }
 
 void Avatar::render(const glm::vec3& cameraPosition, RenderMode renderMode) {
+
+    if (glm::distance(Application::getInstance()->getAvatar()->getPosition(),
+            _position) < 10.0f) {
+        // render pointing lasers
+        glm::vec3 laserColor = glm::vec3(1.0f, 0.0f, 1.0f);
+        float laserLength = 50.0f;
+        if (_handState == HAND_STATE_LEFT_POINTING ||
+            _handState == HAND_STATE_BOTH_POINTING) {
+            int leftIndex = _skeletonModel.getLeftHandJointIndex();
+            glm::vec3 leftPosition;
+            glm::quat leftRotation;
+            _skeletonModel.getJointPositionInWorldFrame(leftIndex, leftPosition);
+            _skeletonModel.getJointRotationInWorldFrame(leftIndex, leftRotation);
+            glPushMatrix(); {
+                glTranslatef(leftPosition.x, leftPosition.y, leftPosition.z);
+                float angle = glm::degrees(glm::angle(leftRotation));
+                glm::vec3 axis = glm::axis(leftRotation);
+                glRotatef(angle, axis.x, axis.y, axis.z);
+                glBegin(GL_LINES);
+                glColor3f(laserColor.x, laserColor.y, laserColor.z);
+                glVertex3f(0.0f, 0.0f, 0.0f);
+                glVertex3f(0.0f, laserLength, 0.0f);
+                glEnd();
+            } glPopMatrix();
+        }
+        if (_handState == HAND_STATE_RIGHT_POINTING ||
+            _handState == HAND_STATE_BOTH_POINTING) {
+            int rightIndex = _skeletonModel.getRightHandJointIndex();
+            glm::vec3 rightPosition;
+            glm::quat rightRotation;
+            _skeletonModel.getJointPositionInWorldFrame(rightIndex, rightPosition);
+            _skeletonModel.getJointRotationInWorldFrame(rightIndex, rightRotation);
+            glPushMatrix(); {
+                glTranslatef(rightPosition.x, rightPosition.y, rightPosition.z);
+                float angle = glm::degrees(glm::angle(rightRotation));
+                glm::vec3 axis = glm::axis(rightRotation);
+                glRotatef(angle, axis.x, axis.y, axis.z);
+                glBegin(GL_LINES);
+                glColor3f(laserColor.x, laserColor.y, laserColor.z);
+                glVertex3f(0.0f, 0.0f, 0.0f);
+                glVertex3f(0.0f, laserLength, 0.0f);
+                glEnd();
+            } glPopMatrix();
+        }
+    }
+
     // simple frustum check
     float boundingRadius = getBillboardSize();
     ViewFrustum* frustum = (renderMode == Avatar::SHADOW_RENDER_MODE) ?
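Note on the two laser branches above: both convert the hand joint's world-space quaternion into an angle/axis pair because the fixed-function pipeline has no quaternion input. A minimal standalone sketch of just that conversion (assuming a GLM build that returns radians from glm::angle, which the glm::degrees() wrapper above implies, and an OpenGL header in scope):

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>
    #include <GL/gl.h>

    // Apply a unit quaternion as a legacy OpenGL rotation.
    void applyQuaternion(const glm::quat& rotation) {
        float angleDegrees = glm::degrees(glm::angle(rotation)); // glRotatef expects degrees
        glm::vec3 axis = glm::axis(rotation);                    // unit-length rotation axis
        glRotatef(angleDegrees, axis.x, axis.y, axis.z);
    }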
@@ -125,7 +125,7 @@ void Hand::render(bool isMine, Model::RenderMode renderMode) {
 
     glEnable(GL_DEPTH_TEST);
     glEnable(GL_RESCALE_NORMAL);
 }
 
 void Hand::renderHandTargets(bool isMine) {
     glPushMatrix();
@@ -159,6 +159,10 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
         }
     }
     _eyePosition = calculateAverageEyePosition();
+
+    float velocityFilter = glm::clamp(1.0f - glm::length(_filteredEyePosition - _eyePosition), 0.0f, 1.0f);
+    _filteredEyePosition = velocityFilter * _filteredEyePosition + (1.0f - velocityFilter) * _eyePosition;
+
 }
 
 void Head::relaxLean(float deltaTime) {
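The two added lines implement an adaptive exponential filter: the smoothing weight is 1 minus the distance the eye moved this frame, clamped to [0, 1], so a nearly still eye keeps its filtered position stable while motion of about one world unit or more per frame snaps the filtered value to the measurement. The same update in isolation, as a self-contained sketch:

    #include <glm/glm.hpp>

    // One step of the velocity-adaptive low-pass used above.
    glm::vec3 filterEyePosition(const glm::vec3& filtered, const glm::vec3& measured) {
        float weight = glm::clamp(1.0f - glm::length(filtered - measured), 0.0f, 1.0f);
        return weight * filtered + (1.0f - weight) * measured; // weight ~ 1: hold; weight ~ 0: snap
    }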
@@ -88,8 +88,7 @@ public:
 
     const bool getReturnToCenter() const { return _returnHeadToCenter; } // Do you want head to try to return to center (depends on interface detected)
     float getAverageLoudness() const { return _averageLoudness; }
-    glm::vec3 calculateAverageEyePosition() const { return _leftEyePosition + (_rightEyePosition - _leftEyePosition ) * ONE_HALF; }
+    glm::vec3 getFilteredEyePosition() const { return _filteredEyePosition; }
 
     /// \return the point about which scaling occurs.
     glm::vec3 getScalePivot() const;
 
@@ -110,6 +109,8 @@ public:
     void addLeanDeltas(float sideways, float forward);
 
 private:
+    glm::vec3 calculateAverageEyePosition() const { return _leftEyePosition + (_rightEyePosition - _leftEyePosition ) * ONE_HALF; }
+
     // disallow copies of the Head, copy of owning Avatar is disallowed too
     Head(const Head&);
     Head& operator= (const Head&);
@@ -120,6 +121,8 @@ private:
     glm::vec3 _leftEyePosition;
     glm::vec3 _rightEyePosition;
     glm::vec3 _eyePosition;
+    glm::vec3 _filteredEyePosition; // velocity filtered world space eye position
+
     float _scale;
     float _lastLoudness;
     float _audioAttack;
@@ -137,9 +137,6 @@ void MyAvatar::simulate(float deltaTime) {
     }
     _skeletonModel.setShowTrueJointTransforms(! Menu::getInstance()->isOptionChecked(MenuOption::CollideAsRagdoll));
 
-    // no extra movement of the hand here any more ...
-    _handState = HAND_STATE_NULL;
-
     {
         PerformanceTimer perfTimer("transform");
         updateOrientation(deltaTime);
@@ -915,7 +912,7 @@ const float RENDER_HEAD_CUTOFF_DISTANCE = 0.50f;
 bool MyAvatar::shouldRenderHead(const glm::vec3& cameraPosition, RenderMode renderMode) const {
     const Head* head = getHead();
     return (renderMode != NORMAL_RENDER_MODE) ||
-        (glm::length(cameraPosition - head->calculateAverageEyePosition()) > RENDER_HEAD_CUTOFF_DISTANCE * _scale);
+        (glm::length(cameraPosition - head->getEyePosition()) > RENDER_HEAD_CUTOFF_DISTANCE * _scale);
 }
 
 float MyAvatar::computeDistanceToFloor(const glm::vec3& startPoint) {
@@ -21,9 +21,9 @@
 enum AvatarHandState
 {
     HAND_STATE_NULL = 0,
-    HAND_STATE_OPEN,
-    HAND_STATE_GRASPING,
-    HAND_STATE_POINTING,
+    HAND_STATE_LEFT_POINTING,
+    HAND_STATE_RIGHT_POINTING,
+    HAND_STATE_BOTH_POINTING,
     NUM_HAND_STATES
 };
 
@@ -27,6 +27,7 @@
 
 // Generic client side Octree renderer class.
 class ModelTreeRenderer : public OctreeRenderer, public ModelItemFBXService {
+    Q_OBJECT
 public:
     ModelTreeRenderer();
     virtual ~ModelTreeRenderer();
@@ -56,7 +57,7 @@ public:
 
 protected:
     void clearModelsCache();
-    Model* getModel(const ModelItem& modelItem);
+    Q_INVOKABLE Model* getModel(const ModelItem& modelItem);
     QMap<uint32_t, Model*> _knownModelsItemModels;
     QMap<uint32_t, Model*> _unknownModelsItemModels;
 };
@@ -178,7 +178,7 @@ void ApplicationOverlay::computeOculusPickRay(float x, float y, glm::vec3& direc
     float dist = sqrt(x * x + y * y);
     float z = -sqrt(1.0f - dist * dist);
 
-    glm::vec3 relativePosition = myAvatar->getHead()->calculateAverageEyePosition() +
+    glm::vec3 relativePosition = myAvatar->getHead()->getEyePosition() +
         glm::normalize(myAvatar->getOrientation() * glm::vec3(x, y, z));
 
     //Rotate the UI pick ray by the avatar orientation
@@ -274,7 +274,7 @@ QPoint ApplicationOverlay::getPalmClickLocation(const PalmData *palm) const {
     MyAvatar* myAvatar = application->getAvatar();
 
     glm::vec3 tip = myAvatar->getLaserPointerTipPosition(palm);
-    glm::vec3 eyePos = myAvatar->getHead()->calculateAverageEyePosition();
+    glm::vec3 eyePos = myAvatar->getHead()->getEyePosition();
     glm::quat orientation = glm::inverse(myAvatar->getOrientation());
     glm::vec3 dir = orientation * glm::normalize(application->getCamera()->getPosition() - tip); //direction of ray goes towards camera
     glm::vec3 tipPos = orientation * (tip - eyePos);
@@ -331,7 +331,7 @@ bool ApplicationOverlay::calculateRayUICollisionPoint(const glm::vec3& position,
 
     glm::quat orientation = myAvatar->getOrientation();
 
-    glm::vec3 relativePosition = orientation * (position - myAvatar->getHead()->calculateAverageEyePosition());
+    glm::vec3 relativePosition = orientation * (position - myAvatar->getHead()->getEyePosition());
     glm::vec3 relativeDirection = orientation * direction;
 
     float t;
@@ -375,7 +375,7 @@ void ApplicationOverlay::displayOverlayTextureOculus(Camera& whichCamera) {
 
     glPushMatrix();
     const glm::quat& orientation = myAvatar->getOrientation();
-    const glm::vec3& position = myAvatar->getHead()->calculateAverageEyePosition();
+    const glm::vec3& position = myAvatar->getHead()->getEyePosition();
 
     glm::mat4 rotation = glm::toMat4(orientation);
 
@@ -1022,6 +1022,8 @@ void ApplicationOverlay::renderAudioMeter() {
 
     audio->renderScope(glWidget->width(), glWidget->height());
 
+    audio->renderStats(WHITE_TEXT, glWidget->width(), glWidget->height());
+
     glBegin(GL_QUADS);
     if (isClipping) {
         glColor3f(1, 0, 0);
@@ -1210,7 +1212,7 @@ void ApplicationOverlay::renderTexturedHemisphere() {
     Application* application = Application::getInstance();
     MyAvatar* myAvatar = application->getAvatar();
     const glm::quat& orientation = myAvatar->getOrientation();
-    const glm::vec3& position = myAvatar->getHead()->calculateAverageEyePosition();
+    const glm::vec3& position = myAvatar->getHead()->getEyePosition();
 
     glm::mat4 rotation = glm::toMat4(orientation);
 
@@ -23,7 +23,6 @@
 #include "Menu.h"
 #include "ScriptsModel.h"
 
-
 RunningScriptsWidget::RunningScriptsWidget(QWidget* parent) :
     FramelessDialog(parent, 0, POSITION_LEFT),
     ui(new Ui::RunningScriptsWidget),
@@ -104,13 +103,21 @@ void RunningScriptsWidget::setRunningScripts(const QStringList& list) {
         delete widget->widget();
         delete widget;
     }
+    QHash<QString, int> hash;
     const int CLOSE_ICON_HEIGHT = 12;
     for (int i = 0; i < list.size(); i++) {
+        if (!hash.contains(list.at(i))) {
+            hash.insert(list.at(i), 1);
+        }
         QWidget* row = new QWidget(ui->scrollAreaWidgetContents);
         row->setLayout(new QHBoxLayout(row));
 
         QUrl url = QUrl(list.at(i));
         QLabel* name = new QLabel(url.fileName(), row);
+        if (hash.find(list.at(i)).value() != 1) {
+            name->setText(name->text() + "(" + QString::number(hash.find(list.at(i)).value()) + ")");
+        }
+        ++hash[list.at(i)];
         QPushButton* closeButton = new QPushButton(row);
         closeButton->setFlat(true);
         closeButton->setIcon(
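The QHash added above counts occurrences of each script URL so duplicate rows can be told apart: the first instance keeps the bare filename, and because the counter is incremented only after the label is built, later instances get "(2)", "(3)", and so on. The same scheme in isolation, as a self-contained sketch:

    #include <QHash>
    #include <QString>
    #include <QStringList>
    #include <QDebug>

    // Label duplicates the way setRunningScripts() does.
    void labelDuplicates(const QStringList& list) {
        QHash<QString, int> seen;
        foreach (const QString& item, list) {
            int count = seen.value(item, 1);   // defaults to 1 on first sight
            QString label = (count == 1) ? item : item + "(" + QString::number(count) + ")";
            seen[item] = count + 1;            // the next duplicate gets the next number
            qDebug() << label;
        }
    }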
@@ -278,9 +278,8 @@ void Stats::display(
 
 
     Audio* audio = Application::getInstance()->getAudio();
-    const QHash<QUuid, AudioStreamStats>& audioMixerInjectedStreamAudioStatsMap = audio->getAudioMixerInjectedStreamAudioStatsMap();
 
-    lines = _expanded ? 13 + (audioMixerInjectedStreamAudioStatsMap.size() + 2) * 3 : 3;
+    lines = _expanded ? 4 : 3;
     drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
     horizontalOffset += 5;
 
@@ -313,128 +312,6 @@ void Stats::display(
 
     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, voxelMaxPing, color);
-
-    char inputAudioLabelString[] = "Input: avail_avg_10s/avail";
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, inputAudioLabelString, color);
-
-    char inputAudioStatsString[512];
-    sprintf(inputAudioStatsString, " %d/%d", audio->getInputRingBufferAverageFramesAvailable(),
-        audio->getInputRingBufferFramesAvailable());
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, inputAudioStatsString, color);
-
-    char audioMixerStatsLabelString[] = "AudioMixer stats:";
-    char streamStatsFormatLabelString[] = "lost%/lost_30s%";
-    char streamStatsFormatLabelString2[] = "desired/avail_avg_10s/avail";
-    char streamStatsFormatLabelString3[] = "gaps: min/max/avg, starv/ovfl";
-    char streamStatsFormatLabelString4[] = "gaps_30s: (same), notmix/sdrop";
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, audioMixerStatsLabelString, color);
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString, color);
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString2, color);
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString3, color);
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, streamStatsFormatLabelString4, color);
-
-    char downstreamLabelString[] = " Downstream:";
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);
-
-    char downstreamAudioStatsString[512];
-
-    AudioStreamStats downstreamAudioStreamStats = audio->getDownstreamAudioStreamStats();
-
-    sprintf(downstreamAudioStatsString, " mix: %.2f%%/%.2f%%, %u/%u+%d/%u+%d", downstreamAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
-        downstreamAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
-        downstreamAudioStreamStats._ringBufferDesiredJitterBufferFrames, downstreamAudioStreamStats._ringBufferFramesAvailableAverage,
-        audio->getOutputRingBufferAverageFramesAvailable(),
-        downstreamAudioStreamStats._ringBufferFramesAvailable, audio->getOutputRingBufferFramesAvailable());
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
-
-    sprintf(downstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(downstreamAudioStreamStats._timeGapMin).toLatin1().data(),
-        formatUsecTime(downstreamAudioStreamStats._timeGapMax).toLatin1().data(),
-        formatUsecTime(downstreamAudioStreamStats._timeGapAverage).toLatin1().data(),
-        downstreamAudioStreamStats._ringBufferStarveCount, downstreamAudioStreamStats._ringBufferOverflowCount);
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
-
-    sprintf(downstreamAudioStatsString, " %s/%s/%s, %u/?", formatUsecTime(downstreamAudioStreamStats._timeGapWindowMin).toLatin1().data(),
-        formatUsecTime(downstreamAudioStreamStats._timeGapWindowMax).toLatin1().data(),
-        formatUsecTime(downstreamAudioStreamStats._timeGapWindowAverage).toLatin1().data(),
-        downstreamAudioStreamStats._ringBufferConsecutiveNotMixedCount);
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
-
-
-    char upstreamLabelString[] = " Upstream:";
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamLabelString, color);
-
-    char upstreamAudioStatsString[512];
-
-    const AudioStreamStats& audioMixerAvatarAudioStreamStats = audio->getAudioMixerAvatarStreamAudioStats();
-
-    sprintf(upstreamAudioStatsString, " mic: %.2f%%/%.2f%%, %u/%u/%u", audioMixerAvatarAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
-        audioMixerAvatarAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
-        audioMixerAvatarAudioStreamStats._ringBufferDesiredJitterBufferFrames, audioMixerAvatarAudioStreamStats._ringBufferFramesAvailableAverage,
-        audioMixerAvatarAudioStreamStats._ringBufferFramesAvailable);
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-
-    sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapMin).toLatin1().data(),
-        formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapMax).toLatin1().data(),
-        formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapAverage).toLatin1().data(),
-        audioMixerAvatarAudioStreamStats._ringBufferStarveCount, audioMixerAvatarAudioStreamStats._ringBufferOverflowCount);
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-
-    sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowMin).toLatin1().data(),
-        formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowMax).toLatin1().data(),
-        formatUsecTime(audioMixerAvatarAudioStreamStats._timeGapWindowAverage).toLatin1().data(),
-        audioMixerAvatarAudioStreamStats._ringBufferConsecutiveNotMixedCount, audioMixerAvatarAudioStreamStats._ringBufferSilentFramesDropped);
-
-    verticalOffset += STATS_PELS_PER_LINE;
-    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-
-    foreach(const AudioStreamStats& injectedStreamAudioStats, audioMixerInjectedStreamAudioStatsMap) {
-
-        sprintf(upstreamAudioStatsString, " inj: %.2f%%/%.2f%%, %u/%u/%u", injectedStreamAudioStats._packetStreamStats.getLostRate()*100.0f,
-            injectedStreamAudioStats._packetStreamWindowStats.getLostRate() * 100.0f,
-            injectedStreamAudioStats._ringBufferDesiredJitterBufferFrames, injectedStreamAudioStats._ringBufferFramesAvailableAverage,
-            injectedStreamAudioStats._ringBufferFramesAvailable);
-
-        verticalOffset += STATS_PELS_PER_LINE;
-        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-
-        sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(injectedStreamAudioStats._timeGapMin).toLatin1().data(),
-            formatUsecTime(injectedStreamAudioStats._timeGapMax).toLatin1().data(),
-            formatUsecTime(injectedStreamAudioStats._timeGapAverage).toLatin1().data(),
-            injectedStreamAudioStats._ringBufferStarveCount, injectedStreamAudioStats._ringBufferOverflowCount);
-
-        verticalOffset += STATS_PELS_PER_LINE;
-        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-
-        sprintf(upstreamAudioStatsString, " %s/%s/%s, %u/%u", formatUsecTime(injectedStreamAudioStats._timeGapWindowMin).toLatin1().data(),
-            formatUsecTime(injectedStreamAudioStats._timeGapWindowMax).toLatin1().data(),
-            formatUsecTime(injectedStreamAudioStats._timeGapWindowAverage).toLatin1().data(),
-            injectedStreamAudioStats._ringBufferConsecutiveNotMixedCount, injectedStreamAudioStats._ringBufferSilentFramesDropped);
-
-        verticalOffset += STATS_PELS_PER_LINE;
-        drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamAudioStatsString, color);
-    }
     }
 
     verticalOffset = 0;
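The block deleted here is not lost functionality: together with the Audio.h and ApplicationOverlay::renderAudioMeter() hunks earlier in this commit, the per-stream audio statistics move out of Stats::display() and behind the new renderAudioStreamStats()/renderStats() path on Audio, toggled by the new "Audio Stats" menu item. The replacement call site, as added above:

    audio->renderScope(glWidget->width(), glWidget->height());
    audio->renderStats(WHITE_TEXT, glWidget->width(), glWidget->height()); // new in this commit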
@@ -30,7 +30,7 @@ InjectedAudioRingBuffer::InjectedAudioRingBuffer(const QUuid& streamIdentifier,
 
 const uchar MAX_INJECTOR_VOLUME = 255;
 
-int InjectedAudioRingBuffer::parseData(const QByteArray& packet, int packetsSkipped) {
+int InjectedAudioRingBuffer::parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) {
     frameReceivedUpdateTimingStats();
 
     // setup a data stream to read from this packet
@@ -20,7 +20,7 @@ class InjectedAudioRingBuffer : public PositionalAudioRingBuffer {
 public:
     InjectedAudioRingBuffer(const QUuid& streamIdentifier = QUuid(), bool dynamicJitterBuffer = false);
 
-    int parseData(const QByteArray& packet, int packetsSkipped = 0);
+    int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped);
 
     const QUuid& getStreamIdentifier() const { return _streamIdentifier; }
     float getRadius() const { return _radius; }
@@ -45,7 +45,7 @@ public:
 
     PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);
 
-    virtual int parseData(const QByteArray& packet, int packetsSkipped = 0) = 0;
+    virtual int parseDataAndHandleDroppedPackets(const QByteArray& packet, int packetsSkipped) = 0;
 
     int parsePositionalData(const QByteArray& positionalByteArray);
     int parseListenModeData(const QByteArray& listenModeByteArray);
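The rename makes packet loss explicit at every call site: the defaulted packetsSkipped = 0 is gone, so callers such as AudioMixerClientData must pass the count reported by the sequence-number tracker, and implementations can compensate before parsing the payload. A hedged, self-contained sketch of the idea (not the project's implementation) is to pad with silence so playback time stays aligned:

    #include <cstdint>
    #include <vector>

    // Append a packet's samples, concealing any packets lost before it.
    void appendWithLossConcealment(std::vector<int16_t>& ringBuffer,
                                   const std::vector<int16_t>& payload,
                                   int packetsSkipped, int samplesPerPacket) {
        // one silent frame per packet the sequence tracker reported missing
        size_t silentSamples = static_cast<size_t>(packetsSkipped) * samplesPerPacket;
        ringBuffer.insert(ringBuffer.end(), silentSamples, int16_t(0));
        ringBuffer.insert(ringBuffer.end(), payload.begin(), payload.end());
    }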
@@ -43,6 +43,7 @@ AvatarData::AvatarData() :
     _handState(0),
     _keyState(NO_KEY_DOWN),
     _isChatCirclingEnabled(false),
+    _forceFaceshiftConnected(false),
     _hasNewJointRotations(true),
     _headData(NULL),
     _handData(NULL),
@@ -80,6 +81,9 @@ QByteArray AvatarData::toByteArray() {
     // lazily allocate memory for HeadData in case we're not an Avatar instance
     if (!_headData) {
         _headData = new HeadData(this);
+        if (_forceFaceshiftConnected) {
+            _headData->_isFaceshiftConnected = true;
+        }
     }
 
     QByteArray avatarDataByteArray;
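This flag exists for headless agents (scripted avatars) that have no real face tracker: HeadData is allocated lazily on first serialization, and forcing _isFaceshiftConnected there marks the scripted avatar's head as face-tracked, presumably so downstream consumers honor script-driven blendshapes. A usage sketch against the API added in this commit; the ordering matters because setBlendshape() goes through the lazily created HeadData, and the blendshape name is assumed to be one of the FACESHIFT_BLENDSHAPES entries:

    AvatarData avatar;
    avatar.setForceFaceshiftConnected(true); // must precede the lazy HeadData allocation
    QByteArray bytes = avatar.toByteArray(); // allocates HeadData with the flag applied
    avatar.setBlendshape("JawOpen", 0.5f);   // assumed Faceshift blendshape name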
@@ -185,8 +185,8 @@ public:
     void setClampedTargetScale(float targetScale);
 
     // Hand State
-    void setHandState(char s) { _handState = s; }
-    char getHandState() const { return _handState; }
+    Q_INVOKABLE void setHandState(char s) { _handState = s; }
+    Q_INVOKABLE char getHandState() const { return _handState; }
 
     const QVector<JointData>& getJointData() const { return _jointData; }
     void setJointData(const QVector<JointData>& jointData) { _jointData = jointData; }
@@ -206,6 +206,10 @@ public:
 
     Q_INVOKABLE virtual QStringList getJointNames() const { return _jointNames; }
 
+    Q_INVOKABLE void setBlendshape(QString name, float val) { _headData->setBlendshape(name, val); }
+
+    void setForceFaceshiftConnected(bool connected) { _forceFaceshiftConnected = connected; }
+
     // key state
     void setKeyState(KeyState s) { _keyState = s; }
     KeyState keyState() const { return _keyState; }
@@ -300,7 +304,7 @@ protected:
     std::string _chatMessage;
 
     bool _isChatCirclingEnabled;
+    bool _forceFaceshiftConnected;
     bool _hasNewJointRotations; // set in AvatarData, cleared in Avatar
 
     HeadData* _headData;
@@ -16,6 +16,8 @@
 #include "AvatarData.h"
 #include "HeadData.h"
 
+#include "../fbx/src/FBXReader.h"
+
 HeadData::HeadData(AvatarData* owningAvatar) :
     _baseYaw(0.0f),
     _basePitch(0.0f),
@@ -52,6 +54,26 @@ void HeadData::setOrientation(const glm::quat& orientation) {
     _baseRoll = eulers.z;
 }
 
+void HeadData::setBlendshape(QString name, float val) {
+    static bool hasInitializedLookupMap = false;
+    static QMap<QString, int> blendshapeLookupMap;
+    //Lazily construct a lookup map from the blendshapes
+    if (!hasInitializedLookupMap) {
+        for (int i = 0; i < NUM_FACESHIFT_BLENDSHAPES; i++) {
+            blendshapeLookupMap[FACESHIFT_BLENDSHAPES[i]] = i;
+        }
+    }
+
+    //Check to see if the named blendshape exists, and then set its value if it does
+    QMap<QString, int>::iterator it = blendshapeLookupMap.find(name);
+    if (it != blendshapeLookupMap.end()) {
+        if (_blendshapeCoefficients.size() <= it.value()) {
+            _blendshapeCoefficients.resize(it.value() + 1);
+        }
+        _blendshapeCoefficients[it.value()] = val;
+    }
+}
+
 void HeadData::addYaw(float yaw) {
     setBaseYaw(_baseYaw + yaw);
 }
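One observation on the lazy initialization above: hasInitializedLookupMap is tested but never set, so the lookup map is rebuilt on every call (harmlessly, since the keys map to the same indices, but the intended one-time construction never takes effect). A minimal corrected sketch of just that guard:

    if (!hasInitializedLookupMap) {
        for (int i = 0; i < NUM_FACESHIFT_BLENDSHAPES; i++) {
            blendshapeLookupMap[FACESHIFT_BLENDSHAPES[i]] = i;
        }
        hasInitializedLookupMap = true; // without this, the loop reruns on every call
    }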
@@ -54,6 +54,7 @@ public:
     float getAudioAverageLoudness() const { return _audioAverageLoudness; }
     void setAudioAverageLoudness(float audioAverageLoudness) { _audioAverageLoudness = audioAverageLoudness; }
 
+    void setBlendshape(QString name, float val);
     const QVector<float>& getBlendshapeCoefficients() const { return _blendshapeCoefficients; }
 
     float getPupilDilation() const { return _pupilDilation; }
@@ -577,6 +577,8 @@ const char* FACESHIFT_BLENDSHAPES[] = {
     ""
 };
 
+const int NUM_FACESHIFT_BLENDSHAPES = sizeof(FACESHIFT_BLENDSHAPES) / sizeof(char*);
+
 const char* HUMANIK_JOINTS[] = {
     "RightHand",
     "RightForeArm",
@@ -29,6 +29,8 @@ typedef QList<FBXNode> FBXNodeList;
 
 /// The names of the blendshapes expected by Faceshift, terminated with an empty string.
 extern const char* FACESHIFT_BLENDSHAPES[];
+/// The size of FACESHIFT_BLENDSHAPES
+extern const int NUM_FACESHIFT_BLENDSHAPES;
 
 /// The names of the joints in the Maya HumanIK rig, terminated with an empty string.
 extern const char* HUMANIK_JOINTS[];
@@ -70,7 +70,7 @@ ModelItemProperties ModelsScriptingInterface::getModelProperties(ModelItemID mod
     if (_modelTree) {
         _modelTree->lockForRead();
         ModelItem* model = const_cast<ModelItem*>(_modelTree->findModelByID(identity.id, true));
-        if (model) {
+        if (model && _modelTree->getGeometryForModel(*model)) {
             model->setSittingPoints(_modelTree->getGeometryForModel(*model)->sittingPoints);
             results.copyFromModelItem(*model);
         } else {
@@ -23,8 +23,8 @@ static const QString CLASS_NAME = "ArrayBuffer";
 Q_DECLARE_METATYPE(QByteArray*)
 
 ArrayBufferClass::ArrayBufferClass(ScriptEngine* scriptEngine) :
-    QObject(scriptEngine->getEngine()),
-    QScriptClass(scriptEngine->getEngine()),
+    QObject(scriptEngine),
+    QScriptClass(scriptEngine),
     _scriptEngine(scriptEngine) {
     qScriptRegisterMetaType<QByteArray>(engine(), toScriptValue, fromScriptValue);
     QScriptValue global = engine()->globalObject();
@@ -14,8 +14,8 @@
 Q_DECLARE_METATYPE(QByteArray*)
 
 ArrayBufferViewClass::ArrayBufferViewClass(ScriptEngine* scriptEngine) :
-    QObject(scriptEngine->getEngine()),
-    QScriptClass(scriptEngine->getEngine()),
+    QObject(scriptEngine),
+    QScriptClass(scriptEngine),
     _scriptEngine(scriptEngine) {
     // Save string handles for quick lookup
     _bufferName = engine()->toStringHandle(BUFFER_PROPERTY_NAME.toLatin1());
@@ -87,7 +87,6 @@ ScriptEngine::ScriptEngine(const QString& scriptContents, const QString& fileNam
     _isFinished(false),
     _isRunning(false),
     _isInitialized(false),
-    _engine(),
     _isAvatar(false),
     _avatarIdentityTimer(NULL),
     _avatarBillboardTimer(NULL),
@@ -113,7 +112,6 @@ ScriptEngine::ScriptEngine(const QUrl& scriptURL,
     _isFinished(false),
     _isRunning(false),
     _isInitialized(false),
-    _engine(),
     _isAvatar(false),
     _avatarIdentityTimer(NULL),
     _avatarBillboardTimer(NULL),
@@ -194,7 +192,7 @@ void ScriptEngine::setAvatarData(AvatarData* avatarData, const QString& objectNa
     _avatarData = avatarData;
 
     // remove the old Avatar property, if it exists
-    _engine.globalObject().setProperty(objectName, QScriptValue());
+    globalObject().setProperty(objectName, QScriptValue());
 
     // give the script engine the new Avatar script property
     registerGlobalObject(objectName, _avatarData);
@@ -202,7 +200,7 @@ void ScriptEngine::setAvatarData(AvatarData* avatarData, const QString& objectNa
 
 void ScriptEngine::setAvatarHashMap(AvatarHashMap* avatarHashMap, const QString& objectName) {
     // remove the old Avatar property, if it exists
-    _engine.globalObject().setProperty(objectName, QScriptValue());
+    globalObject().setProperty(objectName, QScriptValue());
 
     // give the script engine the new avatar hash map
     registerGlobalObject(objectName, avatarHashMap);
@@ -231,48 +229,48 @@ void ScriptEngine::init() {
     _particlesScriptingInterface.init();
 
     // register various meta-types
-    registerMetaTypes(&_engine);
-    registerMIDIMetaTypes(&_engine);
-    registerVoxelMetaTypes(&_engine);
-    registerEventTypes(&_engine);
-    registerMenuItemProperties(&_engine);
-    registerAnimationTypes(&_engine);
-    registerAvatarTypes(&_engine);
-    Bitstream::registerTypes(&_engine);
+    registerMetaTypes(this);
+    registerMIDIMetaTypes(this);
+    registerVoxelMetaTypes(this);
+    registerEventTypes(this);
+    registerMenuItemProperties(this);
+    registerAnimationTypes(this);
+    registerAvatarTypes(this);
+    Bitstream::registerTypes(this);
 
-    qScriptRegisterMetaType(&_engine, ParticlePropertiesToScriptValue, ParticlePropertiesFromScriptValue);
-    qScriptRegisterMetaType(&_engine, ParticleIDtoScriptValue, ParticleIDfromScriptValue);
-    qScriptRegisterSequenceMetaType<QVector<ParticleID> >(&_engine);
+    qScriptRegisterMetaType(this, ParticlePropertiesToScriptValue, ParticlePropertiesFromScriptValue);
+    qScriptRegisterMetaType(this, ParticleIDtoScriptValue, ParticleIDfromScriptValue);
+    qScriptRegisterSequenceMetaType<QVector<ParticleID> >(this);
 
-    qScriptRegisterMetaType(&_engine, ModelItemPropertiesToScriptValue, ModelItemPropertiesFromScriptValue);
-    qScriptRegisterMetaType(&_engine, ModelItemIDtoScriptValue, ModelItemIDfromScriptValue);
-    qScriptRegisterMetaType(&_engine, RayToModelIntersectionResultToScriptValue, RayToModelIntersectionResultFromScriptValue);
-    qScriptRegisterSequenceMetaType<QVector<ModelItemID> >(&_engine);
+    qScriptRegisterMetaType(this, ModelItemPropertiesToScriptValue, ModelItemPropertiesFromScriptValue);
+    qScriptRegisterMetaType(this, ModelItemIDtoScriptValue, ModelItemIDfromScriptValue);
+    qScriptRegisterMetaType(this, RayToModelIntersectionResultToScriptValue, RayToModelIntersectionResultFromScriptValue);
+    qScriptRegisterSequenceMetaType<QVector<ModelItemID> >(this);
 
-    qScriptRegisterSequenceMetaType<QVector<glm::vec2> >(&_engine);
-    qScriptRegisterSequenceMetaType<QVector<glm::quat> >(&_engine);
-    qScriptRegisterSequenceMetaType<QVector<QString> >(&_engine);
+    qScriptRegisterSequenceMetaType<QVector<glm::vec2> >(this);
+    qScriptRegisterSequenceMetaType<QVector<glm::quat> >(this);
+    qScriptRegisterSequenceMetaType<QVector<QString> >(this);
 
-    QScriptValue xmlHttpRequestConstructorValue = _engine.newFunction(XMLHttpRequestClass::constructor);
-    _engine.globalObject().setProperty("XMLHttpRequest", xmlHttpRequestConstructorValue);
+    QScriptValue xmlHttpRequestConstructorValue = newFunction(XMLHttpRequestClass::constructor);
+    globalObject().setProperty("XMLHttpRequest", xmlHttpRequestConstructorValue);
 
-    QScriptValue printConstructorValue = _engine.newFunction(debugPrint);
-    _engine.globalObject().setProperty("print", printConstructorValue);
+    QScriptValue printConstructorValue = newFunction(debugPrint);
+    globalObject().setProperty("print", printConstructorValue);
 
-    QScriptValue soundConstructorValue = _engine.newFunction(soundConstructor);
-    QScriptValue soundMetaObject = _engine.newQMetaObject(&Sound::staticMetaObject, soundConstructorValue);
-    _engine.globalObject().setProperty("Sound", soundMetaObject);
+    QScriptValue soundConstructorValue = newFunction(soundConstructor);
+    QScriptValue soundMetaObject = newQMetaObject(&Sound::staticMetaObject, soundConstructorValue);
+    globalObject().setProperty("Sound", soundMetaObject);
 
-    QScriptValue injectionOptionValue = _engine.scriptValueFromQMetaObject<AudioInjectorOptions>();
-    _engine.globalObject().setProperty("AudioInjectionOptions", injectionOptionValue);
+    QScriptValue injectionOptionValue = scriptValueFromQMetaObject<AudioInjectorOptions>();
+    globalObject().setProperty("AudioInjectionOptions", injectionOptionValue);
 
-    QScriptValue localVoxelsValue = _engine.scriptValueFromQMetaObject<LocalVoxels>();
-    _engine.globalObject().setProperty("LocalVoxels", localVoxelsValue);
+    QScriptValue localVoxelsValue = scriptValueFromQMetaObject<LocalVoxels>();
+    globalObject().setProperty("LocalVoxels", localVoxelsValue);
 
-    qScriptRegisterMetaType(&_engine, injectorToScriptValue, injectorFromScriptValue);
-    qScriptRegisterMetaType( &_engine, injectorToScriptValueInputController, injectorFromScriptValueInputController);
+    qScriptRegisterMetaType(this, injectorToScriptValue, injectorFromScriptValue);
+    qScriptRegisterMetaType( this, injectorToScriptValueInputController, injectorFromScriptValueInputController);
 
-    qScriptRegisterMetaType(&_engine, animationDetailsToScriptValue, animationDetailsFromScriptValue);
+    qScriptRegisterMetaType(this, animationDetailsToScriptValue, animationDetailsFromScriptValue);
 
     registerGlobalObject("Script", this);
     registerGlobalObject("Audio", &_audioScriptingInterface);
@@ -287,15 +285,14 @@ void ScriptEngine::init() {
     registerGlobalObject("Voxels", &_voxelsScriptingInterface);
 
     // constants
-    QScriptValue globalObject = _engine.globalObject();
-    globalObject.setProperty("TREE_SCALE", _engine.newVariant(QVariant(TREE_SCALE)));
-    globalObject.setProperty("COLLISION_GROUP_ENVIRONMENT", _engine.newVariant(QVariant(COLLISION_GROUP_ENVIRONMENT)));
-    globalObject.setProperty("COLLISION_GROUP_AVATARS", _engine.newVariant(QVariant(COLLISION_GROUP_AVATARS)));
-    globalObject.setProperty("COLLISION_GROUP_VOXELS", _engine.newVariant(QVariant(COLLISION_GROUP_VOXELS)));
-    globalObject.setProperty("COLLISION_GROUP_PARTICLES", _engine.newVariant(QVariant(COLLISION_GROUP_PARTICLES)));
+    globalObject().setProperty("TREE_SCALE", newVariant(QVariant(TREE_SCALE)));
+    globalObject().setProperty("COLLISION_GROUP_ENVIRONMENT", newVariant(QVariant(COLLISION_GROUP_ENVIRONMENT)));
+    globalObject().setProperty("COLLISION_GROUP_AVATARS", newVariant(QVariant(COLLISION_GROUP_AVATARS)));
+    globalObject().setProperty("COLLISION_GROUP_VOXELS", newVariant(QVariant(COLLISION_GROUP_VOXELS)));
+    globalObject().setProperty("COLLISION_GROUP_PARTICLES", newVariant(QVariant(COLLISION_GROUP_PARTICLES)));
 
-    globalObject.setProperty("AVATAR_MOTION_OBEY_LOCAL_GRAVITY", _engine.newVariant(QVariant(AVATAR_MOTION_OBEY_LOCAL_GRAVITY)));
-    globalObject.setProperty("AVATAR_MOTION_OBEY_ENVIRONMENTAL_GRAVITY", _engine.newVariant(QVariant(AVATAR_MOTION_OBEY_ENVIRONMENTAL_GRAVITY)));
+    globalObject().setProperty("AVATAR_MOTION_OBEY_LOCAL_GRAVITY", newVariant(QVariant(AVATAR_MOTION_OBEY_LOCAL_GRAVITY)));
+    globalObject().setProperty("AVATAR_MOTION_OBEY_ENVIRONMENTAL_GRAVITY", newVariant(QVariant(AVATAR_MOTION_OBEY_ENVIRONMENTAL_GRAVITY)));
 
     // let the VoxelPacketSender know how frequently we plan to call it
     _voxelsScriptingInterface.getVoxelPacketSender()->setProcessCallIntervalHint(SCRIPT_DATA_CALLBACK_USECS);
@@ -304,8 +301,8 @@ void ScriptEngine::init() {
 
 QScriptValue ScriptEngine::registerGlobalObject(const QString& name, QObject* object) {
     if (object) {
-        QScriptValue value = _engine.newQObject(object);
-        _engine.globalObject().setProperty(name, value);
+        QScriptValue value = newQObject(object);
+        globalObject().setProperty(name, value);
         return value;
     }
     return QScriptValue::NullValue;
@@ -313,15 +310,15 @@ QScriptValue ScriptEngine::registerGlobalObject(const QString& name, QObject* ob
 
 void ScriptEngine::registerGetterSetter(const QString& name, QScriptEngine::FunctionSignature getter,
                                         QScriptEngine::FunctionSignature setter, QScriptValue object) {
-    QScriptValue setterFunction = _engine.newFunction(setter, 1);
-    QScriptValue getterFunction = _engine.newFunction(getter);
+    QScriptValue setterFunction = newFunction(setter, 1);
+    QScriptValue getterFunction = newFunction(getter);
 
     if (!object.isNull()) {
         object.setProperty(name, setterFunction, QScriptValue::PropertySetter);
         object.setProperty(name, getterFunction, QScriptValue::PropertyGetter);
     } else {
-        _engine.globalObject().setProperty(name, setterFunction, QScriptValue::PropertySetter);
-        _engine.globalObject().setProperty(name, getterFunction, QScriptValue::PropertyGetter);
+        globalObject().setProperty(name, setterFunction, QScriptValue::PropertySetter);
+        globalObject().setProperty(name, getterFunction, QScriptValue::PropertyGetter);
     }
 }
 
@@ -330,25 +327,24 @@ void ScriptEngine::evaluate() {
         init();
     }
 
-    QScriptValue result = _engine.evaluate(_scriptContents);
+    QScriptValue result = evaluate(_scriptContents);
 
-    if (_engine.hasUncaughtException()) {
-        int line = _engine.uncaughtExceptionLineNumber();
+    if (hasUncaughtException()) {
+        int line = uncaughtExceptionLineNumber();
         qDebug() << "Uncaught exception at (" << _fileNameString << ") line" << line << ":" << result.toString();
         emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + result.toString());
-        _engine.clearExceptions();
+        clearExceptions();
    }
 }
 
 QScriptValue ScriptEngine::evaluate(const QString& program, const QString& fileName, int lineNumber) {
-    QScriptValue result = _engine.evaluate(program, fileName, lineNumber);
-    bool hasUncaughtException = _engine.hasUncaughtException();
-    if (hasUncaughtException) {
-        int line = _engine.uncaughtExceptionLineNumber();
+    QScriptValue result = QScriptEngine::evaluate(program, fileName, lineNumber);
+    if (hasUncaughtException()) {
+        int line = uncaughtExceptionLineNumber();
         qDebug() << "Uncaught exception at (" << _fileNameString << ") line" << line << ": " << result.toString();
     }
-    emit evaluationFinished(result, hasUncaughtException);
-    _engine.clearExceptions();
+    emit evaluationFinished(result, hasUncaughtException());
+    clearExceptions();
     return result;
 }
 
@@ -372,12 +368,12 @@ void ScriptEngine::run() {
     _isFinished = false;
     emit runningStateChanged();
 
-    QScriptValue result = _engine.evaluate(_scriptContents);
-    if (_engine.hasUncaughtException()) {
-        int line = _engine.uncaughtExceptionLineNumber();
+    QScriptValue result = evaluate(_scriptContents);
+    if (hasUncaughtException()) {
+        int line = uncaughtExceptionLineNumber();
         qDebug() << "Uncaught exception at (" << _fileNameString << ") line" << line << ":" << result.toString();
         emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + result.toString());
-        _engine.clearExceptions();
+        clearExceptions();
     }
 
     QElapsedTimer startTime;
@@ -532,11 +528,11 @@ void ScriptEngine::run() {
         qint64 now = usecTimestampNow();
         float deltaTime = (float) (now - lastUpdate) / (float) USECS_PER_SECOND;
 
-        if (_engine.hasUncaughtException()) {
-            int line = _engine.uncaughtExceptionLineNumber();
-            qDebug() << "Uncaught exception at (" << _fileNameString << ") line" << line << ":" << _engine.uncaughtException().toString();
-            emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + _engine.uncaughtException().toString());
-            _engine.clearExceptions();
+        if (hasUncaughtException()) {
+            int line = uncaughtExceptionLineNumber();
+            qDebug() << "Uncaught exception at (" << _fileNameString << ") line" << line << ":" << uncaughtException().toString();
+            emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + uncaughtException().toString());
+            clearExceptions();
         }
 
         emit update(deltaTime);
@@ -694,12 +690,12 @@ void ScriptEngine::include(const QString& includeFile) {
         }
     }
 
-    QScriptValue result = _engine.evaluate(includeContents);
-    if (_engine.hasUncaughtException()) {
-        int line = _engine.uncaughtExceptionLineNumber();
+    QScriptValue result = evaluate(includeContents);
+    if (hasUncaughtException()) {
+        int line = uncaughtExceptionLineNumber();
         qDebug() << "Uncaught exception at (" << includeFile << ") line" << line << ":" << result.toString();
         emit errorMessage("Uncaught exception at (" + includeFile + ") line" + QString::number(line) + ":" + result.toString());
-        _engine.clearExceptions();
+        clearExceptions();
     }
 }
 
@@ -38,7 +38,7 @@ const QString NO_SCRIPT("");
 
 const unsigned int SCRIPT_DATA_CALLBACK_USECS = floor(((1.0 / 60.0f) * 1000 * 1000) + 0.5);
 
-class ScriptEngine : public QObject {
+class ScriptEngine : public QScriptEngine {
     Q_OBJECT
 public:
     ScriptEngine(const QUrl& scriptURL,
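This one-line base-class change is the heart of the ScriptEngine refactor in this commit: the class used to own a QScriptEngine member (_engine) and hand it out via getEngine(); now it is the engine, so every _engine.foo() in the .cpp hunks above collapses to a direct foo() call, and both the member and the getter are deleted below. A simplified, hedged sketch of the before/after shape (not the project's full class):

    #include <QObject>
    #include <QString>
    #include <QtScript/QScriptEngine>

    // Old shape: composition; collaborators need the getEngine() indirection.
    class WrappedEngine : public QObject {
        QScriptEngine _engine;
    public:
        QScriptEngine* getEngine() { return &_engine; }
    };

    // New shape: inheritance; the object can be passed wherever a
    // QScriptEngine* is expected and calls the engine API directly.
    class InheritedEngine : public QScriptEngine {
    public:
        void runSnippet(const QString& program) {
            evaluate(program); // no _engine. prefix
        }
    };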
@@ -57,7 +57,6 @@ public:
     /// Access the ModelsScriptingInterface in order to initialize it with a custom packet sender and jurisdiction listener
     static ModelsScriptingInterface* getModelsScriptingInterface() { return &_modelsScriptingInterface; }
 
-    QScriptEngine* getEngine() { return &_engine; }
     ArrayBufferClass* getArrayBufferClass() { return _arrayBufferClass; }
 
     /// sets the script contents, will return false if failed, will fail if script is already running
@@ -121,7 +120,6 @@ protected:
     bool _isFinished;
     bool _isRunning;
     bool _isInitialized;
-    QScriptEngine _engine;
     bool _isAvatar;
     QTimer* _avatarIdentityTimer;
     QTimer* _avatarBillboardTimer;