Merge remote-tracking branch 'upstream/master' into 19554-2

Ryan Huffman 2014-04-17 20:02:05 -07:00
commit a552d48401
53 changed files with 3001 additions and 286 deletions

View file

@ -28,6 +28,7 @@ link_hifi_library(audio ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(avatars ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(octree ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(voxels ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(fbx ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(particles ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(metavoxels ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(networking ${TARGET_NAME} "${ROOT_DIR}")
@ -51,4 +52,4 @@ IF (WIN32)
target_link_libraries(${TARGET_NAME} Winmm Ws2_32)
ENDIF(WIN32)
target_link_libraries(${TARGET_NAME} Qt5::Network Qt5::Widgets Qt5::Script "${GNUTLS_LIBRARY}")

View file

@ -11,8 +11,10 @@
#include <QtCore/QCoreApplication>
#include <QtCore/QEventLoop>
#include <QtCore/QStandardPaths>
#include <QtCore/QTimer>
#include <QtNetwork/QNetworkAccessManager>
#include <QtNetwork/QNetworkDiskCache>
#include <QtNetwork/QNetworkRequest>
#include <QtNetwork/QNetworkReply>
@ -20,6 +22,7 @@
#include <AvatarData.h>
#include <NodeList.h>
#include <PacketHeaders.h>
#include <ResourceCache.h>
#include <UUID.h>
#include <VoxelConstants.h>
#include <ParticlesScriptingInterface.h>
@ -148,22 +151,32 @@ void Agent::run() {
<< NodeType::ParticleServer);
// figure out the URL for the script for this agent assignment
QString scriptURLString("http://%1:8080/assignment/%2");
scriptURLString = scriptURLString.arg(NodeList::getInstance()->getDomainHandler().getIP().toString(),
uuidStringWithoutCurlyBraces(_uuid));
QUrl scriptURL;
if (_payload.isEmpty()) {
scriptURL = QUrl(QString("http://%1:8080/assignment/%2")
.arg(NodeList::getInstance()->getDomainHandler().getIP().toString(),
uuidStringWithoutCurlyBraces(_uuid)));
} else {
scriptURL = QUrl(_payload);
}
QNetworkAccessManager *networkManager = new QNetworkAccessManager(this);
QNetworkReply *reply = networkManager->get(QNetworkRequest(QUrl(scriptURLString)));
QNetworkReply *reply = networkManager->get(QNetworkRequest(scriptURL));
QNetworkDiskCache* cache = new QNetworkDiskCache(networkManager);
QString cachePath = QStandardPaths::writableLocation(QStandardPaths::DataLocation);
cache->setCacheDirectory(!cachePath.isEmpty() ? cachePath : "agentCache");
networkManager->setCache(cache);
qDebug() << "Downloading script at" << scriptURLString;
qDebug() << "Downloading script at" << scriptURL.toString();
QEventLoop loop;
QObject::connect(reply, SIGNAL(finished()), &loop, SLOT(quit()));
loop.exec();
// let the AvatarData class use our QNetworkAcessManager
// let the AvatarData and ResourceCache classes use our QNetworkAccessManager
AvatarData::setNetworkAccessManager(networkManager);
ResourceCache::setNetworkAccessManager(networkManager);
QString scriptContents(reply->readAll());

View file

@ -42,13 +42,13 @@ body {
background-color: #28FF57;
}
#instance-field {
#extra-fields {
position: absolute;
right: 20px;
top: 40px;
top: 30px;
}
#instance-field input {
#extra-fields input {
width: 80px;
}

View file

@ -14,8 +14,13 @@
Run
</a>
</div>
<div class='big-field' id='instance-field'>
<input type='text' name='instances' placeholder='# of instances'>
<div id="extra-fields">
<div class='big-field' id='instance-field'>
<input type='text' name='instances' placeholder='# of instances'>
</div>
<div class='big-field' id='pool-field'>
<input type='text' name='pool' placeholder='pool'>
</div>
</div>
<!-- %div#stop-button.big-button -->
</body>

View file

@ -22,9 +22,14 @@ $(document).ready(function(){
+ '--' + boundary + '--\r\n';
var headers = {};
if ($('#instance-field input').val()) {
headers['ASSIGNMENT-INSTANCES'] = $('#instance-field input').val();
}
if ($('#pool-field input').val()) {
headers['ASSIGNMENT-POOL'] = $('#pool-field input').val();
}
// post form to assignment in order to create an assignment
$.ajax({
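The call above is cut off by the hunk; a simplified sketch of the shape it takes (the '/assignment' endpoint and the scriptData variable are assumptions, while headers and boundary come from the code above):
$.ajax({
    url: '/assignment',      // assumed endpoint
    type: 'POST',
    data: scriptData,        // hypothetical name for the multipart body assembled above
    contentType: 'multipart/form-data; boundary=' + boundary,
    headers: headers         // carries ASSIGNMENT-INSTANCES / ASSIGNMENT-POOL when set
});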

View file

@ -187,7 +187,7 @@ void DomainServer::setupNodeListAndAssignments(const QUuid& sessionUUID) {
}
}
QSet<Assignment::Type> parsedTypes(QSet<Assignment::Type>() << Assignment::AgentType);
QSet<Assignment::Type> parsedTypes;
parseAssignmentConfigs(parsedTypes);
populateDefaultStaticAssignmentsExcludingTypes(parsedTypes);
@ -222,12 +222,19 @@ void DomainServer::parseAssignmentConfigs(QSet<Assignment::Type>& excludedTypes)
if (assignmentType < Assignment::AllTypes && !excludedTypes.contains(assignmentType)) {
QVariant mapValue = _argumentVariantMap[variantMapKeys[configIndex]];
QJsonArray assignmentArray;
if (mapValue.type() == QVariant::String) {
QJsonDocument deserializedDocument = QJsonDocument::fromJson(mapValue.toString().toUtf8());
createStaticAssignmentsForType(assignmentType, deserializedDocument.array());
assignmentArray = deserializedDocument.array();
} else {
createStaticAssignmentsForType(assignmentType, mapValue.toJsonValue().toArray());
assignmentArray = mapValue.toJsonValue().toArray();
}
if (assignmentType != Assignment::AgentType) {
createStaticAssignmentsForType(assignmentType, assignmentArray);
} else {
createScriptedAssignmentsFromArray(assignmentArray);
}
excludedTypes.insert(assignmentType);
@ -242,6 +249,42 @@ void DomainServer::addStaticAssignmentToAssignmentHash(Assignment* newAssignment
_staticAssignmentHash.insert(newAssignment->getUUID(), SharedAssignmentPointer(newAssignment));
}
void DomainServer::createScriptedAssignmentsFromArray(const QJsonArray& configArray) {
foreach(const QJsonValue& jsonValue, configArray) {
if (jsonValue.isObject()) {
QJsonObject jsonObject = jsonValue.toObject();
// make sure we were passed a URL, otherwise this is an invalid scripted assignment
const QString ASSIGNMENT_URL_KEY = "url";
QString assignmentURL = jsonObject[ASSIGNMENT_URL_KEY].toString();
if (!assignmentURL.isEmpty()) {
// check the json for a pool
const QString ASSIGNMENT_POOL_KEY = "pool";
QString assignmentPool = jsonObject[ASSIGNMENT_POOL_KEY].toString();
// check for a number of instances, if not passed then default is 1
const QString ASSIGNMENT_INSTANCES_KEY = "instances";
int numInstances = jsonObject[ASSIGNMENT_INSTANCES_KEY].toInt();
numInstances = (numInstances == 0 ? 1 : numInstances);
for (int i = 0; i < numInstances; i++) {
// add a scripted assignment to the queue for this instance
Assignment* scriptAssignment = new Assignment(Assignment::CreateCommand,
Assignment::AgentType,
assignmentPool);
scriptAssignment->setPayload(assignmentURL.toUtf8());
qDebug() << "Adding scripted assignment to queue -" << *scriptAssignment;
qDebug() << "URL for script is" << assignmentURL;
_assignmentQueue.enqueue(SharedAssignmentPointer(scriptAssignment));
}
}
}
}
}
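For illustration, a hypothetical config array this parser would accept (the "url", "pool", and "instances" keys come from the constants above; the URL and pool values are invented):
[
    { "url": "http://example.com/scripts/crowd.js", "pool": "crowd", "instances": 3 },
    { "url": "http://example.com/scripts/greeter.js" }
]
The second entry gets the defaults: no pool and a single instance, since a missing "instances" key reads as 0 and is bumped to 1.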
void DomainServer::createStaticAssignmentsForType(Assignment::Type type, const QJsonArray& configArray) {
// we have a string for config for this type
qDebug() << "Parsing config for assignment type" << type;
@ -284,8 +327,10 @@ void DomainServer::createStaticAssignmentsForType(Assignment::Type type, const Q
void DomainServer::populateDefaultStaticAssignmentsExcludingTypes(const QSet<Assignment::Type>& excludedTypes) {
// enumerate over all assignment types and see if we've already excluded it
for (int defaultedType = Assignment::AudioMixerType; defaultedType != Assignment::AllTypes; defaultedType++) {
if (!excludedTypes.contains((Assignment::Type) defaultedType)) {
for (Assignment::Type defaultedType = Assignment::AudioMixerType;
defaultedType != Assignment::AllTypes;
defaultedType = static_cast<Assignment::Type>(static_cast<int>(defaultedType) + 1)) {
if (!excludedTypes.contains(defaultedType) && defaultedType != Assignment::AgentType) {
// type has not been set from the command line or a config file, so use the default
// by clearing whatever exists and writing a single default assignment with no payload
Assignment* newAssignment = new Assignment(Assignment::CreateCommand, (Assignment::Type) defaultedType);

View file

@ -66,6 +66,7 @@ private:
void parseAssignmentConfigs(QSet<Assignment::Type>& excludedTypes);
void addStaticAssignmentToAssignmentHash(Assignment* newAssignment);
void createScriptedAssignmentsFromArray(const QJsonArray& configArray);
void createStaticAssignmentsForType(Assignment::Type type, const QJsonArray& configArray);
void populateDefaultStaticAssignmentsExcludingTypes(const QSet<Assignment::Type>& excludedTypes);

View file

@ -0,0 +1,711 @@
//
// audioReflectorTools.js
// hifi
//
// Created by Brad Hefta-Gaub on 2/14/14.
// Copyright (c) 2014 HighFidelity, Inc. All rights reserved.
//
// Tools for manipulating the attributes of the AudioReflector behavior
//
//
var delayScale = 100.0;
var fanoutScale = 10.0;
var speedScale = 20;
var factorScale = 5.0;
var localFactorScale = 1.0;
var reflectiveScale = 100.0;
var diffusionScale = 100.0;
var absorptionScale = 100.0;
var combFilterScale = 50.0;
// these three properties are bound together; if you change one, the others will also change
var reflectiveRatio = AudioReflector.getReflectiveRatio();
var diffusionRatio = AudioReflector.getDiffusionRatio();
var absorptionRatio = AudioReflector.getAbsorptionRatio();
var reflectiveThumbX;
var diffusionThumbX;
var absorptionThumbX;
function setReflectiveRatio(reflective) {
var total = diffusionRatio + absorptionRatio + (reflective / reflectiveScale);
diffusionRatio = diffusionRatio / total;
absorptionRatio = absorptionRatio / total;
reflectiveRatio = (reflective / reflectiveScale) / total;
updateRatioValues();
}
function setDiffusionRatio(diffusion) {
var total = (diffusion / diffusionScale) + absorptionRatio + reflectiveRatio;
diffusionRatio = (diffusion / diffusionScale) / total;
absorptionRatio = absorptionRatio / total;
reflectiveRatio = reflectiveRatio / total;
updateRatioValues();
}
function setAbsorptionRatio(absorption) {
var total = diffusionRatio + (absorption / absorptionScale) + reflectiveRatio;
diffusionRatio = diffusionRatio / total;
absorptionRatio = (absorption / absorptionScale) / total;
reflectiveRatio = reflectiveRatio / total;
updateRatioValues();
}
function updateRatioSliders() {
reflectiveThumbX = reflectiveMinThumbX + ((reflectiveMaxThumbX - reflectiveMinThumbX) * reflectiveRatio);
diffusionThumbX = diffusionMinThumbX + ((diffusionMaxThumbX - diffusionMinThumbX) * diffusionRatio);
absorptionThumbX = absorptionMinThumbX + ((absorptionMaxThumbX - absorptionMinThumbX) * absorptionRatio);
Overlays.editOverlay(reflectiveThumb, { x: reflectiveThumbX } );
Overlays.editOverlay(diffusionThumb, { x: diffusionThumbX } );
Overlays.editOverlay(absorptionThumb, { x: absorptionThumbX } );
}
function updateRatioValues() {
AudioReflector.setReflectiveRatio(reflectiveRatio);
AudioReflector.setDiffusionRatio(diffusionRatio);
AudioReflector.setAbsorptionRatio(absorptionRatio);
}
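A worked example of the renormalization these setters perform, using hypothetical starting ratios; the three ratios always end up summing to 1:
// start: reflective = 0.5, diffusion = 0.25, absorption = 0.25 (sum = 1.0)
// setDiffusionRatio(50) with diffusionScale = 100 requests diffusion = 0.5
// total = 0.5 + 0.25 + 0.5 = 1.25
// after dividing through: diffusion = 0.4, absorption = 0.2, reflective = 0.4 (sum = 1.0)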
var topY = 250;
var sliderHeight = 35;
var delayY = topY;
topY += sliderHeight;
var delayLabel = Overlays.addOverlay("text", {
x: 40,
y: delayY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 12,
leftMargin: 5,
text: "Delay:"
});
var delaySlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: delayY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var delayMinThumbX = 110;
var delayMaxThumbX = delayMinThumbX + 110;
var delayThumbX = delayMinThumbX + ((delayMaxThumbX - delayMinThumbX) * (AudioReflector.getPreDelay() / delayScale));
var delayThumb = Overlays.addOverlay("image", {
x: delayThumbX,
y: delayY + 9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 255, green: 0, blue: 0},
alpha: 1
});
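A quick worked example of the thumb-placement formula, which every slider below reuses (the pre-delay value is hypothetical):
// with AudioReflector.getPreDelay() = 20 and delayScale = 100:
// delayThumbX = 110 + (220 - 110) * (20 / 100) = 110 + 22 = 132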
var fanoutY = topY;
topY += sliderHeight;
var fanoutLabel = Overlays.addOverlay("text", {
x: 40,
y: fanoutY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 12,
leftMargin: 5,
text: "Fanout:"
});
var fanoutSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: fanoutY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var fanoutMinThumbX = 110;
var fanoutMaxThumbX = fanoutMinThumbX + 110;
var fanoutThumbX = fanoutMinThumbX + ((fanoutMaxThumbX - fanoutMinThumbX) * (AudioReflector.getDiffusionFanout() / fanoutScale));
var fanoutThumb = Overlays.addOverlay("image", {
x: fanoutThumbX,
y: fanoutY + 9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 255, green: 255, blue: 0},
alpha: 1
});
var speedY = topY;
topY += sliderHeight;
var speedLabel = Overlays.addOverlay("text", {
x: 40,
y: speedY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Speed\nin ms/m:"
});
var speedSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: speedY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var speedMinThumbX = 110;
var speedMaxThumbX = speedMinThumbX + 110;
var speedThumbX = speedMinThumbX + ((speedMaxThumbX - speedMinThumbX) * (AudioReflector.getSoundMsPerMeter() / speedScale));
var speedThumb = Overlays.addOverlay("image", {
x: speedThumbX,
y: speedY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 0, green: 255, blue: 0},
alpha: 1
});
var factorY = topY;
topY += sliderHeight;
var factorLabel = Overlays.addOverlay("text", {
x: 40,
y: factorY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Attenuation\nFactor:"
});
var factorSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: factorY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var factorMinThumbX = 110;
var factorMaxThumbX = factorMinThumbX + 110;
var factorThumbX = factorMinThumbX + ((factorMaxThumbX - factorMinThumbX) * (AudioReflector.getDistanceAttenuationScalingFactor() / factorScale));
var factorThumb = Overlays.addOverlay("image", {
x: factorThumbX,
y: factorY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 0, green: 0, blue: 255},
alpha: 1
});
var localFactorY = topY;
topY += sliderHeight;
var localFactorLabel = Overlays.addOverlay("text", {
x: 40,
y: localFactorY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Local\nFactor:"
});
var localFactorSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: localFactorY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var localFactorMinThumbX = 110;
var localFactorMaxThumbX = localFactorMinThumbX + 110;
var localFactorThumbX = localFactorMinThumbX + ((localFactorMaxThumbX - localFactorMinThumbX) * (AudioReflector.getLocalAudioAttenuationFactor() / localFactorScale));
var localFactorThumb = Overlays.addOverlay("image", {
x: localFactorThumbX,
y: localFactorY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 0, green: 128, blue: 128},
alpha: 1
});
var combFilterY = topY;
topY += sliderHeight;
var combFilterLabel = Overlays.addOverlay("text", {
x: 40,
y: combFilterY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Comb Filter\nWindow:"
});
var combFilterSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: combFilterY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var combFilterMinThumbX = 110;
var combFilterMaxThumbX = combFilterMinThumbX + 110;
var combFilterThumbX = combFilterMinThumbX + ((combFilterMaxThumbX - combFilterMinThumbX) * (AudioReflector.getCombFilterWindow() / combFilterScale));
var combFilterThumb = Overlays.addOverlay("image", {
x: combFilterThumbX,
y: combFilterY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 128, green: 128, blue: 0},
alpha: 1
});
var reflectiveY = topY;
topY += sliderHeight;
var reflectiveLabel = Overlays.addOverlay("text", {
x: 40,
y: reflectiveY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Reflective\nRatio:"
});
var reflectiveSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: reflectiveY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var reflectiveMinThumbX = 110;
var reflectiveMaxThumbX = reflectiveMinThumbX + 110;
reflectiveThumbX = reflectiveMinThumbX + ((reflectiveMaxThumbX - reflectiveMinThumbX) * AudioReflector.getReflectiveRatio());
var reflectiveThumb = Overlays.addOverlay("image", {
x: reflectiveThumbX,
y: reflectiveY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var diffusionY = topY;
topY += sliderHeight;
var diffusionLabel = Overlays.addOverlay("text", {
x: 40,
y: diffusionY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Diffusion\nRatio:"
});
var diffusionSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: diffusionY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var diffusionMinThumbX = 110;
var diffusionMaxThumbX = diffusionMinThumbX + 110;
diffusionThumbX = diffusionMinThumbX + ((diffusionMaxThumbX - diffusionMinThumbX) * AudioReflector.getDiffusionRatio());
var diffusionThumb = Overlays.addOverlay("image", {
x: diffusionThumbX,
y: diffusionY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 0, green: 255, blue: 255},
alpha: 1
});
var absorptionY = topY;
topY += sliderHeight;
var absorptionLabel = Overlays.addOverlay("text", {
x: 40,
y: absorptionY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Absorption\nRatio:"
});
var absorptionSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: absorptionY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});
var absorptionMinThumbX = 110;
var absorptionMaxThumbX = absorptionMinThumbX + 110;
absorptionThumbX = absorptionMinThumbX + ((absorptionMaxThumbX - absorptionMinThumbX) * AudioReflector.getAbsorptionRatio());
var absorptionThumb = Overlays.addOverlay("image", {
x: absorptionThumbX,
y: absorptionY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 255, green: 0, blue: 255},
alpha: 1
});
// When our script shuts down, we should clean up all of our overlays
function scriptEnding() {
Overlays.deleteOverlay(factorLabel);
Overlays.deleteOverlay(factorThumb);
Overlays.deleteOverlay(factorSlider);
Overlays.deleteOverlay(combFilterLabel);
Overlays.deleteOverlay(combFilterThumb);
Overlays.deleteOverlay(combFilterSlider);
Overlays.deleteOverlay(localFactorLabel);
Overlays.deleteOverlay(localFactorThumb);
Overlays.deleteOverlay(localFactorSlider);
Overlays.deleteOverlay(speedLabel);
Overlays.deleteOverlay(speedThumb);
Overlays.deleteOverlay(speedSlider);
Overlays.deleteOverlay(delayLabel);
Overlays.deleteOverlay(delayThumb);
Overlays.deleteOverlay(delaySlider);
Overlays.deleteOverlay(fanoutLabel);
Overlays.deleteOverlay(fanoutThumb);
Overlays.deleteOverlay(fanoutSlider);
Overlays.deleteOverlay(reflectiveLabel);
Overlays.deleteOverlay(reflectiveThumb);
Overlays.deleteOverlay(reflectiveSlider);
Overlays.deleteOverlay(diffusionLabel);
Overlays.deleteOverlay(diffusionThumb);
Overlays.deleteOverlay(diffusionSlider);
Overlays.deleteOverlay(absorptionLabel);
Overlays.deleteOverlay(absorptionThumb);
Overlays.deleteOverlay(absorptionSlider);
}
Script.scriptEnding.connect(scriptEnding);
var count = 0;
// Our update() function is called at approximately 60fps, and we will use it to animate our various overlays
function update(deltaTime) {
count++;
}
Script.update.connect(update);
// The sliders are handled in the mouse event callbacks.
var movingSliderDelay = false;
var movingSliderFanout = false;
var movingSliderSpeed = false;
var movingSliderFactor = false;
var movingSliderCombFilter = false;
var movingSliderLocalFactor = false;
var movingSliderReflective = false;
var movingSliderDiffusion = false;
var movingSliderAbsorption = false;
var thumbClickOffsetX = 0;
function mouseMoveEvent(event) {
if (movingSliderDelay) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < delayMinThumbX) {
newThumbX = delayMinThumbX;
}
if (newThumbX > delayMaxThumbX) {
newThumbX = delayMaxThumbX;
}
Overlays.editOverlay(delayThumb, { x: newThumbX } );
var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
AudioReflector.setPreDelay(delay);
}
if (movingSliderFanout) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < fanoutMinThumbX) {
newThumbX = fanoutMinThumbX;
}
if (newThumbX > fanoutMaxThumbX) {
newThumbX = fanoutMaxThumbX;
}
Overlays.editOverlay(fanoutThumb, { x: newThumbX } );
var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale);
AudioReflector.setDiffusionFanout(fanout);
}
if (movingSliderSpeed) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < speedMinThumbX) {
newThumbX = speedMinThumbX;
}
if (newThumbX > speedMaxThumbX) {
newThumbX = speedMaxThumbX;
}
Overlays.editOverlay(speedThumb, { x: newThumbX } );
var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale;
AudioReflector.setSoundMsPerMeter(speed);
}
if (movingSliderFactor) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < factorMinThumbX) {
newThumbX = factorMinThumbX;
}
if (newThumbX > factorMaxThumbX) {
newThumbX = factorMaxThumbX;
}
Overlays.editOverlay(factorThumb, { x: newThumbX } );
var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
AudioReflector.setDistanceAttenuationScalingFactor(factor);
}
if (movingSliderCombFilter) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < combFilterMinThumbX) {
newThumbX = combFilterMinThumbX;
}
if (newThumbX > combFilterMaxThumbX) {
newThumbX = combFilterMaxThumbX;
}
Overlays.editOverlay(combFilterThumb, { x: newThumbX } );
var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
AudioReflector.setCombFilterWindow(combFilter);
}
if (movingSliderLocalFactor) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < localFactorMinThumbX) {
newThumbX = localFactorMinThumbX;
}
if (newThumbX > localFactorMaxThumbX) {
newThumbX = localFactorMaxThumbX;
}
Overlays.editOverlay(localFactorThumb, { x: newThumbX } );
var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale;
AudioReflector.setLocalAudioAttenuationFactor(localFactor);
}
if (movingSliderAbsorption) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < absorptionMinThumbX) {
newThumbX = absorptionMinThumbX;
}
if (newThumbX > absorptionMaxThumbX) {
newThumbX = absorptionMaxThumbX;
}
Overlays.editOverlay(absorptionThumb, { x: newThumbX } );
var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale;
setAbsorptionRatio(absorption);
}
if (movingSliderReflective) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < reflectiveMinThumbX) {
newThumbX = reflectiveMinThumbX;
}
if (newThumbX > reflectiveMaxThumbX) {
newThumbX = reflectiveMaxThumbX;
}
Overlays.editOverlay(reflectiveThumb, { x: newThumbX } );
var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale;
setReflectiveRatio(reflective);
}
if (movingSliderDiffusion) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < diffusionMinThumbX) {
newThumbX = diffusionMinThumbX;
}
if (newThumbX > diffusionMaxThumbX) {
newThumbX = diffusionMaxThumbX;
}
Overlays.editOverlay(diffusionThumb, { x: newThumbX } );
var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
setDiffusionRatio(diffusion);
}
}
// we also handle click detection in our mousePressEvent()
function mousePressEvent(event) {
var clickedOverlay = Overlays.getOverlayAtPoint({x: event.x, y: event.y});
if (clickedOverlay == delayThumb) {
movingSliderDelay = true;
thumbClickOffsetX = event.x - delayThumbX;
}
if (clickedOverlay == fanoutThumb) {
movingSliderFanout = true;
thumbClickOffsetX = event.x - fanoutThumbX;
}
if (clickedOverlay == speedThumb) {
movingSliderSpeed = true;
thumbClickOffsetX = event.x - speedThumbX;
}
if (clickedOverlay == factorThumb) {
movingSliderFactor = true;
thumbClickOffsetX = event.x - factorThumbX;
}
if (clickedOverlay == localFactorThumb) {
movingSliderLocalFactor = true;
thumbClickOffsetX = event.x - localFactorThumbX;
}
if (clickedOverlay == combFilterThumb) {
movingSliderCombFilter = true;
thumbClickOffsetX = event.x - combFilterThumbX;
}
if (clickedOverlay == diffusionThumb) {
movingSliderDiffusion = true;
thumbClickOffsetX = event.x - diffusionThumbX;
}
if (clickedOverlay == absorptionThumb) {
movingSliderAbsorption = true;
thumbClickOffsetX = event.x - absorptionThumbX;
}
if (clickedOverlay == reflectiveThumb) {
movingSliderReflective = true;
thumbClickOffsetX = event.x - reflectiveThumbX;
}
}
function mouseReleaseEvent(event) {
if (movingSliderDelay) {
movingSliderDelay = false;
var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
AudioReflector.setPreDelay(delay);
delayThumbX = newThumbX;
}
if (movingSliderFanout) {
movingSliderFanout = false;
var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale);
AudioReflector.setDiffusionFanout(fanout);
fanoutThumbX = newThumbX;
}
if (movingSliderSpeed) {
movingSliderSpeed = false;
var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale;
AudioReflector.setSoundMsPerMeter(speed);
speedThumbX = newThumbX;
}
if (movingSliderFactor) {
movingSliderFactor = false;
var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
AudioReflector.setDistanceAttenuationScalingFactor(factor);
factorThumbX = newThumbX;
}
if (movingSliderCombFilter) {
movingSliderCombFilter = false;
var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
AudioReflector.setCombFilterWindow(combFilter);
combFilterThumbX = newThumbX;
}
if (movingSliderLocalFactor) {
movingSliderLocalFactor = false;
var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale;
AudioReflector.setLocalAudioAttenuationFactor(localFactor);
localFactorThumbX = newThumbX;
}
if (movingSliderReflective) {
movingSliderReflective = false;
var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale;
setReflectiveRatio(reflective);
reflectiveThumbX = newThumbX;
updateRatioSliders();
}
if (movingSliderDiffusion) {
movingSliderDiffusion = false;
var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
setDiffusionRatio(diffusion);
diffusionThumbX = newThumbX;
updateRatioSliders();
}
if (movingSliderAbsorption) {
movingSliderAbsorption = false;
var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale;
setAbsorptionRatio(absorption);
absorptionThumbX = newThumbX;
updateRatioSliders();
}
}
Controller.mouseMoveEvent.connect(mouseMoveEvent);
Controller.mousePressEvent.connect(mousePressEvent);
Controller.mouseReleaseEvent.connect(mouseReleaseEvent);

View file

@ -15,12 +15,12 @@ var AMPLITUDE = 45.0;
var cumulativeTime = 0.0;
print("# Joint list start");
var jointList = MyAvatar.getJointNames();
var jointMappings = "\n# Joint list start";
for (var i = 0; i < jointList.length; i++) {
print("jointIndex = " + jointList[i] + " = " + i);
jointMappings = jointMappings + "\njointIndex = " + jointList[i] + " = " + i;
}
print("# Joint list end");
print(jointMappings + "\n# Joint list end");
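For reference, the printed block for a hypothetical two-joint skeleton (joint names invented) would look like:
# Joint list start
jointIndex = Hips = 0
jointIndex = Spine = 1
# Joint list end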
Script.update.connect(function(deltaTime) {
cumulativeTime += deltaTime;

examples/dancing_bot.js (new file, 49 lines added)
View file

@ -0,0 +1,49 @@
//
// dancing_bot.js
// examples
//
// Created by Andrzej Kapolka on 4/16/14.
// Copyright 2014 High Fidelity, Inc.
//
// This is an example script that demonstrates an NPC avatar running an FBX animation loop.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
var animation = AnimationCache.getAnimation("http://www.fungibleinsight.com/faces/gangnam_style_2.fbx");
Avatar.skeletonModelURL = "http://www.fungibleinsight.com/faces/beta.fst";
Agent.isAvatar = true;
var jointMapping;
var frameIndex = 0.0;
var FRAME_RATE = 30.0; // frames per second
Script.update.connect(function(deltaTime) {
if (!jointMapping) {
var avatarJointNames = Avatar.jointNames;
var animationJointNames = animation.jointNames;
if (avatarJointNames.length === 0 || animationJointNames.length === 0) {
return;
}
jointMapping = new Array(avatarJointNames.length);
for (var i = 0; i < avatarJointNames.length; i++) {
jointMapping[i] = animationJointNames.indexOf(avatarJointNames[i]);
}
}
frameIndex += deltaTime * FRAME_RATE;
var frames = animation.frames;
var rotations = frames[Math.floor(frameIndex) % frames.length].rotations;
for (var j = 0; j < jointMapping.length; j++) {
var rotationIndex = jointMapping[j];
if (rotationIndex != -1) {
Avatar.setJointData(j, rotations[rotationIndex]);
}
}
});
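A small illustration of the remap built above (joint names invented): jointMapping[i] holds the animation's index for the avatar's i-th joint, or -1 when there is no match:
// if Avatar.jointNames     = ["Hips", "Spine", "Head"]
// and animation.jointNames = ["Spine", "Hips"]
// then jointMapping        = [1, 0, -1]   // "Head" has no animation channel, so it is skipped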

View file

@ -64,19 +64,92 @@ colors[8] = { red: 31, green: 64, blue: 64 };
var numColors = 9;
var whichColor = -1; // Starting color is 'Copy' mode
// Create sounds for adding, deleting, recoloring voxels
var addSound1 = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Voxels/voxel+create+2.raw");
var addSound2 = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Voxels/voxel+create+4.raw");
var addSound3 = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Voxels/voxel+create+3.raw");
var deleteSound = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Voxels/voxel+delete+2.raw");
var changeColorSound = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Voxels/voxel+edit+2.raw");
var clickSound = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Switches+and+sliders/toggle+switch+-+medium.raw");
// Create sounds for every script action that requires one
var audioOptions = new AudioInjectionOptions();
audioOptions.volume = 0.5;
audioOptions.volume = 1.0;
audioOptions.position = Vec3.sum(MyAvatar.position, { x: 0, y: 1, z: 0 } ); // start with audio slightly above the avatar
function SoundArray() {
this.audioOptions = audioOptions;
this.sounds = new Array();
this.addSound = function (soundURL) {
this.sounds[this.sounds.length] = new Sound(soundURL);
}
this.play = function (index) {
if (0 <= index && index < this.sounds.length) {
Audio.playSound(this.sounds[index], this.audioOptions);
} else {
print("[ERROR] editVoxels.js:randSound.play() : Index " + index + " out of range.");
}
}
this.playRandom = function () {
if (this.sounds.length > 0) {
var rand = Math.floor(Math.random() * this.sounds.length);
Audio.playSound(this.sounds[rand], this.audioOptions);
} else {
print("[ERROR] editVoxels.js:randSound.playRandom() : Array is empty.");
}
}
}
var addVoxelSound = new SoundArray();
addVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Add/VA+1.raw");
addVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Add/VA+2.raw");
addVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Add/VA+3.raw");
addVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Add/VA+4.raw");
addVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Add/VA+5.raw");
addVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Add/VA+6.raw");
var delVoxelSound = new SoundArray();
delVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Del/VD+A1.raw");
delVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Del/VD+A2.raw");
delVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Del/VD+A3.raw");
delVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Del/VD+B1.raw");
delVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Del/VD+B2.raw");
delVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Del/VD+B3.raw");
var resizeVoxelSound = new SoundArray();
resizeVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Size/V+Size+Minus.raw");
resizeVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Voxel+Size/V+Size+Plus.raw");
var voxelSizeMinus = 0;
var voxelSizePlus = 1;
var swatchesSound = new SoundArray();
swatchesSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Swatches/Swatch+1.raw");
swatchesSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Swatches/Swatch+2.raw");
swatchesSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Swatches/Swatch+3.raw");
swatchesSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Swatches/Swatch+4.raw");
swatchesSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Swatches/Swatch+5.raw");
swatchesSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Swatches/Swatch+6.raw");
swatchesSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Swatches/Swatch+7.raw");
swatchesSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Swatches/Swatch+8.raw");
swatchesSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Swatches/Swatch+9.raw");
var undoSound = new SoundArray();
undoSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Undo/Undo+1.raw");
undoSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Undo/Undo+2.raw");
undoSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Undo/Undo+3.raw");
var scriptInitSound = new SoundArray();
scriptInitSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Script+Init/Script+Init+A.raw");
scriptInitSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Script+Init/Script+Init+B.raw");
scriptInitSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Script+Init/Script+Init+C.raw");
scriptInitSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Script+Init/Script+Init+D.raw");
var modeSwitchSound = new SoundArray();
modeSwitchSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Mode+Switch/Mode+1.raw");
modeSwitchSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Mode+Switch/Mode+2.raw");
modeSwitchSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Mode+Switch/Mode+3.raw");
var initialVoxelSound = new SoundArray();
initialVoxelSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Initial+Voxel/Initial+V.raw");
var colorInheritSound = new SoundArray();
colorInheritSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Color+Inherit/Inherit+A.raw");
colorInheritSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Color+Inherit/Inherit+B.raw");
colorInheritSound.addSound("https://highfidelity-public.s3.amazonaws.com/sounds/Voxel+Editing/Color+Inherit/Inherit+C.raw");
var editToolsOn = true; // starts out on
// previewAsVoxel - by default, we will preview adds/deletes/recolors as just 4 lines on the intersecting face. But if you
@ -379,8 +452,17 @@ function calcThumbFromScale(scale) {
if (thumbStep > pointerVoxelScaleSteps) {
thumbStep = pointerVoxelScaleSteps;
}
var oldThumbX = thumbX;
thumbX = (thumbDeltaPerStep * (thumbStep - 1)) + minThumbX;
Overlays.editOverlay(thumb, { x: thumbX + sliderX } );
if (thumbX > oldThumbX) {
resizeVoxelSound.play(voxelSizePlus);
print("Plus");
} else if (thumbX < oldThumbX) {
resizeVoxelSound.play(voxelSizeMinus);
print("Minus");
}
}
function calcScaleFromThumb(newThumbX) {
@ -443,15 +525,6 @@ var recolorToolSelected = false;
var eyedropperToolSelected = false;
var pasteMode = false;
function playRandomAddSound(audioOptions) {
if (Math.random() < 0.33) {
Audio.playSound(addSound1, audioOptions);
} else if (Math.random() < 0.5) {
Audio.playSound(addSound2, audioOptions);
} else {
Audio.playSound(addSound3, audioOptions);
}
}
function calculateVoxelFromIntersection(intersection, operation) {
//print("calculateVoxelFromIntersection() operation="+operation);
@ -744,7 +817,7 @@ function trackKeyReleaseEvent(event) {
moveTools();
setAudioPosition(); // make sure we set the audio position before playing sounds
showPreviewGuides();
Audio.playSound(clickSound, audioOptions);
scriptInitSound.playRandom();
}
if (event.text == "ALT") {
@ -808,18 +881,21 @@ function mousePressEvent(event) {
Overlays.editOverlay(thumb, { imageURL: toolIconUrl + "voxel-size-slider-handle.svg", });
} else if (clickedOverlay == voxelTool) {
modeSwitchSound.play(0);
voxelToolSelected = true;
recolorToolSelected = false;
eyedropperToolSelected = false;
moveTools();
clickedOnSomething = true;
} else if (clickedOverlay == recolorTool) {
modeSwitchSound.play(1);
voxelToolSelected = false;
recolorToolSelected = true;
eyedropperToolSelected = false;
moveTools();
clickedOnSomething = true;
} else if (clickedOverlay == eyedropperTool) {
modeSwitchSound.play(2);
voxelToolSelected = false;
recolorToolSelected = false;
eyedropperToolSelected = true;
@ -846,6 +922,7 @@ function mousePressEvent(event) {
whichColor = s;
moveTools();
clickedOnSomething = true;
swatchesSound.play(whichColor);
break;
}
}
@ -888,7 +965,7 @@ function mousePressEvent(event) {
// Delete voxel
voxelDetails = calculateVoxelFromIntersection(intersection,"delete");
Voxels.eraseVoxel(voxelDetails.x, voxelDetails.y, voxelDetails.z, voxelDetails.s);
Audio.playSound(deleteSound, audioOptions);
delVoxelSound.playRandom();
Overlays.editOverlay(voxelPreview, { visible: false });
} else if (eyedropperToolSelected || trackAsEyedropper) {
if (whichColor != -1) {
@ -896,6 +973,7 @@ function mousePressEvent(event) {
colors[whichColor].green = intersection.voxel.green;
colors[whichColor].blue = intersection.voxel.blue;
moveTools();
swatchesSound.play(whichColor);
}
} else if (recolorToolSelected || trackAsRecolor) {
@ -903,10 +981,9 @@ function mousePressEvent(event) {
voxelDetails = calculateVoxelFromIntersection(intersection,"recolor");
// doing this erase then set will make sure we only recolor just the target voxel
Voxels.eraseVoxel(voxelDetails.x, voxelDetails.y, voxelDetails.z, voxelDetails.s);
Voxels.setVoxel(voxelDetails.x, voxelDetails.y, voxelDetails.z, voxelDetails.s,
colors[whichColor].red, colors[whichColor].green, colors[whichColor].blue);
Audio.playSound(changeColorSound, audioOptions);
swatchesSound.play(whichColor);
Overlays.editOverlay(voxelPreview, { visible: false });
} else if (voxelToolSelected) {
// Add voxel on face
@ -930,7 +1007,7 @@ function mousePressEvent(event) {
lastVoxelColor = { red: newColor.red, green: newColor.green, blue: newColor.blue };
lastVoxelScale = voxelDetails.s;
playRandomAddSound(audioOptions);
addVoxelSound.playRandom();
Overlays.editOverlay(voxelPreview, { visible: false });
dragStart = { x: event.x, y: event.y };
@ -946,12 +1023,12 @@ function keyPressEvent(event) {
if (event.text == "`") {
print("Color = Copy");
whichColor = -1;
Audio.playSound(clickSound, audioOptions);
colorInheritSound.playRandom();
moveTools();
} else if ((nVal > 0) && (nVal <= numColors)) {
whichColor = nVal - 1;
print("Color = " + (whichColor + 1));
Audio.playSound(clickSound, audioOptions);
swatchesSound.play(whichColor);
moveTools();
} else if (event.text == "0") {
// Create a brand new 1 meter voxel in front of your avatar
@ -969,8 +1046,12 @@ function keyPressEvent(event) {
Voxels.eraseVoxel(newVoxel.x, newVoxel.y, newVoxel.z, newVoxel.s);
Voxels.setVoxel(newVoxel.x, newVoxel.y, newVoxel.z, newVoxel.s, newVoxel.red, newVoxel.green, newVoxel.blue);
setAudioPosition();
playRandomAddSound(audioOptions);
initialVoxelSound.playRandom();
} else if (event.text == "z") {
undoSound.playRandom();
}
}
trackKeyPressEvent(event); // used by preview support

View file

@ -152,11 +152,10 @@ function particleCollisionWithVoxel(particle, voxel, collision) {
var position = particleProperties.position;
Particles.deleteParticle(particle);
// Make a hole in this voxel
Vec3.print("penetration", collision.penetration);
Vec3.print("contactPoint", collision.contactPoint);
Voxels.eraseVoxel(contactPoint.x, contactPoint.y, contactPoint.z, HOLE_SIZE);
Voxels.eraseVoxel(position.x, position.y, position.z, HOLE_SIZE);
audioOptions.position = Vec3.sum(Camera.getPosition(), Quat.getFront(Camera.getOrientation()));
//Vec3.print("voxel penetration", collision.penetration);
//Vec3.print("voxel contactPoint", collision.contactPoint);
Voxels.eraseVoxel(collision.contactPoint.x, collision.contactPoint.y, collision.contactPoint.z, HOLE_SIZE);
audioOptions.position = collision.contactPoint;
Audio.playSound(impactSound, audioOptions);
}
@ -171,9 +170,9 @@ function particleCollisionWithParticle(particle1, particle2, collision) {
// Record shot time
var endTime = new Date();
var msecs = endTime.valueOf() - shotTime.valueOf();
print("hit, msecs = " + msecs);
Vec3.print("penetration = ", collision.penetration);
Vec3.print("contactPoint = ", collision.contactPoint);
//print("hit, msecs = " + msecs);
//Vec3.print("penetration = ", collision.penetration);
//Vec3.print("contactPoint = ", collision.contactPoint);
Particles.deleteParticle(particle1);
Particles.deleteParticle(particle2);
// play the sound near the camera so the shooter can hear it

View file

@ -120,6 +120,7 @@ include(${MACRO_DIR}/LinkHifiLibrary.cmake)
link_hifi_library(shared ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(octree ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(voxels ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(fbx ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(metavoxels ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(networking ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(particles ${TARGET_NAME} "${ROOT_DIR}")

View file

@ -1659,16 +1659,12 @@ void Application::init() {
_particleCollisionSystem.init(&_particleEditSender, _particles.getTree(), _voxels.getTree(), &_audio, &_avatarManager);
// connect the _particleCollisionSystem to our script engine's ParticleScriptingInterface
connect(&_particleCollisionSystem,
SIGNAL(particleCollisionWithVoxel(const ParticleID&, const VoxelDetail&, const CollisionInfo&)),
ScriptEngine::getParticlesScriptingInterface(),
SIGNAL(particleCollisionWithVoxels(const ParticleID&, const VoxelDetail&, const CollisionInfo&)));
// connect the _particleCollisionSystem to our script engine's ParticlesScriptingInterface
connect(&_particleCollisionSystem, &ParticleCollisionSystem::particleCollisionWithVoxel,
ScriptEngine::getParticlesScriptingInterface(), &ParticlesScriptingInterface::particleCollisionWithVoxel);
connect(&_particleCollisionSystem,
SIGNAL(particleCollisionWithParticle(const ParticleID&, const ParticleID&, const CollisionInfo&)),
ScriptEngine::getParticlesScriptingInterface(),
SIGNAL(particleCollisionWithParticle(const ParticleID&, const ParticleID&, const CollisionInfo&)));
connect(&_particleCollisionSystem, &ParticleCollisionSystem::particleCollisionWithParticle,
ScriptEngine::getParticlesScriptingInterface(), &ParticlesScriptingInterface::particleCollisionWithParticle);
_audio.init(_glWidget);
@ -1678,7 +1674,16 @@ void Application::init() {
connect(_rearMirrorTools, SIGNAL(restoreView()), SLOT(restoreMirrorView()));
connect(_rearMirrorTools, SIGNAL(shrinkView()), SLOT(shrinkMirrorView()));
connect(_rearMirrorTools, SIGNAL(resetView()), SLOT(resetSensors()));
connect(_myAvatar, SIGNAL(transformChanged()), this, SLOT(bumpSettings()));
// set up our audio reflector
_audioReflector.setMyAvatar(getAvatar());
_audioReflector.setVoxels(_voxels.getTree());
_audioReflector.setAudio(getAudio());
connect(getAudio(), &Audio::processInboundAudio, &_audioReflector, &AudioReflector::processInboundAudio,Qt::DirectConnection);
connect(getAudio(), &Audio::processLocalAudio, &_audioReflector, &AudioReflector::processLocalAudio,Qt::DirectConnection);
// save settings when avatar changes
connect(_myAvatar, &MyAvatar::transformChanged, this, &Application::bumpSettings);
}
void Application::closeMirrorView() {
@ -2450,6 +2455,9 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
// disable specular lighting for ground and voxels
glMaterialfv(GL_FRONT, GL_SPECULAR, NO_SPECULAR_COLOR);
// draw the audio reflector overlay
_audioReflector.render();
// Draw voxels
if (Menu::getInstance()->isOptionChecked(MenuOption::Voxels)) {
@ -3284,6 +3292,7 @@ void Application::stopAllScripts() {
}
_scriptEnginesHash.clear();
_runningScriptsWidget->setRunningScripts(getRunningScripts());
bumpSettings();
}
void Application::stopScript(const QString &scriptName)
@ -3292,6 +3301,7 @@ void Application::stopScript(const QString &scriptName)
qDebug() << "stopping script..." << scriptName;
_scriptEnginesHash.remove(scriptName);
_runningScriptsWidget->setRunningScripts(getRunningScripts());
bumpSettings();
}
void Application::reloadAllScripts() {
@ -3387,6 +3397,8 @@ void Application::loadScript(const QString& scriptName) {
scriptEngine->registerGlobalObject("Menu", MenuScriptingInterface::getInstance());
scriptEngine->registerGlobalObject("Settings", SettingsScriptingInterface::getInstance());
scriptEngine->registerGlobalObject("AudioDevice", AudioDeviceScriptingInterface::getInstance());
scriptEngine->registerGlobalObject("AnimationCache", &_animationCache);
scriptEngine->registerGlobalObject("AudioReflector", &_audioReflector);
QThread* workerThread = new QThread(this);
@ -3407,6 +3419,7 @@ void Application::loadScript(const QString& scriptName) {
// restore the main window's active state
_window->activateWindow();
bumpSettings();
}
void Application::loadDialog() {

View file

@ -38,6 +38,7 @@
#include <VoxelEditPacketSender.h>
#include "Audio.h"
#include "AudioReflector.h"
#include "BuckyBalls.h"
#include "Camera.h"
#include "DatagramProcessor.h"
@ -162,6 +163,7 @@ public:
bool isThrottleRendering() const { return _glWidget->isThrottleRendering(); }
MyAvatar* getAvatar() { return _myAvatar; }
Audio* getAudio() { return &_audio; }
const AudioReflector* getAudioReflector() const { return &_audioReflector; }
Camera* getCamera() { return &_myCamera; }
ViewFrustum* getViewFrustum() { return &_viewFrustum; }
ViewFrustum* getShadowViewFrustum() { return &_shadowViewFrustum; }
@ -468,6 +470,7 @@ private:
QSet<int> _keysPressed;
GeometryCache _geometryCache;
AnimationCache _animationCache;
TextureCache _textureCache;
GlowEffect _glowEffect;
@ -513,7 +516,7 @@ private:
TouchEvent _lastTouchEvent;
Overlays _overlays;
AudioReflector _audioReflector;
RunningScriptsWidget* _runningScriptsWidget;
QHash<QString, ScriptEngine*> _scriptEnginesHash;
};

View file

@ -37,6 +37,7 @@
#include <SharedUtil.h>
#include <StdDev.h>
#include <UUID.h>
#include <glm/glm.hpp>
#include "Application.h"
#include "Audio.h"
@ -87,7 +88,11 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
_collisionSoundDuration(0.0f),
_proceduralEffectSample(0),
_numFramesDisplayStarve(0),
_muted(false)
_muted(false),
_processSpatialAudio(false),
_spatialAudioStart(0),
_spatialAudioFinish(0),
_spatialAudioRingBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL, true) // random access mode
{
// clear the array of locally injected samples
memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
@ -398,7 +403,7 @@ void Audio::handleAudioInput() {
unsigned int inputSamplesRequired = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio;
QByteArray inputByteArray = _inputDevice->readAll();
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput) {
// if this person wants local loopback add that to the locally injected audio
@ -406,7 +411,7 @@ void Audio::handleAudioInput() {
// we didn't have the loopback output device going so set that up now
_loopbackOutputDevice = _loopbackAudioOutput->start();
}
if (_inputFormat == _outputFormat) {
if (_loopbackOutputDevice) {
_loopbackOutputDevice->write(inputByteArray);
@ -524,7 +529,7 @@ void Audio::handleAudioInput() {
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
float smallestSample = FLT_MAX;
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i+= NOISE_GATE_FRAMES_TO_AVERAGE) {
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i += NOISE_GATE_FRAMES_TO_AVERAGE) {
float thisAverage = 0.0f;
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
thisAverage += _noiseSampleFrames[j];
@ -559,6 +564,13 @@ void Audio::handleAudioInput() {
_lastInputLoudness = 0;
}
// at this point we have clean monoAudioSamples, which match our target output...
// this is what we should send to our interested listeners
if (_processSpatialAudio && !_muted && _audioOutput) {
QByteArray monoInputData((char*)monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
emit processLocalAudio(_spatialAudioStart, monoInputData, _desiredInputFormat);
}
if (_proceduralAudioOutput) {
processProceduralAudio(monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
}
@ -622,7 +634,7 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
_totalPacketsReceived++;
double timeDiff = diffclock(&_lastReceiveTime, &currentReceiveTime);
// Discard first few received packets for computing jitter (often they pile up on start)
if (_totalPacketsReceived > NUM_INITIAL_PACKETS_DISCARD) {
_stdev.addValue(timeDiff);
@ -650,6 +662,69 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
_lastReceiveTime = currentReceiveTime;
}
// NOTE: numSamples is the total number of single-channel samples. Since callers always pass stereo data,
// we know there will be 2x samples for each stereo time sample at the format's sample rate.
void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) {
// Calculate the number of remaining samples available. The source spatial audio buffer will get
// clipped if there are insufficient samples available in the accumulation buffer.
unsigned int remaining = _spatialAudioRingBuffer.getSampleCapacity() - _spatialAudioRingBuffer.samplesAvailable();
// Locate where in the accumulation buffer the new samples need to go
if (sampleTime >= _spatialAudioFinish) {
if (_spatialAudioStart == _spatialAudioFinish) {
// Nothing in the spatial audio ring buffer yet; just do a straight copy, clipping if necessary
unsigned int sampleCount = (remaining < numSamples) ? remaining : numSamples;
if (sampleCount) {
_spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCount);
}
_spatialAudioFinish = _spatialAudioStart + sampleCount / _desiredOutputFormat.channelCount();
} else {
// Spatial audio ring buffer already has data, but there is no overlap with the new sample.
// Compute the appropriate time delay and pad with silence until the new start time.
unsigned int delay = sampleTime - _spatialAudioFinish;
unsigned int delayCount = delay * _desiredOutputFormat.channelCount();
unsigned int silentCount = (remaining < delayCount) ? remaining : delayCount;
if (silentCount) {
_spatialAudioRingBuffer.addSilentFrame(silentCount);
}
// Recalculate the number of remaining samples
remaining -= silentCount;
unsigned int sampleCount = (remaining < numSamples) ? remaining : numSamples;
// Copy the new spatial audio to the accumulation ring buffer
if (sampleCount) {
_spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCount);
}
_spatialAudioFinish += (sampleCount + silentCount) / _desiredOutputFormat.channelCount();
}
} else {
// There is overlap between the spatial audio buffer and the new sample, mix the overlap
// Calculate the offset from the buffer's current read position, which should be located at _spatialAudioStart
unsigned int offset = (sampleTime - _spatialAudioStart) * _desiredOutputFormat.channelCount();
unsigned int mixedSamplesCount = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount();
mixedSamplesCount = (mixedSamplesCount < numSamples) ? mixedSamplesCount : numSamples;
const int16_t* spatial = reinterpret_cast<const int16_t*>(spatialAudio.data());
for (unsigned int i = 0; i < mixedSamplesCount; i++) {
int existingSample = _spatialAudioRingBuffer[i + offset];
int newSample = spatial[i];
int sumOfSamples = existingSample + newSample;
_spatialAudioRingBuffer[i + offset] = static_cast<int16_t>(glm::clamp<int>(sumOfSamples,
std::numeric_limits<short>::min(), std::numeric_limits<short>::max()));
}
// Copy the remaining unoverlapped spatial audio to the spatial audio buffer, if any
unsigned int nonMixedSampleCount = numSamples - mixedSamplesCount;
nonMixedSampleCount = (remaining < nonMixedSampleCount) ? remaining : nonMixedSampleCount;
if (nonMixedSampleCount) {
_spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data() + mixedSamplesCount, nonMixedSampleCount);
// Extend the finish time by the amount of unoverlapped samples
_spatialAudioFinish += nonMixedSampleCount / _desiredOutputFormat.channelCount();
}
}
}
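To make the three branches above easier to follow, here is a minimal JavaScript model of the same accumulation logic; it is a sketch under simplifying assumptions (mono audio, a growable array instead of the fixed-capacity ring buffer, so the clipping paths are omitted):
// state = { start: ..., finish: ... } in samples; buf[0] corresponds to state.start.
function addSpatialAudio(buf, state, sampleTime, samples) {
    if (sampleTime >= state.finish) {
        if (state.start !== state.finish) {
            // gap between the existing data and the new samples: pad with silence
            for (var s = state.finish; s < sampleTime; s++) {
                buf.push(0);
            }
            state.finish = sampleTime;
        }
        for (var i = 0; i < samples.length; i++) {
            buf.push(samples[i]); // straight copy
        }
        state.finish += samples.length;
    } else {
        // overlap: mix into the existing samples, clamped to the int16 range
        var offset = sampleTime - state.start;
        var overlap = Math.min(state.finish - sampleTime, samples.length);
        for (var j = 0; j < overlap; j++) {
            var sum = buf[offset + j] + samples[j];
            buf[offset + j] = Math.max(-32768, Math.min(32767, sum));
        }
        for (var k = overlap; k < samples.length; k++) {
            buf.push(samples[k]); // the un-overlapped tail extends the buffer
        }
        state.finish += samples.length - overlap;
    }
}
Note the empty-buffer branch: when start equals finish, the C++ code likewise skips padding and copies straight in.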
bool Audio::mousePressEvent(int x, int y) {
if (_iconBounds.contains(x, y)) {
toggleMute();
@ -669,7 +744,7 @@ void Audio::toggleAudioNoiseReduction() {
void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
_ringBuffer.parseData(audioByteArray);
float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
* (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
@@ -700,13 +775,32 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray)
//qDebug() << "pushing " << numNetworkOutputSamples;
_ringBuffer.setIsStarved(false);
int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
if (_processSpatialAudio) {
unsigned int sampleTime = _spatialAudioStart;
QByteArray buffer;
buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
_ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
// Accumulate direct transmission of audio from sender to receiver
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
}
// Send audio off for spatial processing
emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
// copy the samples we'll resample from the spatial audio ring buffer - this also
// pushes the read pointer of the spatial audio ring buffer forwards
_spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
// Advance the start point for the next packet of audio to arrive
_spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
} else {
// copy the samples we'll resample from the ring buffer - this also
// pushes the read pointer of the ring buffer forwards
_ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
}
// copy the packet from the RB to the output
linearResampling(ringBufferSamples,
@@ -756,6 +850,15 @@ void Audio::toggleToneInjection()
_toneInjectionEnabled = !_toneInjectionEnabled;
}
void Audio::toggleAudioSpatialProcessing() {
_processSpatialAudio = !_processSpatialAudio;
if (_processSpatialAudio) {
_spatialAudioStart = 0;
_spatialAudioFinish = 0;
_spatialAudioRingBuffer.reset();
}
}
// Take a pointer to the acquired microphone input samples and add procedural sounds
void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
float sample;
@@ -996,6 +1099,12 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
_proceduralAudioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
gettimeofday(&_lastReceiveTime, NULL);
// set up the spatial audio ring buffer
int numFrameSamples = _outputFormat.sampleRate() * _desiredOutputFormat.channelCount();
_spatialAudioRingBuffer.resizeForFrameSize(numFrameSamples);
_spatialAudioStart = _spatialAudioFinish = 0;
supportedFormat = true;
}
}
View file
@@ -54,8 +54,6 @@ public:
void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; }
int getJitterBufferSamples() { return _jitterBufferSamples; }
void lowPassFilter(int16_t* inputBuffer);
virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
virtual void startDrumSound(float volume, float frequency, float duration, float decay);
@@ -73,15 +71,19 @@ public:
int getNetworkSampleRate() { return SAMPLE_RATE; }
int getNetworkBufferLengthSamplesPerChannel() { return NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }
bool getProcessSpatialAudio() const { return _processSpatialAudio; }
public slots:
void start();
void stop();
void addReceivedAudioToBuffer(const QByteArray& audioByteArray);
void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
void handleAudioInput();
void reset();
void toggleMute();
void toggleAudioNoiseReduction();
void toggleToneInjection();
void toggleAudioSpatialProcessing();
virtual void handleAudioByteArray(const QByteArray& audioByteArray);
@@ -97,6 +99,8 @@ public slots:
signals:
bool muteToggled();
void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
private:
@@ -162,9 +166,15 @@ private:
GLuint _boxTextureId;
QRect _iconBounds;
/// Audio callback in class context.
inline void performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight);
bool _processSpatialAudio; /// Process received audio by spatial audio hooks
unsigned int _spatialAudioStart; /// Start of spatial audio interval (in sample rate time base)
unsigned int _spatialAudioFinish; /// End of spatial audio interval (in sample rate time base)
AudioRingBuffer _spatialAudioRingBuffer; /// Spatially processed audio
// Process procedural audio by
// 1. Echo to the local procedural output device
// 2. Mix with the audio input
View file
@@ -0,0 +1,762 @@
//
// AudioReflector.cpp
// interface
//
// Created by Brad Hefta-Gaub on 4/2/2014
// Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
//
#include <QMutexLocker>
#include "AudioReflector.h"
#include "Menu.h"
const float DEFAULT_PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections
const float DEFAULT_MS_DELAY_PER_METER = 3.0f;
const float MINIMUM_ATTENUATION_TO_REFLECT = 1.0f / 256.0f;
const float DEFAULT_DISTANCE_SCALING_FACTOR = 2.0f;
const float MAXIMUM_DELAY_MS = 1000.0f * 20.0f; // stop reflecting after a path is this long
const int DEFAULT_DIFFUSION_FANOUT = 5;
const int ABSOLUTE_MAXIMUM_BOUNCE_COUNT = 10;
const float DEFAULT_LOCAL_ATTENUATION_FACTOR = 0.125f;
const float DEFAULT_COMB_FILTER_WINDOW = 0.05f; // delay differential (in ms) below which we suppress echoes to avoid comb filter effects
const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point
const float DEFAULT_ABSORPTION_RATIO = 0.125f; // 12.5% is absorbed
const float DEFAULT_DIFFUSION_RATIO = 0.125f; // 12.5% is diffused
AudioReflector::AudioReflector(QObject* parent) :
QObject(parent),
_preDelay(DEFAULT_PRE_DELAY),
_soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
_distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
_localAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
_combFilterWindow(DEFAULT_COMB_FILTER_WINDOW),
_diffusionFanout(DEFAULT_DIFFUSION_FANOUT),
_absorptionRatio(DEFAULT_ABSORPTION_RATIO),
_diffusionRatio(DEFAULT_DIFFUSION_RATIO),
_withDiffusion(false),
_lastPreDelay(DEFAULT_PRE_DELAY),
_lastSoundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
_lastDistanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
_lastLocalAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
_lastDiffusionFanout(DEFAULT_DIFFUSION_FANOUT),
_lastAbsorptionRatio(DEFAULT_ABSORPTION_RATIO),
_lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO)
{
_reflections = 0;
_diffusionPathCount = 0;
_averageAttenuation = 0.0f;
_maxAttenuation = 0.0f;
_minAttenuation = 0.0f;
_averageDelay = 0;
_maxDelay = 0;
_minDelay = 0;
}
bool AudioReflector::haveAttributesChanged() {
bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
bool attributesChange = (_withDiffusion != withDiffusion
|| _lastPreDelay != _preDelay
|| _lastSoundMsPerMeter != _soundMsPerMeter
|| _lastDistanceAttenuationScalingFactor != _distanceAttenuationScalingFactor
|| _lastDiffusionFanout != _diffusionFanout
|| _lastAbsorptionRatio != _absorptionRatio
|| _lastDiffusionRatio != _diffusionRatio);
if (attributesChange) {
_withDiffusion = withDiffusion;
_lastPreDelay = _preDelay;
_lastSoundMsPerMeter = _soundMsPerMeter;
_lastDistanceAttenuationScalingFactor = _distanceAttenuationScalingFactor;
_lastDiffusionFanout = _diffusionFanout;
_lastAbsorptionRatio = _absorptionRatio;
_lastDiffusionRatio = _diffusionRatio;
}
return attributesChange;
}
void AudioReflector::render() {
// if we're not set up yet, or we're not processing spatial audio, then exit early
if (!_myAvatar || !_audio->getProcessSpatialAudio()) {
return;
}
// use this opportunity to calculate our reflections
calculateAllReflections();
// only render if we've been asked to do so
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingRenderPaths)) {
drawRays();
}
}
// delay = 1ms per foot
// = 3ms per meter
float AudioReflector::getDelayFromDistance(float distance) {
float delay = (_soundMsPerMeter * distance);
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) {
delay += _preDelay;
}
return delay;
}
// attenuation = from the Audio Mixer
float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
const float DISTANCE_SCALE = 2.5f;
const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
const float DISTANCE_LOG_BASE = 2.5f;
const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);
float distanceSquareToSource = distance * distance;
// calculate the distance coefficient using the distance to this node
float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
DISTANCE_SCALE_LOG +
(0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor());
return distanceCoefficient;
}
glm::vec3 AudioReflector::getFaceNormal(BoxFace face) {
bool wantSlightRandomness = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces);
glm::vec3 faceNormal;
const float MIN_RANDOM_LENGTH = 0.99f;
const float MAX_RANDOM_LENGTH = 1.0f;
const float NON_RANDOM_LENGTH = 1.0f;
float normalLength = wantSlightRandomness ? randFloatInRange(MIN_RANDOM_LENGTH, MAX_RANDOM_LENGTH) : NON_RANDOM_LENGTH;
float remainder = (1.0f - normalLength)/2.0f;
float remainderSignA = randomSign();
float remainderSignB = randomSign();
if (face == MIN_X_FACE) {
faceNormal = glm::vec3(-normalLength, remainder * remainderSignA, remainder * remainderSignB);
} else if (face == MAX_X_FACE) {
faceNormal = glm::vec3(normalLength, remainder * remainderSignA, remainder * remainderSignB);
} else if (face == MIN_Y_FACE) {
faceNormal = glm::vec3(remainder * remainderSignA, -normalLength, remainder * remainderSignB);
} else if (face == MAX_Y_FACE) {
faceNormal = glm::vec3(remainder * remainderSignA, normalLength, remainder * remainderSignB);
} else if (face == MIN_Z_FACE) {
faceNormal = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -normalLength);
} else if (face == MAX_Z_FACE) {
faceNormal = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, normalLength);
}
return faceNormal;
}
// set up our buffers for our attenuated and delayed samples
const int NUMBER_OF_CHANNELS = 2;
void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint,
const QByteArray& samples, unsigned int sampleTime, int sampleRate) {
bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
bool wantStereo = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource);
glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() :
_myAvatar->getHead()->getPosition();
glm::vec3 leftEarPosition = wantEarSeparation ? _myAvatar->getHead()->getLeftEarPosition() :
_myAvatar->getHead()->getPosition();
int totalNumberOfSamples = samples.size() / sizeof(int16_t);
int totalNumberOfStereoSamples = samples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS);
const int16_t* originalSamplesData = (const int16_t*)samples.constData();
QByteArray attenuatedLeftSamples;
QByteArray attenuatedRightSamples;
attenuatedLeftSamples.resize(samples.size());
attenuatedRightSamples.resize(samples.size());
int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data();
int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data();
// calculate the distance to the ears
float rightEarDistance = glm::distance(audiblePoint.location, rightEarPosition);
float leftEarDistance = glm::distance(audiblePoint.location, leftEarPosition);
float rightEarDelayMsecs = getDelayFromDistance(rightEarDistance) + audiblePoint.delay;
float leftEarDelayMsecs = getDelayFromDistance(leftEarDistance) + audiblePoint.delay;
float averageEarDelayMsecs = (leftEarDelayMsecs + rightEarDelayMsecs) / 2.0f;
bool safeToInject = true; // assume the best
// check to see if this new injection point would be within the comb filter
// suppression window for any of the existing known delays
QMap<float, float>& knownDelays = (source == INBOUND_AUDIO) ? _inboundAudioDelays : _localAudioDelays;
QMap<float, float>::const_iterator lowerBound = knownDelays.lowerBound(averageEarDelayMsecs - _combFilterWindow);
if (lowerBound != knownDelays.end()) {
float closestFound = lowerBound.value();
float deltaToClosest = (averageEarDelayMsecs - closestFound);
if (deltaToClosest > -_combFilterWindow && deltaToClosest < _combFilterWindow) {
safeToInject = false;
}
}
// keep track of any of our suppressed echoes so we can report them in our statistics
if (!safeToInject) {
QVector<float>& suppressedEchoes = (source == INBOUND_AUDIO) ? _inboundEchoesSuppressed : _localEchoesSuppressed;
suppressedEchoes << averageEarDelayMsecs;
} else {
knownDelays[averageEarDelayMsecs] = averageEarDelayMsecs;
_totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
_delayCount += 2;
_maxDelay = std::max(_maxDelay, rightEarDelayMsecs);
_maxDelay = std::max(_maxDelay, leftEarDelayMsecs);
_minDelay = std::min(_minDelay, rightEarDelayMsecs);
_minDelay = std::min(_minDelay, leftEarDelayMsecs);
int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
float rightEarAttenuation = audiblePoint.attenuation *
getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance);
float leftEarAttenuation = audiblePoint.attenuation *
getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance);
_totalAttenuation += rightEarAttenuation + leftEarAttenuation;
_attenuationCount += 2;
_maxAttenuation = std::max(_maxAttenuation, rightEarAttenuation);
_maxAttenuation = std::max(_maxAttenuation, leftEarAttenuation);
_minAttenuation = std::min(_minAttenuation, rightEarAttenuation);
_minAttenuation = std::min(_minAttenuation, leftEarAttenuation);
// run through the samples, and attenuate them
for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
int16_t rightSample = leftSample;
if (wantStereo) {
rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
}
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
}
// now inject the attenuated array with the appropriate delay
unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
unsigned int sampleTimeRight = sampleTime + rightEarDelay;
_audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
_audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
}
}
void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) {
const int NUM_CHANNELS_INPUT = 1;
const int NUM_CHANNELS_OUTPUT = 2;
const int EXPECTED_SAMPLE_RATE = 24000;
if (format.channelCount() == NUM_CHANNELS_INPUT && format.sampleRate() == EXPECTED_SAMPLE_RATE) {
QAudioFormat outputFormat = format;
outputFormat.setChannelCount(NUM_CHANNELS_OUTPUT);
QByteArray stereoInputData(samples.size() * NUM_CHANNELS_OUTPUT, 0);
int numberOfSamples = (samples.size() / sizeof(int16_t));
int16_t* monoSamples = (int16_t*)samples.data();
int16_t* stereoSamples = (int16_t*)stereoInputData.data();
for (int i = 0; i < numberOfSamples; i++) {
stereoSamples[i * NUM_CHANNELS_OUTPUT] = monoSamples[i] * _localAudioAttenuationFactor;
stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * _localAudioAttenuationFactor;
}
_localAudioDelays.clear();
_localEchoesSuppressed.clear();
echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat);
}
}
}
void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
_inboundAudioDelays.clear();
_inboundEchoesSuppressed.clear();
echoAudio(INBOUND_AUDIO, sampleTime, samples, format);
}
void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
_maxDelay = 0;
_maxAttenuation = 0.0f;
_minDelay = std::numeric_limits<int>::max();
_minAttenuation = std::numeric_limits<float>::max();
_totalDelay = 0.0f;
_delayCount = 0;
_totalAttenuation = 0.0f;
_attenuationCount = 0;
QMutexLocker locker(&_mutex);
// depending on if we're processing local or external audio, pick the correct points vector
QVector<AudiblePoint>& audiblePoints = source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;
foreach(const AudiblePoint& audiblePoint, audiblePoints) {
injectAudiblePoint(source, audiblePoint, samples, sampleTime, format.sampleRate());
}
_averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount;
_averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount;
if (_reflections == 0) {
_minDelay = 0.0f;
_minAttenuation = 0.0f;
}
}
void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) {
glDisable(GL_LIGHTING);
glLineWidth(2.0);
// Draw the vector itself
glBegin(GL_LINES);
glColor3f(color.x, color.y, color.z);
glVertex3f(start.x, start.y, start.z);
glVertex3f(end.x, end.y, end.z);
glEnd();
glEnable(GL_LIGHTING);
}
AudioPath::AudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& direction,
float attenuation, float delay, float distance, bool isDiffusion, int bounceCount) :
source(source),
isDiffusion(isDiffusion),
startPoint(origin),
startDirection(direction),
startDelay(delay),
startAttenuation(attenuation),
lastPoint(origin),
lastDirection(direction),
lastDistance(distance),
lastDelay(delay),
lastAttenuation(attenuation),
bounceCount(bounceCount),
finalized(false),
reflections()
{
}
void AudioReflector::addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection,
float initialAttenuation, float initialDelay, float initialDistance, bool isDiffusion) {
AudioPath* path = new AudioPath(source, origin, initialDirection, initialAttenuation, initialDelay,
initialDistance, isDiffusion, 0);
QVector<AudioPath*>& audioPaths = source == INBOUND_AUDIO ? _inboundAudioPaths : _localAudioPaths;
audioPaths.push_back(path);
}
void AudioReflector::calculateAllReflections() {
// only recalculate when we've moved, or if the attributes have changed
// TODO: what about case where new voxels are added in front of us???
bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented);
glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation();
glm::vec3 origin = _myAvatar->getHead()->getPosition();
glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition();
bool shouldRecalc = _reflections == 0
|| !isSimilarPosition(origin, _origin)
|| !isSimilarOrientation(orientation, _orientation)
|| !isSimilarPosition(listenerPosition, _listenerPosition)
|| haveAttributesChanged();
if (shouldRecalc) {
QMutexLocker locker(&_mutex);
quint64 start = usecTimestampNow();
_origin = origin;
_orientation = orientation;
_listenerPosition = listenerPosition;
analyzePaths(); // actually does the work
quint64 end = usecTimestampNow();
const bool wantDebugging = false;
if (wantDebugging) {
qDebug() << "newCalculateAllReflections() elapsed=" << (end - start);
}
}
}
void AudioReflector::drawRays() {
const glm::vec3 RED(1,0,0);
const glm::vec3 GREEN(0,1,0);
const glm::vec3 BLUE(0,0,1);
const glm::vec3 CYAN(0,1,1);
int diffusionNumber = 0;
QMutexLocker locker(&_mutex);
// draw the paths for inbound audio
foreach(AudioPath* const& path, _inboundAudioPaths) {
// if this is an original reflection, draw it in RED
if (path->isDiffusion) {
diffusionNumber++;
drawPath(path, GREEN);
} else {
drawPath(path, RED);
}
}
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) {
// draw the paths for local audio
foreach(AudioPath* const& path, _localAudioPaths) {
// if this is an original reflection, draw it in RED
if (path->isDiffusion) {
diffusionNumber++;
drawPath(path, CYAN);
} else {
drawPath(path, BLUE);
}
}
}
}
void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) {
glm::vec3 start = path->startPoint;
glm::vec3 color = originalColor;
const float COLOR_ADJUST_PER_BOUNCE = 0.75f;
foreach (glm::vec3 end, path->reflections) {
drawVector(start, end, color);
start = end;
color = color * COLOR_ADJUST_PER_BOUNCE;
}
}
void AudioReflector::clearPaths() {
// clear our inbound audio paths
foreach(AudioPath* const& path, _inboundAudioPaths) {
delete path;
}
_inboundAudioPaths.clear();
_inboundAudiblePoints.clear(); // clear our inbound audible points
// clear our local audio paths
foreach(AudioPath* const& path, _localAudioPaths) {
delete path;
}
_localAudioPaths.clear();
_localAudiblePoints.clear(); // clear our local audible points
}
// Here's how this works: we have an array of AudioPaths, we loop on all of our currently calculating audio
// paths, and calculate one ray per path. If that ray doesn't reflect, or reaches a max distance/attenuation, then it
// is considered finalized.
// If the ray hits a surface, then, based on the characteristics of that surface, it will calculate the new
// attenuation, path length, and delay for the primary path. For surfaces that have diffusion, it will also create
// fanout number of new paths, those new paths will have an origin of the reflection point, and an initial attenuation
// of their diffusion ratio. Those new paths will be added to the active audio paths, and be analyzed for the next loop.
void AudioReflector::analyzePaths() {
clearPaths();
// add our initial paths
glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT);
glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP);
glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT);
glm::vec3 left = -right;
glm::vec3 down = -up;
glm::vec3 back = -front;
glm::vec3 frontRightUp = glm::normalize(front + right + up);
glm::vec3 frontLeftUp = glm::normalize(front + left + up);
glm::vec3 backRightUp = glm::normalize(back + right + up);
glm::vec3 backLeftUp = glm::normalize(back + left + up);
glm::vec3 frontRightDown = glm::normalize(front + right + down);
glm::vec3 frontLeftDown = glm::normalize(front + left + down);
glm::vec3 backRightDown = glm::normalize(back + right + down);
glm::vec3 backLeftDown = glm::normalize(back + left + down);
float initialAttenuation = 1.0f;
float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? _preDelay : 0.0f;
// NOTE: we're still calculating our initial paths based on the listener's position. But since the analysis code
// has been updated to support individual sound sources (which is how we support diffusion), we can use that new
// paradigm to add support for individual, and more directional, sound sources.
addAudioPath(INBOUND_AUDIO, _origin, front, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, right, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, up, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, down, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, back, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, left, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, frontRightUp, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, frontLeftUp, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, backRightUp, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, backLeftUp, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, frontRightDown, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, frontLeftDown, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, backRightDown, initialAttenuation, preDelay);
addAudioPath(INBOUND_AUDIO, _origin, backLeftDown, initialAttenuation, preDelay);
// the original paths for the local audio are directional to the front of the origin
addAudioPath(LOCAL_AUDIO, _origin, front, initialAttenuation, preDelay);
addAudioPath(LOCAL_AUDIO, _origin, frontRightUp, initialAttenuation, preDelay);
addAudioPath(LOCAL_AUDIO, _origin, frontLeftUp, initialAttenuation, preDelay);
addAudioPath(LOCAL_AUDIO, _origin, frontRightDown, initialAttenuation, preDelay);
addAudioPath(LOCAL_AUDIO, _origin, frontLeftDown, initialAttenuation, preDelay);
// loop through all our audio paths and keep analyzing them until they complete
int steps = 0;
int activePaths = _inboundAudioPaths.size() + _localAudioPaths.size(); // when we start, all paths are active
while (activePaths > 0) {
activePaths = analyzePathsSingleStep();
steps++;
}
_reflections = _inboundAudiblePoints.size() + _localAudiblePoints.size();
_diffusionPathCount = countDiffusionPaths();
}
int AudioReflector::countDiffusionPaths() {
int diffusionCount = 0;
foreach(AudioPath* const& path, _inboundAudioPaths) {
if (path->isDiffusion) {
diffusionCount++;
}
}
foreach(AudioPath* const& path, _localAudioPaths) {
if (path->isDiffusion) {
diffusionCount++;
}
}
return diffusionCount;
}
int AudioReflector::analyzePathsSingleStep() {
// iterate all the active sound paths, calculate one step per active path
int activePaths = 0;
QVector<AudioPath*>* pathsLists[] = { &_inboundAudioPaths, &_localAudioPaths };
for (unsigned int i = 0; i < sizeof(pathsLists) / sizeof(pathsLists[0]); i++) {
QVector<AudioPath*>& pathList = *pathsLists[i];
foreach(AudioPath* const& path, pathList) {
glm::vec3 start = path->lastPoint;
glm::vec3 direction = path->lastDirection;
OctreeElement* elementHit; // output from findRayIntersection
float distance; // output from findRayIntersection
BoxFace face; // output from findRayIntersection
if (!path->finalized) {
activePaths++;
if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) {
path->finalized = true;
} else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
// TODO: we need to decide how we want to handle locking on the ray intersection, if we force lock,
// we get an accurate picture, but it could prevent rendering of the voxels. If we trylock (default),
// we might not get ray intersections where they may exist, but we can't really detect that case...
// add last parameter of Octree::Lock to force locking
handlePathPoint(path, distance, elementHit, face);
} else {
// If we didn't intersect, but this was a diffusion ray, then we will go ahead and cast a short ray out
// from our last known point, in the last known direction, and leave that sound source hanging there
if (path->isDiffusion) {
const float MINIMUM_RANDOM_DISTANCE = 0.25f;
const float MAXIMUM_RANDOM_DISTANCE = 0.5f;
float distance = randFloatInRange(MINIMUM_RANDOM_DISTANCE, MAXIMUM_RANDOM_DISTANCE);
handlePathPoint(path, distance, NULL, UNKNOWN_FACE);
} else {
path->finalized = true; // if it doesn't intersect, then it is finished
}
}
}
}
}
return activePaths;
}
void AudioReflector::handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face) {
glm::vec3 start = path->lastPoint;
glm::vec3 direction = path->lastDirection;
glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));
float currentReflectiveAttenuation = path->lastAttenuation; // only the reflective components
float currentDelay = path->lastDelay; // start with our delay so far
float pathDistance = path->lastDistance;
pathDistance += glm::distance(start, end);
float toListenerDistance = glm::distance(end, _listenerPosition);
// adjust our current delay by just the delay from the most recent ray
currentDelay += getDelayFromDistance(distance);
// now we know the current attenuation for the "perfect" reflection case, but we now incorporate
// our surface materials to determine how much of this ray is absorbed, reflected, and diffused
SurfaceCharacteristics material = getSurfaceCharacteristics(elementHit);
float reflectiveAttenuation = currentReflectiveAttenuation * material.reflectiveRatio;
float totalDiffusionAttenuation = currentReflectiveAttenuation * material.diffusionRatio;
bool wantDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
int fanout = wantDiffusions ? _diffusionFanout : 0;
float partialDiffusionAttenuation = fanout < 1 ? 0.0f : totalDiffusionAttenuation / (float)fanout;
// total delay includes the bounce back to listener
float totalDelay = currentDelay + getDelayFromDistance(toListenerDistance);
float toListenerAttenuation = getDistanceAttenuationCoefficient(toListenerDistance + pathDistance);
// if our resulting partial diffusion attenuation, is still above our minimum attenuation
// then we add new paths for each diffusion point
if ((partialDiffusionAttenuation * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT
&& totalDelay < MAXIMUM_DELAY_MS) {
// diffusions fan out from random places on the semisphere of the collision point
for (int i = 0; i < fanout; i++) {
glm::vec3 diffusion;
// We're creating a random normal here. But we want it to be relatively dramatic compared to how we handle
// our slightly random surface normals.
const float MINIMUM_RANDOM_LENGTH = 0.5f;
const float MAXIMUM_RANDOM_LENGTH = 1.0f;
float randomness = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
float remainder = (1.0f - randomness)/2.0f;
float remainderSignA = randomSign();
float remainderSignB = randomSign();
if (face == MIN_X_FACE) {
diffusion = glm::vec3(-randomness, remainder * remainderSignA, remainder * remainderSignB);
} else if (face == MAX_X_FACE) {
diffusion = glm::vec3(randomness, remainder * remainderSignA, remainder * remainderSignB);
} else if (face == MIN_Y_FACE) {
diffusion = glm::vec3(remainder * remainderSignA, -randomness, remainder * remainderSignB);
} else if (face == MAX_Y_FACE) {
diffusion = glm::vec3(remainder * remainderSignA, randomness, remainder * remainderSignB);
} else if (face == MIN_Z_FACE) {
diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -randomness);
} else if (face == MAX_Z_FACE) {
diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, randomness);
} else if (face == UNKNOWN_FACE) {
float randomnessX = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
float randomnessY = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
float randomnessZ = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
diffusion = glm::vec3(direction.x * randomnessX, direction.y * randomnessY, direction.z * randomnessZ);
}
diffusion = glm::normalize(diffusion);
// add new audio path for these diffusions, the new path's source is the same as the original source
addAudioPath(path->source, end, diffusion, partialDiffusionAttenuation, currentDelay, pathDistance, true);
}
} else {
const bool wantDebugging = false;
if (wantDebugging) {
if ((partialDiffusionAttenuation * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) {
qDebug() << "too quiet to diffuse";
qDebug() << " partialDiffusionAttenuation=" << partialDiffusionAttenuation;
qDebug() << " toListenerAttenuation=" << toListenerAttenuation;
qDebug() << " result=" << (partialDiffusionAttenuation * toListenerAttenuation);
qDebug() << " MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT;
}
if (totalDelay > MAXIMUM_DELAY_MS) {
qDebug() << "too delayed to diffuse";
qDebug() << " totalDelay=" << totalDelay;
qDebug() << " MAXIMUM_DELAY_MS=" << MAXIMUM_DELAY_MS;
}
}
}
// if our reflective attenuation is above our minimum, then add our reflection point and
// allow our path to continue
if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT
&& totalDelay < MAXIMUM_DELAY_MS) {
// add this location, as the reflective attenuation as well as the total diffusion attenuation
// NOTE: we add the delay to the audible point, not back to the listener. The additional delay
// and attenuation to the listener is recalculated at the point where we actually inject the
// audio so that it can be adjusted to ear position
AudiblePoint point = {end, currentDelay, (reflectiveAttenuation + totalDiffusionAttenuation), pathDistance};
QVector<AudiblePoint>& audiblePoints = path->source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;
audiblePoints.push_back(point);
// add this location to the path points, so we can visualize it
path->reflections.push_back(end);
// now, if our reflective attenuation is over our minimum then keep going...
if (reflectiveAttenuation * toListenerAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) {
glm::vec3 faceNormal = getFaceNormal(face);
path->lastDirection = glm::normalize(glm::reflect(direction,faceNormal));
path->lastPoint = end;
path->lastAttenuation = reflectiveAttenuation;
path->lastDelay = currentDelay;
path->lastDistance = pathDistance;
path->bounceCount++;
} else {
path->finalized = true; // if we're too quiet, then we're done
}
} else {
const bool wantDebugging = false;
if (wantDebugging) {
if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) {
qDebug() << "too quiet to add audible point";
qDebug() << " reflectiveAttenuation + totalDiffusionAttenuation=" << (reflectiveAttenuation + totalDiffusionAttenuation);
qDebug() << " toListenerAttenuation=" << toListenerAttenuation;
qDebug() << " result=" << ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation);
qDebug() << " MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT;
}
if (totalDelay > MAXIMUM_DELAY_MS) {
qDebug() << "too delayed to add audible point";
qDebug() << " totalDelay=" << totalDelay;
qDebug() << " MAXIMUM_DELAY_MS=" << MAXIMUM_DELAY_MS;
}
}
path->finalized = true; // if we're too quiet, then we're done
}
}
// TODO: eventually we will add support for different surface characteristics based on the element
// that is hit, which is why we pass in the elementHit to this helper function. But for now, all
// surfaces have the same characteristics
SurfaceCharacteristics AudioReflector::getSurfaceCharacteristics(OctreeElement* elementHit) {
SurfaceCharacteristics result = { getReflectiveRatio(), _absorptionRatio, _diffusionRatio };
return result;
}
void AudioReflector::setReflectiveRatio(float ratio) {
float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
float currentReflectiveRatio = (1.0f - (_absorptionRatio + _diffusionRatio));
float halfDifference = (safeRatio - currentReflectiveRatio) / 2.0f;
// evenly distribute the difference between the two other ratios
_absorptionRatio -= halfDifference;
_diffusionRatio -= halfDifference;
}
void AudioReflector::setAbsorptionRatio(float ratio) {
float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
_absorptionRatio = safeRatio;
const float MAX_COMBINED_RATIO = 1.0f;
if (_absorptionRatio + _diffusionRatio > MAX_COMBINED_RATIO) {
_diffusionRatio = MAX_COMBINED_RATIO - _absorptionRatio;
}
}
void AudioReflector::setDiffusionRatio(float ratio) {
float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
_diffusionRatio = safeRatio;
const float MAX_COMBINED_RATIO = 1.0f;
if (_absorptionRatio + _diffusionRatio > MAX_COMBINED_RATIO) {
_absorptionRatio = MAX_COMBINED_RATIO - _diffusionRatio;
}
}
View file
@@ -0,0 +1,222 @@
//
// AudioReflector.h
// interface
//
// Created by Brad Hefta-Gaub on 4/2/2014
// Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
//
#ifndef interface_AudioReflector_h
#define interface_AudioReflector_h
#include <QMutex>
#include <VoxelTree.h>
#include "Audio.h"
#include "avatar/MyAvatar.h"
enum AudioSource {
LOCAL_AUDIO,
INBOUND_AUDIO
};
class AudioPath {
public:
AudioPath(AudioSource source = INBOUND_AUDIO, const glm::vec3& origin = glm::vec3(0.0f),
const glm::vec3& direction = glm::vec3(0.0f), float attenuation = 1.0f,
float delay = 0.0f, float distance = 0.0f, bool isDiffusion = false, int bounceCount = 0);
AudioSource source;
bool isDiffusion;
glm::vec3 startPoint;
glm::vec3 startDirection;
float startDelay;
float startAttenuation;
glm::vec3 lastPoint;
glm::vec3 lastDirection;
float lastDistance;
float lastDelay;
float lastAttenuation;
unsigned int bounceCount;
bool finalized;
QVector<glm::vec3> reflections;
};
class AudiblePoint {
public:
glm::vec3 location; /// location of the audible point
float delay; /// total delay (including pre-delay) to the audible location, not to the listener's ears
float attenuation; /// only the reflective & diffusive portion of attenuation; doesn't include distance attenuation
float distance; /// total distance to the audible location, not to the listener's ears
};
class SurfaceCharacteristics {
public:
float reflectiveRatio;
float absorptionRatio;
float diffusionRatio;
};
class AudioReflector : public QObject {
Q_OBJECT
public:
AudioReflector(QObject* parent = NULL);
// setup functions to configure the resources used by the AudioReflector
void setVoxels(VoxelTree* voxels) { _voxels = voxels; }
void setMyAvatar(MyAvatar* myAvatar) { _myAvatar = myAvatar; }
void setAudio(Audio* audio) { _audio = audio; }
void render(); /// must be called in the application render loop
void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
public slots:
// statistics
int getReflections() const { return _reflections; }
float getAverageDelayMsecs() const { return _averageDelay; }
float getAverageAttenuation() const { return _averageAttenuation; }
float getMaxDelayMsecs() const { return _maxDelay; }
float getMaxAttenuation() const { return _maxAttenuation; }
float getMinDelayMsecs() const { return _minDelay; }
float getMinAttenuation() const { return _minAttenuation; }
float getDelayFromDistance(float distance);
int getDiffusionPathCount() const { return _diffusionPathCount; }
int getEchoesInjected() const { return _inboundAudioDelays.size() + _localAudioDelays.size(); }
int getEchoesSuppressed() const { return _inboundEchoesSuppressed.size() + _localEchoesSuppressed.size(); }
/// ms of delay added to all echoes
float getPreDelay() const { return _preDelay; }
void setPreDelay(float preDelay) { _preDelay = preDelay; }
/// ms per meter that sound travels, larger means slower, which sounds bigger
float getSoundMsPerMeter() const { return _soundMsPerMeter; }
void setSoundMsPerMeter(float soundMsPerMeter) { _soundMsPerMeter = soundMsPerMeter; }
/// scales attenuation to be louder or softer than the default distance attenuation
float getDistanceAttenuationScalingFactor() const { return _distanceAttenuationScalingFactor; }
void setDistanceAttenuationScalingFactor(float factor) { _distanceAttenuationScalingFactor = factor; }
/// scales attenuation of local audio to be louder or softer than the default attenuation
float getLocalAudioAttenuationFactor() const { return _localAudioAttenuationFactor; }
void setLocalAudioAttenuationFactor(float factor) { _localAudioAttenuationFactor = factor; }
/// ms window in which we will suppress echoes to reduce comb filter effects
float getCombFilterWindow() const { return _combFilterWindow; }
void setCombFilterWindow(float value) { _combFilterWindow = value; }
/// number of points of diffusion from each reflection point, as fanout increases there are more chances for secondary
/// echoes, but each diffusion ray is quieter and therefore more likely to be below the sound floor
int getDiffusionFanout() const { return _diffusionFanout; }
void setDiffusionFanout(int fanout) { _diffusionFanout = fanout; }
/// ratio 0.0 - 1.0 of amount of each ray that is absorbed upon hitting a surface
float getAbsorptionRatio() const { return _absorptionRatio; }
void setAbsorptionRatio(float ratio);
/// ratio 0.0 - 1.0 of amount of each ray that is diffused upon hitting a surface
float getDiffusionRatio() const { return _diffusionRatio; }
void setDiffusionRatio(float ratio);
/// remaining ratio 0.0 - 1.0 of amount of each ray that is cleanly reflected upon hitting a surface
float getReflectiveRatio() const { return (1.0f - (_absorptionRatio + _diffusionRatio)); }
void setReflectiveRatio(float ratio);
signals:
private:
VoxelTree* _voxels; // used to access voxel scene
MyAvatar* _myAvatar; // access to listener
Audio* _audio; // access to audio API
// Helpers for drawing
void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);
// helper for generically calculating attenuation based on distance
float getDistanceAttenuationCoefficient(float distance);
// statistics
int _reflections;
int _diffusionPathCount;
int _delayCount;
float _totalDelay;
float _averageDelay;
float _maxDelay;
float _minDelay;
int _attenuationCount;
float _totalAttenuation;
float _averageAttenuation;
float _maxAttenuation;
float _minAttenuation;
glm::vec3 _listenerPosition;
glm::vec3 _origin;
glm::quat _orientation;
QVector<AudioPath*> _inboundAudioPaths; /// audio paths we're processing for inbound audio
QVector<AudiblePoint> _inboundAudiblePoints; /// the audible points that have been calculated from the inbound audio paths
QMap<float, float> _inboundAudioDelays; /// delay times for currently injected audio points
QVector<float> _inboundEchoesSuppressed; /// delay times of inbound echoes suppressed by the comb filter window
QVector<AudioPath*> _localAudioPaths; /// audio paths we're processing for local audio
QVector<AudiblePoint> _localAudiblePoints; /// the audible points that have been calculated from the local audio paths
QMap<float, float> _localAudioDelays; /// delay times for currently injected audio points
QVector<float> _localEchoesSuppressed; /// delay times of local echoes suppressed by the comb filter window
// adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties,
// as well as diffusion sound sources
void addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation,
float initialDelay, float initialDistance = 0.0f, bool isDiffusion = false);
// helper that handles audioPath analysis
int analyzePathsSingleStep();
void handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face);
void clearPaths();
void analyzePaths();
void drawRays();
void drawPath(AudioPath* path, const glm::vec3& originalColor);
void calculateAllReflections();
int countDiffusionPaths();
glm::vec3 getFaceNormal(BoxFace face);
void injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
void echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
// return the surface characteristics of the element we hit
SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit = NULL);
QMutex _mutex;
float _preDelay;
float _soundMsPerMeter;
float _distanceAttenuationScalingFactor;
float _localAudioAttenuationFactor;
float _combFilterWindow;
int _diffusionFanout; // number of points of diffusion from each reflection point
// all elements have the same material for now...
float _absorptionRatio;
float _diffusionRatio;
float _reflectiveRatio;
// remember the last known values at calculation
bool haveAttributesChanged();
bool _withDiffusion;
float _lastPreDelay;
float _lastSoundMsPerMeter;
float _lastDistanceAttenuationScalingFactor;
float _lastLocalAudioAttenuationFactor;
int _lastDiffusionFanout;
float _lastAbsorptionRatio;
float _lastDiffusionRatio;
};
#endif // interface_AudioReflector_h
View file
@@ -350,8 +350,8 @@ Menu::Menu() :
QMenu* renderDebugMenu = developerMenu->addMenu("Render Debugging Tools");
addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::PipelineWarnings);
addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::SuppressShortTimings);
addCheckableActionToQMenuAndActionHash(renderDebugMenu,
MenuOption::CullSharedFaces,
@@ -362,7 +362,7 @@ Menu::Menu() :
addCheckableActionToQMenuAndActionHash(renderDebugMenu,
MenuOption::ShowCulledSharedFaces,
Qt::CTRL | Qt::SHIFT | Qt::Key_X,
false,
appInstance->getVoxels(),
SLOT(showCulledSharedFaces()));
@@ -386,6 +386,50 @@ Menu::Menu() :
appInstance->getAudio(),
SLOT(toggleToneInjection()));
QMenu* spatialAudioMenu = audioDebugMenu->addMenu("Spatial Audio");
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessing,
Qt::CTRL | Qt::SHIFT | Qt::Key_M,
false,
appInstance->getAudio(),
SLOT(toggleAudioSpatialProcessing()));
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingIncludeOriginal,
Qt::CTRL | Qt::SHIFT | Qt::Key_O,
true);
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingSeparateEars,
Qt::CTRL | Qt::SHIFT | Qt::Key_E,
true);
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingPreDelay,
Qt::CTRL | Qt::SHIFT | Qt::Key_D,
true);
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingStereoSource,
Qt::CTRL | Qt::SHIFT | Qt::Key_S,
true);
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingHeadOriented,
Qt::CTRL | Qt::SHIFT | Qt::Key_H,
true);
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingWithDiffusions,
Qt::CTRL | Qt::SHIFT | Qt::Key_W,
true);
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingRenderPaths,
Qt::CTRL | Qt::SHIFT | Qt::Key_R,
true);
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces,
Qt::CTRL | Qt::SHIFT | Qt::Key_X,
true);
addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingProcessLocalAudio,
Qt::CTRL | Qt::SHIFT | Qt::Key_A,
true);
addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
Qt::CTRL | Qt::SHIFT | Qt::Key_V,
this,
View file
@@ -258,6 +258,18 @@ namespace MenuOption {
const QString Atmosphere = "Atmosphere";
const QString AudioNoiseReduction = "Audio Noise Reduction";
const QString AudioToneInjection = "Inject Test Tone";
const QString AudioSpatialProcessing = "Audio Spatial Processing";
const QString AudioSpatialProcessingHeadOriented = "Head Oriented";
const QString AudioSpatialProcessingIncludeOriginal = "Includes Network Original";
const QString AudioSpatialProcessingPreDelay = "Add Pre-Delay";
const QString AudioSpatialProcessingProcessLocalAudio = "Process Local Audio";
const QString AudioSpatialProcessingRenderPaths = "Render Paths";
const QString AudioSpatialProcessingSeparateEars = "Separate Ears";
const QString AudioSpatialProcessingSlightlyRandomSurfaces = "Slightly Random Surfaces";
const QString AudioSpatialProcessingStereoSource = "Stereo Source";
const QString AudioSpatialProcessingWithDiffusions = "With Diffusions";
const QString Avatars = "Avatars";
const QString Bandwidth = "Bandwidth Display";
const QString BandwidthDetails = "Bandwidth Details";
View file
@@ -22,9 +22,9 @@
#include <AccountManager.h>
#include "Application.h"
#include "renderer/FBXReader.h"
#include <FBXReader.h>
#include "Application.h"
#include "ModelUploader.h"
@@ -306,14 +306,14 @@ bool ModelUploader::addTextures(const QString& texdir, const QString fbxFile) {
foreach (FBXMesh mesh, geometry.meshes) {
foreach (FBXMeshPart part, mesh.parts) {
if (!part.diffuseTexture.filename.isEmpty()) {
if (!addPart(texdir + "/" + part.diffuseTexture.filename,
QString("texture%1").arg(++_texturesCount))) {
return false;
}
}
if (!part.normalTexture.filename.isEmpty()) {
if (!addPart(texdir + "/" + part.normalTexture.filename,
QString("texture%1").arg(++_texturesCount))) {
return false;
}
View file
@@ -78,112 +78,6 @@ float angle_to(glm::vec3 head_pos, glm::vec3 source_pos, float render_yaw, float
return atan2(head_pos.x - source_pos.x, head_pos.z - source_pos.z) + render_yaw + head_yaw;
}
// Helper function returns the positive angle (in radians) between two 3D vectors
float angleBetween(const glm::vec3& v1, const glm::vec3& v2) {
return acosf((glm::dot(v1, v2)) / (glm::length(v1) * glm::length(v2)));
}
// Helper function return the rotation from the first vector onto the second
glm::quat rotationBetween(const glm::vec3& v1, const glm::vec3& v2) {
float angle = angleBetween(v1, v2);
if (glm::isnan(angle) || angle < EPSILON) {
return glm::quat();
}
glm::vec3 axis;
if (angle > 179.99f * RADIANS_PER_DEGREE) { // 180 degree rotation; must use another axis
axis = glm::cross(v1, glm::vec3(1.0f, 0.0f, 0.0f));
float axisLength = glm::length(axis);
if (axisLength < EPSILON) { // parallel to x; y will work
axis = glm::normalize(glm::cross(v1, glm::vec3(0.0f, 1.0f, 0.0f)));
} else {
axis /= axisLength;
}
} else {
axis = glm::normalize(glm::cross(v1, v2));
}
return glm::angleAxis(angle, axis);
}
glm::vec3 extractTranslation(const glm::mat4& matrix) {
return glm::vec3(matrix[3][0], matrix[3][1], matrix[3][2]);
}
void setTranslation(glm::mat4& matrix, const glm::vec3& translation) {
matrix[3][0] = translation.x;
matrix[3][1] = translation.y;
matrix[3][2] = translation.z;
}
glm::quat extractRotation(const glm::mat4& matrix, bool assumeOrthogonal) {
// uses the iterative polar decomposition algorithm described by Ken Shoemake at
// http://www.cs.wisc.edu/graphics/Courses/838-s2002/Papers/polar-decomp.pdf
// code adapted from Clyde, https://github.com/threerings/clyde/blob/master/src/main/java/com/threerings/math/Matrix4f.java
// start with the contents of the upper 3x3 portion of the matrix
glm::mat3 upper = glm::mat3(matrix);
if (!assumeOrthogonal) {
for (int i = 0; i < 10; i++) {
// store the results of the previous iteration
glm::mat3 previous = upper;
// compute average of the matrix with its inverse transpose
float sd00 = previous[1][1] * previous[2][2] - previous[2][1] * previous[1][2];
float sd10 = previous[0][1] * previous[2][2] - previous[2][1] * previous[0][2];
float sd20 = previous[0][1] * previous[1][2] - previous[1][1] * previous[0][2];
float det = previous[0][0] * sd00 + previous[2][0] * sd20 - previous[1][0] * sd10;
if (fabs(det) == 0.0f) {
// determinant is zero; matrix is not invertible
break;
}
float hrdet = 0.5f / det;
upper[0][0] = +sd00 * hrdet + previous[0][0] * 0.5f;
upper[1][0] = -sd10 * hrdet + previous[1][0] * 0.5f;
upper[2][0] = +sd20 * hrdet + previous[2][0] * 0.5f;
upper[0][1] = -(previous[1][0] * previous[2][2] - previous[2][0] * previous[1][2]) * hrdet + previous[0][1] * 0.5f;
upper[1][1] = +(previous[0][0] * previous[2][2] - previous[2][0] * previous[0][2]) * hrdet + previous[1][1] * 0.5f;
upper[2][1] = -(previous[0][0] * previous[1][2] - previous[1][0] * previous[0][2]) * hrdet + previous[2][1] * 0.5f;
upper[0][2] = +(previous[1][0] * previous[2][1] - previous[2][0] * previous[1][1]) * hrdet + previous[0][2] * 0.5f;
upper[1][2] = -(previous[0][0] * previous[2][1] - previous[2][0] * previous[0][1]) * hrdet + previous[1][2] * 0.5f;
upper[2][2] = +(previous[0][0] * previous[1][1] - previous[1][0] * previous[0][1]) * hrdet + previous[2][2] * 0.5f;
// compute the difference; if it's small enough, we're done
glm::mat3 diff = upper - previous;
if (diff[0][0] * diff[0][0] + diff[1][0] * diff[1][0] + diff[2][0] * diff[2][0] + diff[0][1] * diff[0][1] +
diff[1][1] * diff[1][1] + diff[2][1] * diff[2][1] + diff[0][2] * diff[0][2] + diff[1][2] * diff[1][2] +
diff[2][2] * diff[2][2] < EPSILON) {
break;
}
}
}
// now that we have a nice orthogonal matrix, we can extract the rotation quaternion
// using the method described in http://en.wikipedia.org/wiki/Rotation_matrix#Conversions
float x2 = fabs(1.0f + upper[0][0] - upper[1][1] - upper[2][2]);
float y2 = fabs(1.0f - upper[0][0] + upper[1][1] - upper[2][2]);
float z2 = fabs(1.0f - upper[0][0] - upper[1][1] + upper[2][2]);
float w2 = fabs(1.0f + upper[0][0] + upper[1][1] + upper[2][2]);
return glm::normalize(glm::quat(0.5f * sqrtf(w2),
0.5f * sqrtf(x2) * (upper[1][2] >= upper[2][1] ? 1.0f : -1.0f),
0.5f * sqrtf(y2) * (upper[2][0] >= upper[0][2] ? 1.0f : -1.0f),
0.5f * sqrtf(z2) * (upper[0][1] >= upper[1][0] ? 1.0f : -1.0f)));
}
glm::vec3 extractScale(const glm::mat4& matrix) {
return glm::vec3(glm::length(matrix[0]), glm::length(matrix[1]), glm::length(matrix[2]));
}
float extractUniformScale(const glm::mat4& matrix) {
return extractUniformScale(extractScale(matrix));
}
float extractUniformScale(const glm::vec3& scale) {
return (scale.x + scale.y + scale.z) / 3.0f;
}
// Draw a 3D vector floating in space
void drawVector(glm::vec3 * vector) {
glDisable(GL_LIGHTING);
@@ -629,4 +523,4 @@ bool pointInSphere(glm::vec3& point, glm::vec3& sphereCenter, double sphereRadiu
return true;
}
return false;
}
View file
@@ -44,22 +44,6 @@ void drawVector(glm::vec3* vector);
void printVector(glm::vec3 vec);
float angleBetween(const glm::vec3& v1, const glm::vec3& v2);
glm::quat rotationBetween(const glm::vec3& v1, const glm::vec3& v2);
glm::vec3 extractTranslation(const glm::mat4& matrix);
void setTranslation(glm::mat4& matrix, const glm::vec3& translation);
glm::quat extractRotation(const glm::mat4& matrix, bool assumeOrthogonal = false);
glm::vec3 extractScale(const glm::mat4& matrix);
float extractUniformScale(const glm::mat4& matrix);
float extractUniformScale(const glm::vec3& scale);
double diffclock(timeval *clock1,timeval *clock2);
void renderCollisionOverlay(int width, int height, float magnitude, float red = 0, float blue = 0, float green = 0);
View file
@@ -30,6 +30,8 @@ enum eyeContactTargets {
MOUTH
};
const float EYE_EAR_GAP = 0.08f;
class Avatar;
class ProgramObject;
@@ -73,6 +75,11 @@ public:
glm::quat getEyeRotation(const glm::vec3& eyePosition) const;
const glm::vec3& getRightEyePosition() const { return _rightEyePosition; }
const glm::vec3& getLeftEyePosition() const { return _leftEyePosition; }
glm::vec3 getRightEarPosition() const { return _rightEyePosition + (getRightDirection() * EYE_EAR_GAP) + (getFrontDirection() * -EYE_EAR_GAP); }
glm::vec3 getLeftEarPosition() const { return _leftEyePosition + (getRightDirection() * -EYE_EAR_GAP) + (getFrontDirection() * -EYE_EAR_GAP); }
FaceModel& getFaceModel() { return _faceModel; }
const FaceModel& getFaceModel() const { return _faceModel; }
View file
@@ -15,9 +15,10 @@
#include <faceplus.h>
#endif
#include <FBXReader.h>
#include "Application.h"
#include "Faceplus.h"
#include "renderer/FBXReader.h"
static int floatVectorMetaTypeId = qRegisterMetaType<QVector<float> >();
View file
@@ -13,9 +13,10 @@
#include <SharedUtil.h>
#include <FBXReader.h>
#include "Application.h"
#include "Visage.h"
#include "renderer/FBXReader.h"
// this has to go after our normal includes, because its definition of HANDLE conflicts with Qt's
#ifdef HAVE_VISAGE
View file
@@ -543,14 +543,14 @@ void NetworkGeometry::setGeometry(const FBXGeometry& geometry) {
int totalIndices = 0;
foreach (const FBXMeshPart& part, mesh.parts) {
NetworkMeshPart networkPart;
if (!part.diffuseTexture.filename.isEmpty()) {
networkPart.diffuseTexture = Application::getInstance()->getTextureCache()->getTexture(
_textureBase.resolved(QUrl(part.diffuseTexture.filename)), false, mesh.isEye, part.diffuseTexture.content);
networkPart.diffuseTexture->setLoadPriorities(_loadPriorities);
}
if (!part.normalTexture.filename.isEmpty()) {
networkPart.normalTexture = Application::getInstance()->getTextureCache()->getTexture(
_textureBase.resolved(QUrl(part.normalFilename)), true);
_textureBase.resolved(QUrl(part.normalTexture.filename)), true, false, part.normalTexture.content);
networkPart.normalTexture->setLoadPriorities(_loadPriorities);
}
networkMesh.parts.append(networkPart);

View file

@ -20,7 +20,7 @@
#include <ResourceCache.h>
#include "FBXReader.h"
#include <FBXReader.h>
class Model;
class NetworkGeometry;

View file

@ -105,13 +105,22 @@ GLuint TextureCache::getBlueTextureID() {
return _blueTextureID;
}
QSharedPointer<NetworkTexture> TextureCache::getTexture(const QUrl& url, bool normalMap, bool dilatable) {
/// Extra data for creating textures.
class TextureExtra {
public:
bool normalMap;
const QByteArray& content;
};
QSharedPointer<NetworkTexture> TextureCache::getTexture(const QUrl& url, bool normalMap,
bool dilatable, const QByteArray& content) {
if (!dilatable) {
return ResourceCache::getResource(url, QUrl(), false, &normalMap).staticCast<NetworkTexture>();
TextureExtra extra = { normalMap, content };
return ResourceCache::getResource(url, QUrl(), false, &extra).staticCast<NetworkTexture>();
}
QSharedPointer<NetworkTexture> texture = _dilatableNetworkTextures.value(url);
if (texture.isNull()) {
texture = QSharedPointer<NetworkTexture>(new DilatableNetworkTexture(url), &Resource::allReferencesCleared);
texture = QSharedPointer<NetworkTexture>(new DilatableNetworkTexture(url, content), &Resource::allReferencesCleared);
texture->setSelf(texture);
texture->setCache(this);
_dilatableNetworkTextures.insert(url, texture);
@ -215,7 +224,9 @@ bool TextureCache::eventFilter(QObject* watched, QEvent* event) {
QSharedPointer<Resource> TextureCache::createResource(const QUrl& url,
const QSharedPointer<Resource>& fallback, bool delayLoad, const void* extra) {
return QSharedPointer<Resource>(new NetworkTexture(url, *(const bool*)extra), &Resource::allReferencesCleared);
const TextureExtra* textureExtra = static_cast<const TextureExtra*>(extra);
return QSharedPointer<Resource>(new NetworkTexture(url, textureExtra->normalMap, textureExtra->content),
&Resource::allReferencesCleared);
}
QOpenGLFramebufferObject* TextureCache::createFramebufferObject() {
@ -238,8 +249,8 @@ Texture::~Texture() {
glDeleteTextures(1, &_id);
}
NetworkTexture::NetworkTexture(const QUrl& url, bool normalMap) :
Resource(url),
NetworkTexture::NetworkTexture(const QUrl& url, bool normalMap, const QByteArray& content) :
Resource(url, !content.isEmpty()),
_translucent(false) {
if (!url.isValid()) {
@ -250,12 +261,19 @@ NetworkTexture::NetworkTexture(const QUrl& url, bool normalMap) :
glBindTexture(GL_TEXTURE_2D, getID());
loadSingleColorTexture(normalMap ? OPAQUE_BLUE : OPAQUE_WHITE);
glBindTexture(GL_TEXTURE_2D, 0);
// if we have content, load it after we have our self pointer
if (!content.isEmpty()) {
_startedLoading = true;
QMetaObject::invokeMethod(this, "loadContent", Qt::QueuedConnection, Q_ARG(const QByteArray&, content));
}
}
class ImageReader : public QRunnable {
public:
ImageReader(const QWeakPointer<Resource>& texture, QNetworkReply* reply);
ImageReader(const QWeakPointer<Resource>& texture, QNetworkReply* reply, const QUrl& url = QUrl(),
const QByteArray& content = QByteArray());
virtual void run();
@ -263,27 +281,37 @@ private:
QWeakPointer<Resource> _texture;
QNetworkReply* _reply;
QUrl _url;
QByteArray _content;
};
ImageReader::ImageReader(const QWeakPointer<Resource>& texture, QNetworkReply* reply) :
ImageReader::ImageReader(const QWeakPointer<Resource>& texture, QNetworkReply* reply,
const QUrl& url, const QByteArray& content) :
_texture(texture),
_reply(reply) {
_reply(reply),
_url(url),
_content(content) {
}
void ImageReader::run() {
QSharedPointer<Resource> texture = _texture.toStrongRef();
if (texture.isNull()) {
_reply->deleteLater();
if (_reply) {
_reply->deleteLater();
}
return;
}
QUrl url = _reply->url();
QImage image = QImage::fromData(_reply->readAll());
_reply->deleteLater();
if (_reply) {
_url = _reply->url();
_content = _reply->readAll();
_reply->deleteLater();
}
QImage image = QImage::fromData(_content);
// enforce a fixed maximum
const int MAXIMUM_SIZE = 1024;
if (image.width() > MAXIMUM_SIZE || image.height() > MAXIMUM_SIZE) {
qDebug() << "Image greater than maximum size:" << url << image.width() << image.height();
qDebug() << "Image greater than maximum size:" << _url << image.width() << image.height();
image = image.scaled(MAXIMUM_SIZE, MAXIMUM_SIZE, Qt::KeepAspectRatio);
}
@ -315,7 +343,7 @@ void ImageReader::run() {
}
int imageArea = image.width() * image.height();
if (opaquePixels == imageArea) {
qDebug() << "Image with alpha channel is completely opaque:" << url;
qDebug() << "Image with alpha channel is completely opaque:" << _url;
image = image.convertToFormat(QImage::Format_RGB888);
}
QMetaObject::invokeMethod(texture.data(), "setImage", Q_ARG(const QImage&, image),
@ -327,6 +355,10 @@ void NetworkTexture::downloadFinished(QNetworkReply* reply) {
QThreadPool::globalInstance()->start(new ImageReader(_self, reply));
}
void NetworkTexture::loadContent(const QByteArray& content) {
QThreadPool::globalInstance()->start(new ImageReader(_self, NULL, _url, content));
}
void NetworkTexture::setImage(const QImage& image, bool translucent) {
_translucent = translucent;
@ -348,8 +380,8 @@ void NetworkTexture::imageLoaded(const QImage& image) {
// nothing by default
}
DilatableNetworkTexture::DilatableNetworkTexture(const QUrl& url) :
NetworkTexture(url, false),
DilatableNetworkTexture::DilatableNetworkTexture(const QUrl& url, const QByteArray& content) :
NetworkTexture(url, false, content),
_innerRadius(0),
_outerRadius(0)
{
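An aside on the flow above (a sketch, not part of this commit): embedded texture bytes ride from the FBX reader through getTexture()'s new content parameter into TextureExtra and finally into the NetworkTexture constructor, which queues loadContent() so the resource's self pointer is set before any loading happens. A hypothetical caller, mirroring what NetworkGeometry::setGeometry() does above:

// resolve a mesh part's diffuse map, preferring bytes embedded in the FBX
// ("Content" node) over a network fetch; falls back to a download when empty
QSharedPointer<NetworkTexture> resolveDiffuse(TextureCache* cache, const QUrl& textureBase,
        const FBXTexture& fbxTexture) {
    QUrl url = textureBase.resolved(QUrl(fbxTexture.filename));
    return cache->getTexture(url, false, false, fbxTexture.content);
}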

View file

@ -44,7 +44,8 @@ public:
GLuint getBlueTextureID();
/// Loads a texture from the specified URL.
QSharedPointer<NetworkTexture> getTexture(const QUrl& url, bool normalMap = false, bool dilatable = false);
QSharedPointer<NetworkTexture> getTexture(const QUrl& url, bool normalMap = false, bool dilatable = false,
const QByteArray& content = QByteArray());
/// Returns a pointer to the primary framebuffer object. This render target includes a depth component, and is
/// used for scene rendering.
@ -115,7 +116,7 @@ class NetworkTexture : public Resource, public Texture {
public:
NetworkTexture(const QUrl& url, bool normalMap);
NetworkTexture(const QUrl& url, bool normalMap, const QByteArray& content);
/// Checks whether it "looks like" this texture is translucent
/// (majority of pixels neither fully opaque or fully transparent).
@ -124,10 +125,12 @@ public:
protected:
virtual void downloadFinished(QNetworkReply* reply);
virtual void imageLoaded(const QImage& image);
Q_INVOKABLE void loadContent(const QByteArray& content);
Q_INVOKABLE void setImage(const QImage& image, bool translucent);
virtual void imageLoaded(const QImage& image);
private:
bool _translucent;
@ -139,7 +142,7 @@ class DilatableNetworkTexture : public NetworkTexture {
public:
DilatableNetworkTexture(const QUrl& url);
DilatableNetworkTexture(const QUrl& url, const QByteArray& content);
/// Returns a pointer to a texture with the requested amount of dilation.
QSharedPointer<Texture> getDilatedTexture(float dilation);

View file

@ -293,6 +293,7 @@ void Stats::display(
glm::vec3 avatarPos = myAvatar->getPosition();
lines = _expanded ? 5 : 3;
drawBackground(backgroundColor, horizontalOffset, 0, _geoStatsWidth, lines * STATS_PELS_PER_LINE + 10);
horizontalOffset += 5;
@ -341,6 +342,10 @@ void Stats::display(
VoxelSystem* voxels = Application::getInstance()->getVoxels();
lines = _expanded ? 12 : 3;
if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
lines += 8; // spatial audio processing adds 1 spacing line and 7 extra lines of info
}
drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10);
horizontalOffset += 5;
@ -497,5 +502,89 @@ void Stats::display(
voxelStats << "LOD: You can see " << qPrintable(displayLODDetails.trimmed());
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, (char*)voxelStats.str().c_str(), color);
}
}
if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
verticalOffset += STATS_PELS_PER_LINE; // space one line...
const AudioReflector* audioReflector = Application::getInstance()->getAudioReflector();
// add some reflection stats
char reflectionsStatus[128];
sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s, Normals: %s",
audioReflector->getReflections(),
(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)
? "included" : "silent"),
(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars)
? "two" : "one"),
(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource)
? "stereo" : "mono"),
(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces)
? "random" : "regular")
);
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ?
audioReflector->getPreDelay() : 0.0f;
sprintf(reflectionsStatus, "Delay: pre: %6.3f, average %6.3f, max %6.3f, min %6.3f, speed: %6.3f",
preDelay,
audioReflector->getAverageDelayMsecs(),
audioReflector->getMaxDelayMsecs(),
audioReflector->getMinDelayMsecs(),
audioReflector->getSoundMsPerMeter());
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, Factor: %5.3f",
audioReflector->getAverageAttenuation(),
audioReflector->getMaxAttenuation(),
audioReflector->getMinAttenuation(),
audioReflector->getDistanceAttenuationScalingFactor());
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
sprintf(reflectionsStatus, "Local Audio: %s Attenuation: %5.3f",
(Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)
? "yes" : "no"),
audioReflector->getLocalAudioAttenuationFactor());
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
bool diffusionEnabled = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
int fanout = diffusionEnabled ? audioReflector->getDiffusionFanout() : 0;
int diffusionPaths = diffusionEnabled ? audioReflector->getDiffusionPathCount() : 0;
sprintf(reflectionsStatus, "Diffusion: %s, Fanout: %d, Paths: %d",
(diffusionEnabled ? "yes" : "no"), fanout, diffusionPaths);
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
const float AS_PERCENT = 100.0f;
float reflectiveRatio = audioReflector->getReflectiveRatio() * AS_PERCENT;
float diffusionRatio = audioReflector->getDiffusionRatio() * AS_PERCENT;
float absorptionRatio = audioReflector->getAbsorptionRatio() * AS_PERCENT;
sprintf(reflectionsStatus, "Ratios: Reflective: %5.3f, Diffusion: %5.3f, Absorption: %5.3f",
reflectiveRatio, diffusionRatio, absorptionRatio);
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
sprintf(reflectionsStatus, "Comb Filter Window: %5.3f ms, Allowed: %d, Suppressed: %d",
audioReflector->getCombFilterWindow(),
audioReflector->getEchoesInjected(),
audioReflector->getEchoesSuppressed());
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
}
}

View file

@ -18,15 +18,19 @@
#include "AudioRingBuffer.h"
AudioRingBuffer::AudioRingBuffer(int numFrameSamples) :
AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode) :
NodeData(),
_sampleCapacity(numFrameSamples * RING_BUFFER_LENGTH_FRAMES),
_numFrameSamples(numFrameSamples),
_isStarved(true),
_hasStarted(false)
_hasStarted(false),
_randomAccessMode(randomAccessMode)
{
if (numFrameSamples) {
_buffer = new int16_t[_sampleCapacity];
if (_randomAccessMode) {
memset(_buffer, 0, _sampleCapacity * sizeof(int16_t));
}
_nextOutput = _buffer;
_endOfLastWrite = _buffer;
} else {
@ -50,6 +54,9 @@ void AudioRingBuffer::resizeForFrameSize(qint64 numFrameSamples) {
delete[] _buffer;
_sampleCapacity = numFrameSamples * RING_BUFFER_LENGTH_FRAMES;
_buffer = new int16_t[_sampleCapacity];
if (_randomAccessMode) {
memset(_buffer, 0, _sampleCapacity * sizeof(int16_t));
}
_nextOutput = _buffer;
_endOfLastWrite = _buffer;
}
@ -68,18 +75,34 @@ qint64 AudioRingBuffer::readData(char *data, qint64 maxSize) {
// only copy up to the number of samples we have available
int numReadSamples = std::min((unsigned) (maxSize / sizeof(int16_t)), samplesAvailable());
// If we're in random access mode, then we consider our number of available read samples slightly
// differently. Namely, if anything has been written, we say we have as many samples as they ask for;
// otherwise, we say we have nothing available.
if (_randomAccessMode) {
numReadSamples = _endOfLastWrite ? (maxSize / sizeof(int16_t)) : 0;
}
if (_nextOutput + numReadSamples > _buffer + _sampleCapacity) {
// we're going to need to do two reads to get this data, it wraps around the edge
// read to the end of the buffer
int numSamplesToEnd = (_buffer + _sampleCapacity) - _nextOutput;
memcpy(data, _nextOutput, numSamplesToEnd * sizeof(int16_t));
if (_randomAccessMode) {
memset(_nextOutput, 0, numSamplesToEnd * sizeof(int16_t)); // clear it
}
// read the rest from the beginning of the buffer
memcpy(data + (numSamplesToEnd * sizeof(int16_t)), _buffer, (numReadSamples - numSamplesToEnd) * sizeof(int16_t));
if (_randomAccessMode) {
memset(_buffer, 0, (numReadSamples - numSamplesToEnd) * sizeof(int16_t)); // clear it
}
} else {
// read the data
memcpy(data, _nextOutput, numReadSamples * sizeof(int16_t));
if (_randomAccessMode) {
memset(_nextOutput, 0, numReadSamples * sizeof(int16_t)); // clear it
}
}
// push the position of _nextOutput by the number of samples read
@ -128,6 +151,10 @@ int16_t& AudioRingBuffer::operator[](const int index) {
return *shiftedPositionAccomodatingWrap(_nextOutput, index);
}
const int16_t& AudioRingBuffer::operator[] (const int index) const {
return *shiftedPositionAccomodatingWrap(_nextOutput, index);
}
void AudioRingBuffer::shiftReadPosition(unsigned int numSamples) {
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
}
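To make the random-access semantics concrete, a minimal sketch (frame size and values are arbitrary; assumes AudioRingBuffer.h and <algorithm> are included):

const int SAMPLES = 512;                    // hypothetical frame size
AudioRingBuffer buffer(SAMPLES, true);      // randomAccessMode = true

int16_t input[SAMPLES / 2];
std::fill(input, input + SAMPLES / 2, 1000);
buffer.writeSamples(input, SAMPLES / 2);    // write only half a frame

int16_t output[SAMPLES];
buffer.readSamples(output, SAMPLES);        // allowed: something has been written
// the unwritten half reads back as zeros (the buffer was memset on creation),
// and the consumed region is cleared so stale samples are never replayed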

View file

@ -39,7 +39,7 @@ const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();
class AudioRingBuffer : public NodeData {
Q_OBJECT
public:
AudioRingBuffer(int numFrameSamples);
AudioRingBuffer(int numFrameSamples, bool randomAccessMode = false);
~AudioRingBuffer();
void reset();
@ -50,8 +50,8 @@ public:
int parseData(const QByteArray& packet);
// assume callers using this will never wrap around the end
const int16_t* getNextOutput() { return _nextOutput; }
const int16_t* getBuffer() { return _buffer; }
const int16_t* getNextOutput() const { return _nextOutput; }
const int16_t* getBuffer() const { return _buffer; }
qint64 readSamples(int16_t* destination, qint64 maxSamples);
qint64 writeSamples(const int16_t* source, qint64 maxSamples);
@ -60,6 +60,7 @@ public:
qint64 writeData(const char* data, qint64 maxSize);
int16_t& operator[](const int index);
const int16_t& operator[] (const int index) const;
void shiftReadPosition(unsigned int numSamples);
@ -87,6 +88,7 @@ protected:
int16_t* _buffer;
bool _isStarved;
bool _hasStarted;
bool _randomAccessMode; /// will this ringbuffer be used for random access? if so, do some special processing
};
#endif // hifi_AudioRingBuffer_h

View file

@ -99,6 +99,8 @@ class AvatarData : public QObject {
Q_PROPERTY(QString skeletonModelURL READ getSkeletonModelURLFromScript WRITE setSkeletonModelURLFromScript)
Q_PROPERTY(QString billboardURL READ getBillboardURL WRITE setBillboardFromURL)
Q_PROPERTY(QStringList jointNames READ getJointNames)
Q_PROPERTY(QUuid sessionUUID READ getSessionUUID)
public:
AvatarData();

View file

@ -0,0 +1,38 @@
cmake_minimum_required(VERSION 2.8)
if (WIN32)
cmake_policy (SET CMP0020 NEW)
endif (WIN32)
set(ROOT_DIR ../..)
set(MACRO_DIR "${ROOT_DIR}/cmake/macros")
# setup for find modules
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../../cmake/modules/")
set(TARGET_NAME fbx)
include(${MACRO_DIR}/SetupHifiLibrary.cmake)
setup_hifi_library(${TARGET_NAME})
include(${MACRO_DIR}/IncludeGLM.cmake)
include_glm(${TARGET_NAME} "${ROOT_DIR}")
include(${MACRO_DIR}/LinkHifiLibrary.cmake)
link_hifi_library(shared ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(networking ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(octree ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(voxels ${TARGET_NAME} "${ROOT_DIR}")
# link ZLIB and GnuTLS
find_package(ZLIB)
find_package(GnuTLS REQUIRED)
# add a definition for ssize_t so that windows doesn't bail on gnutls.h
if (WIN32)
add_definitions(-Dssize_t=long)
endif ()
include_directories(SYSTEM "${ZLIB_INCLUDE_DIRS}" "${GNUTLS_INCLUDE_DIR}")
target_link_libraries(${TARGET_NAME} "${ZLIB_LIBRARIES}" Qt5::Widgets "${GNUTLS_LIBRARY}")

View file

@ -22,14 +22,14 @@
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/transform.hpp>
#include <OctalCode.h>
#include <GeometryUtil.h>
#include <OctalCode.h>
#include <Shape.h>
#include <SharedUtil.h>
#include <VoxelTree.h>
#include "FBXReader.h"
#include "Util.h"
using namespace std;
@ -72,6 +72,8 @@ bool FBXGeometry::hasBlendedMeshes() const {
}
static int fbxGeometryMetaTypeId = qRegisterMetaType<FBXGeometry>();
static int fbxAnimationFrameMetaTypeId = qRegisterMetaType<FBXAnimationFrame>();
static int fbxAnimationFrameVectorMetaTypeId = qRegisterMetaType<QVector<FBXAnimationFrame> >();
template<class T> QVariant readBinaryArray(QDataStream& in) {
quint32 arrayLength;
@ -444,6 +446,34 @@ QVector<int> getIntVector(const QVariantList& properties, int index) {
return vector;
}
QVector<qlonglong> getLongVector(const QVariantList& properties, int index) {
if (index >= properties.size()) {
return QVector<qlonglong>();
}
QVector<qlonglong> vector = properties.at(index).value<QVector<qlonglong> >();
if (!vector.isEmpty()) {
return vector;
}
for (; index < properties.size(); index++) {
vector.append(properties.at(index).toLongLong());
}
return vector;
}
QVector<float> getFloatVector(const QVariantList& properties, int index) {
if (index >= properties.size()) {
return QVector<float>();
}
QVector<float> vector = properties.at(index).value<QVector<float> >();
if (!vector.isEmpty()) {
return vector;
}
for (; index < properties.size(); index++) {
vector.append(properties.at(index).toFloat());
}
return vector;
}
QVector<double> getDoubleVector(const QVariantList& properties, int index) {
if (index >= properties.size()) {
return QVector<double>();
@ -478,8 +508,7 @@ glm::vec3 parseVec3(const QString& string) {
QString processID(const QString& id) {
// Blender (at least) prepends a type to the ID, so strip it out
int index = id.indexOf("::");
return (index == -1) ? id : id.mid(index + 2);
return id.mid(id.lastIndexOf(':') + 1);
}
QString getID(const QVariantList& properties, int index = 0) {
@ -901,6 +930,19 @@ public:
float averageRadius; // average distance from mesh points to averageVertex
};
class AnimationCurve {
public:
QVector<float> values;
};
FBXTexture getTexture(const QString& textureID, const QHash<QString, QByteArray>& textureFilenames,
const QHash<QByteArray, QByteArray>& textureContent) {
FBXTexture texture;
texture.filename = textureFilenames.value(textureID);
texture.content = textureContent.value(texture.filename);
return texture;
}
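For illustration (IDs and filenames invented), the two hashes resolve a texture ID to its filename and then to any bytes a "Video" node embedded:

QHash<QString, QByteArray> textureFilenames;   // texture ID -> relative filename
QHash<QByteArray, QByteArray> textureContent;  // filename -> embedded bytes
textureFilenames.insert("head", "head.png");
textureContent.insert("head.png", embeddedPngBytes);  // hypothetical QByteArray

FBXTexture texture = getTexture("head", textureFilenames, textureContent);
// texture.filename == "head.png"; texture.content holds the embedded bytes,
// or stays empty when the FBX only references an external file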
FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping) {
QHash<QString, ExtractedMesh> meshes;
QVector<ExtractedBlendshape> blendshapes;
@ -908,10 +950,16 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
QMultiHash<QString, QString> childMap;
QHash<QString, FBXModel> models;
QHash<QString, Cluster> clusters;
QHash<QString, AnimationCurve> animationCurves;
QHash<QString, QByteArray> textureFilenames;
QHash<QByteArray, QByteArray> textureContent;
QHash<QString, Material> materials;
QHash<QString, QString> diffuseTextures;
QHash<QString, QString> bumpTextures;
QHash<QString, QString> localRotations;
QHash<QString, QString> xComponents;
QHash<QString, QString> yComponents;
QHash<QString, QString> zComponents;
QVariantHash joints = mapping.value("joint").toHash();
QString jointEyeLeftName = processID(getString(joints.value("jointEyeLeft", "jointEyeLeft")));
@ -974,7 +1022,7 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
QString name;
if (object.properties.size() == 3) {
name = object.properties.at(1).toString();
name = name.left(name.indexOf(QChar('\0')));
name = processID(name.left(name.indexOf(QChar('\0'))));
} else {
name = getID(object.properties);
@ -1124,7 +1172,7 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
model.postRotation = glm::quat(glm::radians(postRotation));
model.postTransform = glm::translate(-rotationPivot) * glm::translate(scalePivot) *
glm::scale(scale) * glm::translate(-scalePivot);
// NOTE: anbgles from the FBX file are in degrees
// NOTE: angles from the FBX file are in degrees
// so we convert them to radians for the FBXModel class
model.rotationMin = glm::radians(glm::vec3(rotationMinX ? rotationMin.x : -180.0f,
rotationMinY ? rotationMin.y : -180.0f, rotationMinZ ? rotationMin.z : -180.0f));
@ -1141,6 +1189,21 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
textureFilenames.insert(getID(object.properties), filename);
}
}
} else if (object.name == "Video") {
QByteArray filename;
QByteArray content;
foreach (const FBXNode& subobject, object.children) {
if (subobject.name == "RelativeFilename") {
filename = subobject.properties.at(0).toByteArray();
filename = filename.mid(qMax(filename.lastIndexOf('\\'), filename.lastIndexOf('/')) + 1);
} else if (subobject.name == "Content" && !subobject.properties.isEmpty()) {
content = subobject.properties.at(0).toByteArray();
}
}
if (!content.isEmpty()) {
textureContent.insert(filename, content);
}
} else if (object.name == "Material") {
Material material = { glm::vec3(1.0f, 1.0f, 1.0f), glm::vec3(1.0f, 1.0f, 1.0f), 96.0f };
foreach (const FBXNode& subobject, object.children) {
@ -1204,6 +1267,14 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
blendshapeChannelIndices.insert(id, index);
}
}
} else if (object.name == "AnimationCurve") {
AnimationCurve curve;
foreach (const FBXNode& subobject, object.children) {
if (subobject.name == "KeyValueFloat") {
curve.values = getFloatVector(subobject.properties, 0);
}
}
animationCurves.insert(getID(object.properties), curve);
}
}
} else if (child.name == "Connections") {
@ -1214,8 +1285,20 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
if (type.contains("diffuse")) {
diffuseTextures.insert(getID(connection.properties, 2), getID(connection.properties, 1));
} else if (type.contains("bump")) {
} else if (type.contains("bump") || type.contains("normal")) {
bumpTextures.insert(getID(connection.properties, 2), getID(connection.properties, 1));
} else if (type == "lcl rotation") {
localRotations.insert(getID(connection.properties, 2), getID(connection.properties, 1));
} else if (type == "d|x") {
xComponents.insert(getID(connection.properties, 2), getID(connection.properties, 1));
} else if (type == "d|y") {
yComponents.insert(getID(connection.properties, 2), getID(connection.properties, 1));
} else if (type == "d|z") {
zComponents.insert(getID(connection.properties, 2), getID(connection.properties, 1));
}
}
parentMap.insert(getID(connection.properties, 1), getID(connection.properties, 2));
@ -1239,7 +1322,8 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
glm::quat offsetRotation = glm::quat(glm::radians(glm::vec3(mapping.value("rx").toFloat(),
mapping.value("ry").toFloat(), mapping.value("rz").toFloat())));
geometry.offset = glm::translate(glm::vec3(mapping.value("tx").toFloat(), mapping.value("ty").toFloat(),
mapping.value("tz").toFloat())) * glm::mat4_cast(offsetRotation) * glm::scale(glm::vec3(offsetScale, offsetScale, offsetScale));
mapping.value("tz").toFloat())) * glm::mat4_cast(offsetRotation) *
glm::scale(glm::vec3(offsetScale, offsetScale, offsetScale));
// get the list of models in depth-first traversal order
QVector<QString> modelIDs;
@ -1277,6 +1361,17 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
appendModelIDs(parentMap.value(topID), childMap, models, remainingModels, modelIDs);
}
// figure the number of animation frames from the curves
int frameCount = 0;
foreach (const AnimationCurve& curve, animationCurves) {
frameCount = qMax(frameCount, curve.values.size());
}
for (int i = 0; i < frameCount; i++) {
FBXAnimationFrame frame;
frame.rotations.resize(modelIDs.size());
geometry.animationFrames.append(frame);
}
// convert the models to joints
QVariantList freeJoints = mapping.values("freeJoint");
foreach (const QString& modelID, modelIDs) {
@ -1286,7 +1381,8 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
joint.parentIndex = model.parentIndex;
// get the indices of all ancestors starting with the first free one (if any)
joint.freeLineage.append(geometry.joints.size());
int jointIndex = geometry.joints.size();
joint.freeLineage.append(jointIndex);
int lastFreeIndex = joint.isFree ? 0 : -1;
for (int index = joint.parentIndex; index != -1; index = geometry.joints.at(index).parentIndex) {
if (geometry.joints.at(index).isFree) {
@ -1325,6 +1421,18 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
joint.shapeType = Shape::UNKNOWN_SHAPE;
geometry.joints.append(joint);
geometry.jointIndices.insert(model.name, geometry.joints.size());
QString rotationID = localRotations.value(modelID);
AnimationCurve xCurve = animationCurves.value(xComponents.value(rotationID));
AnimationCurve yCurve = animationCurves.value(yComponents.value(rotationID));
AnimationCurve zCurve = animationCurves.value(zComponents.value(rotationID));
glm::vec3 defaultValues = glm::degrees(safeEulerAngles(joint.rotation));
for (int i = 0; i < frameCount; i++) {
geometry.animationFrames[i].rotations[jointIndex] = glm::quat(glm::radians(glm::vec3(
xCurve.values.isEmpty() ? defaultValues.x : xCurve.values.at(i % xCurve.values.size()),
yCurve.values.isEmpty() ? defaultValues.y : yCurve.values.at(i % yCurve.values.size()),
zCurve.values.isEmpty() ? defaultValues.z : zCurve.values.at(i % zCurve.values.size()))));
}
}
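The sampling rule in that loop deserves a spelled-out sketch (same assumptions as the code above: curve values are degrees; a curve shorter than frameCount repeats via the modulo; a missing curve falls back to the joint's bind-pose angles):

// hypothetical standalone version of the per-frame rotation assembly
glm::quat frameRotation(const QVector<float>& xs, const QVector<float>& ys,
        const QVector<float>& zs, const glm::quat& fallback, int frame) {
    glm::vec3 defaults = glm::degrees(safeEulerAngles(fallback));
    return glm::quat(glm::radians(glm::vec3(
        xs.isEmpty() ? defaults.x : xs.at(frame % xs.size()),
        ys.isEmpty() ? defaults.y : ys.at(frame % ys.size()),
        zs.isEmpty() ? defaults.z : zs.at(frame % zs.size()))));
}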
// for each joint we allocate a JointShapeInfo in which we'll store collision shape info
QVector<JointShapeInfo> jointShapeInfos;
@ -1377,23 +1485,23 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
if (materials.contains(childID)) {
Material material = materials.value(childID);
QByteArray diffuseFilename;
FBXTexture diffuseTexture;
QString diffuseTextureID = diffuseTextures.value(childID);
if (!diffuseTextureID.isNull()) {
diffuseFilename = textureFilenames.value(diffuseTextureID);
diffuseTexture = getTexture(diffuseTextureID, textureFilenames, textureContent);
// FBX files generated by 3DSMax have an intermediate texture parent, apparently
foreach (const QString& childTextureID, childMap.values(diffuseTextureID)) {
if (textureFilenames.contains(childTextureID)) {
diffuseFilename = textureFilenames.value(childTextureID);
diffuseTexture = getTexture(childTextureID, textureFilenames, textureContent);
}
}
}
QByteArray normalFilename;
FBXTexture normalTexture;
QString bumpTextureID = bumpTextures.value(childID);
if (!bumpTextureID.isNull()) {
normalFilename = textureFilenames.value(bumpTextureID);
normalTexture = getTexture(bumpTextureID, textureFilenames, textureContent);
generateTangents = true;
}
@ -1403,21 +1511,21 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
part.diffuseColor = material.diffuse;
part.specularColor = material.specular;
part.shininess = material.shininess;
if (!diffuseFilename.isNull()) {
part.diffuseFilename = diffuseFilename;
if (!diffuseTexture.filename.isNull()) {
part.diffuseTexture = diffuseTexture;
}
if (!normalFilename.isNull()) {
part.normalFilename = normalFilename;
if (!normalTexture.filename.isNull()) {
part.normalTexture = normalTexture;
}
}
}
materialIndex++;
} else if (textureFilenames.contains(childID)) {
QByteArray filename = textureFilenames.value(childID);
FBXTexture texture = getTexture(childID, textureFilenames, textureContent);
for (int j = 0; j < extracted.partMaterialTextures.size(); j++) {
if (extracted.partMaterialTextures.at(j).second == textureIndex) {
extracted.mesh.parts[j].diffuseFilename = filename;
extracted.mesh.parts[j].diffuseTexture = texture;
}
}
textureIndex++;
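Tying the animation plumbing together, an illustrative trace (IDs invented; the strings are the lowercased connection types matched in the "Connections" branch above):

// connections, conceptually:
//   curve node "R"  --"lcl rotation"-->  model "Bip01"
//   curve "Cx"      --"d|x"--------->    curve node "R"   (likewise "d|y", "d|z")
// so recovering a joint's x-axis curve is two hash lookups:
QString rotationID = localRotations.value("Bip01");                           // -> "R"
AnimationCurve xCurve = animationCurves.value(xComponents.value(rotationID)); // -> "Cx"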

View file

@ -103,6 +103,14 @@ public:
glm::mat4 inverseBindMatrix;
};
/// A texture map in an FBX document.
class FBXTexture {
public:
QByteArray filename;
QByteArray content;
};
/// A single part of a mesh (with the same material).
class FBXMeshPart {
public:
@ -114,8 +122,8 @@ public:
glm::vec3 specularColor;
float shininess;
QByteArray diffuseFilename;
QByteArray normalFilename;
FBXTexture diffuseTexture;
FBXTexture normalTexture;
};
/// A single mesh (with optional blendshapes) extracted from an FBX document.
@ -139,6 +147,16 @@ public:
QVector<FBXBlendshape> blendshapes;
};
/// A single animation frame extracted from an FBX document.
class FBXAnimationFrame {
public:
QVector<glm::quat> rotations;
};
Q_DECLARE_METATYPE(FBXAnimationFrame)
Q_DECLARE_METATYPE(QVector<FBXAnimationFrame>)
/// An attachment to an FBX document.
class FBXAttachment {
public:
@ -183,6 +201,8 @@ public:
Extents bindExtents;
Extents meshExtents;
QVector<FBXAnimationFrame> animationFrames;
QVector<FBXAttachment> attachments;
int getJointIndex(const QString& name) const { return jointIndices.value(name) - 1; }
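Note the deliberate off-by-one in getJointIndex(): jointIndices stores index + 1 (geometry.joints.size() is inserted after the append in FBXReader.cpp), so QHash::value()'s default of 0 for a missing name falls out as -1. A sketch:

int head = geometry.getJointIndex("Head");      // e.g. 12 when the joint exists
int missing = geometry.getJointIndex("NoSuch"); // value() returns 0, so this is -1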

View file

@ -23,7 +23,8 @@ enum BoxFace {
MIN_Y_FACE,
MAX_Y_FACE,
MIN_Z_FACE,
MAX_Z_FACE
MAX_Z_FACE,
UNKNOWN_FACE
};
enum BoxVertex {

View file

@ -425,10 +425,6 @@ bool ViewFrustum::matches(const ViewFrustum& compareTo, bool debug) const {
return result;
}
bool isNaN(float f) {
return f != f;
}
bool ViewFrustum::isVerySimilar(const ViewFrustum& compareTo, bool debug) const {
// Compute distance between the two positions
@ -450,7 +446,7 @@ bool ViewFrustum::isVerySimilar(const ViewFrustum& compareTo, bool debug) const
float angleEyeOffsetOrientation = compareTo._eyeOffsetOrientation == _eyeOffsetOrientation
? 0.0f : glm::degrees(glm::angle(dQEyeOffsetOrientation));
if (isNaN(angleEyeOffsetOrientation)) {
angleOrientation = 0.0f;
angleEyeOffsetOrientation = 0.0f;
}
bool result =

View file

@ -23,6 +23,7 @@ include_glm(${TARGET_NAME} "${ROOT_DIR}")
include(${MACRO_DIR}/LinkHifiLibrary.cmake)
link_hifi_library(shared ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(octree ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(fbx ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(networking ${TARGET_NAME} "${ROOT_DIR}")
# link ZLIB and GnuTLS
@ -35,4 +36,4 @@ if (WIN32)
endif ()
include_directories(SYSTEM "${ZLIB_INCLUDE_DIRS}" "${GNUTLS_INCLUDE_DIR}")
target_link_libraries(${TARGET_NAME} "${ZLIB_LIBRARIES}" Qt5::Widgets "${GNUTLS_LIBRARY}")
target_link_libraries(${TARGET_NAME} "${ZLIB_LIBRARIES}" Qt5::Widgets "${GNUTLS_LIBRARY}")

View file

@ -24,6 +24,7 @@ include(${MACRO_DIR}/LinkHifiLibrary.cmake)
link_hifi_library(shared ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(octree ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(voxels ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(fbx ${TARGET_NAME} "${ROOT_DIR}")
link_hifi_library(particles ${TARGET_NAME} "${ROOT_DIR}")
# link ZLIB
@ -36,4 +37,4 @@ if (WIN32)
endif ()
include_directories(SYSTEM "${ZLIB_INCLUDE_DIRS}" "${GNUTLS_INCLUDE_DIR}")
target_link_libraries(${TARGET_NAME} "${ZLIB_LIBRARIES}" "${GNUTLS_LIBRARY}" Qt5::Widgets)
target_link_libraries(${TARGET_NAME} "${ZLIB_LIBRARIES}" "${GNUTLS_LIBRARY}" Qt5::Widgets)

View file

@ -0,0 +1,102 @@
//
// AnimationCache.cpp
// libraries/script-engine/src/
//
// Created by Andrzej Kapolka on 4/14/14.
// Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <QRunnable>
#include <QThreadPool>
#include "AnimationCache.h"
static int animationPointerMetaTypeId = qRegisterMetaType<AnimationPointer>();
AnimationCache::AnimationCache(QObject* parent) :
ResourceCache(parent) {
}
AnimationPointer AnimationCache::getAnimation(const QUrl& url) {
if (QThread::currentThread() != thread()) {
AnimationPointer result;
QMetaObject::invokeMethod(this, "getAnimation", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(AnimationPointer, result), Q_ARG(const QUrl&, url));
return result;
}
return getResource(url).staticCast<Animation>();
}
QSharedPointer<Resource> AnimationCache::createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
bool delayLoad, const void* extra) {
return QSharedPointer<Resource>(new Animation(url), &Resource::allReferencesCleared);
}
Animation::Animation(const QUrl& url) :
Resource(url) {
}
class AnimationReader : public QRunnable {
public:
AnimationReader(const QWeakPointer<Resource>& animation, QNetworkReply* reply);
virtual void run();
private:
QWeakPointer<Resource> _animation;
QNetworkReply* _reply;
};
AnimationReader::AnimationReader(const QWeakPointer<Resource>& animation, QNetworkReply* reply) :
_animation(animation),
_reply(reply) {
}
void AnimationReader::run() {
QSharedPointer<Resource> animation = _animation.toStrongRef();
if (!animation.isNull()) {
QMetaObject::invokeMethod(animation.data(), "setGeometry",
Q_ARG(const FBXGeometry&, readFBX(_reply->readAll(), QVariantHash())));
}
_reply->deleteLater();
}
QStringList Animation::getJointNames() const {
if (QThread::currentThread() != thread()) {
QStringList result;
QMetaObject::invokeMethod(const_cast<Animation*>(this), "getJointNames", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(QStringList, result));
return result;
}
QStringList names;
foreach (const FBXJoint& joint, _geometry.joints) {
names.append(joint.name);
}
return names;
}
QVector<FBXAnimationFrame> Animation::getFrames() const {
if (QThread::currentThread() != thread()) {
QVector<FBXAnimationFrame> result;
QMetaObject::invokeMethod(const_cast<Animation*>(this), "getFrames", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(QVector<FBXAnimationFrame>, result));
return result;
}
return _geometry.animationFrames;
}
void Animation::setGeometry(const FBXGeometry& geometry) {
_geometry = geometry;
finishedLoading(true);
}
void Animation::downloadFinished(QNetworkReply* reply) {
// send the reader off to the thread pool
QThreadPool::globalInstance()->start(new AnimationReader(_self, reply));
}
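A usage sketch for the new cache (the URL is a placeholder): the getters are safe from any thread because off-thread calls are marshalled with BlockingQueuedConnection, and the FBX parse itself runs in the global thread pool:

AnimationCache cache;
AnimationPointer animation = cache.getAnimation(QUrl("http://example.com/idle.fbx"));
// the pointer is returned immediately; frames appear once the download finishes
// and AnimationReader invokes setGeometry back on the cache's thread
QStringList joints = animation->getJointNames();            // empty until loaded
QVector<FBXAnimationFrame> frames = animation->getFrames();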

View file

@ -0,0 +1,68 @@
//
// AnimationCache.h
// libraries/script-engine/src/
//
// Created by Andrzej Kapolka on 4/14/14.
// Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AnimationCache_h
#define hifi_AnimationCache_h
#include <ResourceCache.h>
#include <FBXReader.h>
class Animation;
typedef QSharedPointer<Animation> AnimationPointer;
/// Scriptable interface for FBX animation loading.
class AnimationCache : public ResourceCache {
Q_OBJECT
public:
AnimationCache(QObject* parent = NULL);
Q_INVOKABLE AnimationPointer getAnimation(const QString& url) { return getAnimation(QUrl(url)); }
Q_INVOKABLE AnimationPointer getAnimation(const QUrl& url);
protected:
virtual QSharedPointer<Resource> createResource(const QUrl& url,
const QSharedPointer<Resource>& fallback, bool delayLoad, const void* extra);
};
Q_DECLARE_METATYPE(AnimationPointer)
/// An animation loaded from the network.
class Animation : public Resource {
Q_OBJECT
public:
Animation(const QUrl& url);
const FBXGeometry& getGeometry() const { return _geometry; }
Q_INVOKABLE QStringList getJointNames() const;
Q_INVOKABLE QVector<FBXAnimationFrame> getFrames() const;
protected:
Q_INVOKABLE void setGeometry(const FBXGeometry& geometry);
virtual void downloadFinished(QNetworkReply* reply);
private:
FBXGeometry _geometry;
};
#endif // hifi_AnimationCache_h

View file

@ -0,0 +1,36 @@
//
// AnimationObject.cpp
// libraries/script-engine/src/
//
// Created by Andrzej Kapolka on 4/17/14.
// Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <QScriptEngine>
#include "AnimationCache.h"
#include "AnimationObject.h"
QStringList AnimationObject::getJointNames() const {
return qscriptvalue_cast<AnimationPointer>(thisObject())->getJointNames();
}
QVector<FBXAnimationFrame> AnimationObject::getFrames() const {
return qscriptvalue_cast<AnimationPointer>(thisObject())->getFrames();
}
QVector<glm::quat> AnimationFrameObject::getRotations() const {
return qscriptvalue_cast<FBXAnimationFrame>(thisObject()).rotations;
}
void registerAnimationTypes(QScriptEngine* engine) {
qScriptRegisterSequenceMetaType<QVector<FBXAnimationFrame> >(engine);
engine->setDefaultPrototype(qMetaTypeId<FBXAnimationFrame>(), engine->newQObject(
new AnimationFrameObject(), QScriptEngine::ScriptOwnership));
engine->setDefaultPrototype(qMetaTypeId<AnimationPointer>(), engine->newQObject(
new AnimationObject(), QScriptEngine::ScriptOwnership));
}

View file

@ -0,0 +1,47 @@
//
// AnimationObject.h
// libraries/script-engine/src/
//
// Created by Andrzej Kapolka on 4/17/14.
// Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AnimationObject_h
#define hifi_AnimationObject_h
#include <QObject>
#include <QScriptable>
#include <FBXReader.h>
class QScriptEngine;
/// Scriptable wrapper for animation pointers.
class AnimationObject : public QObject, protected QScriptable {
Q_OBJECT
Q_PROPERTY(QStringList jointNames READ getJointNames)
Q_PROPERTY(QVector<FBXAnimationFrame> frames READ getFrames)
public:
Q_INVOKABLE QStringList getJointNames() const;
Q_INVOKABLE QVector<FBXAnimationFrame> getFrames() const;
};
/// Scriptable wrapper for animation frames.
class AnimationFrameObject : public QObject, protected QScriptable {
Q_OBJECT
Q_PROPERTY(QVector<glm::quat> rotations READ getRotations)
public:
Q_INVOKABLE QVector<glm::quat> getRotations() const;
};
void registerAnimationTypes(QScriptEngine* engine);
#endif // hifi_AnimationObject_h

View file

@ -28,6 +28,7 @@
#include <Sound.h>
#include "AnimationObject.h"
#include "MenuItemProperties.h"
#include "LocalVoxels.h"
#include "ScriptEngine.h"
@ -64,7 +65,8 @@ ScriptEngine::ScriptEngine(const QString& scriptContents, const QString& fileNam
_fileNameString(fileNameString),
_quatLibrary(),
_vec3Library(),
_uuidLibrary()
_uuidLibrary(),
_animationCache(this)
{
}
@ -88,7 +90,8 @@ ScriptEngine::ScriptEngine(const QUrl& scriptURL,
_fileNameString(),
_quatLibrary(),
_vec3Library(),
_uuidLibrary()
_uuidLibrary(),
_animationCache(this)
{
QString scriptURLString = scriptURL.toString();
_fileNameString = scriptURLString;
@ -180,11 +183,13 @@ void ScriptEngine::init() {
registerVoxelMetaTypes(&_engine);
registerEventTypes(&_engine);
registerMenuItemProperties(&_engine);
registerAnimationTypes(&_engine);
qScriptRegisterMetaType(&_engine, ParticlePropertiesToScriptValue, ParticlePropertiesFromScriptValue);
qScriptRegisterMetaType(&_engine, ParticleIDtoScriptValue, ParticleIDfromScriptValue);
qScriptRegisterSequenceMetaType<QVector<ParticleID> >(&_engine);
qScriptRegisterSequenceMetaType<QVector<glm::vec2> >(&_engine);
qScriptRegisterSequenceMetaType<QVector<glm::quat> >(&_engine);
qScriptRegisterSequenceMetaType<QVector<QString> >(&_engine);
QScriptValue soundConstructorValue = _engine.newFunction(soundConstructor);
@ -204,7 +209,8 @@ void ScriptEngine::init() {
registerGlobalObject("Quat", &_quatLibrary);
registerGlobalObject("Vec3", &_vec3Library);
registerGlobalObject("Uuid", &_uuidLibrary);
registerGlobalObject("AnimationCache", &_animationCache);
registerGlobalObject("Voxels", &_voxelsScriptingInterface);
QScriptValue treeScaleValue = _engine.newVariant(QVariant(TREE_SCALE));

View file

@ -23,6 +23,7 @@
#include <AvatarData.h>
#include "AnimationCache.h"
#include "AbstractControllerScriptingInterface.h"
#include "Quat.h"
#include "ScriptUUID.h"
@ -125,6 +126,7 @@ private:
Quat _quatLibrary;
Vec3 _vec3Library;
ScriptUUID _uuidLibrary;
AnimationCache _animationCache;
};
#endif // hifi_ScriptEngine_h

View file

@ -12,6 +12,7 @@
#include <cfloat>
#include <cmath>
#include <QThread>
#include <QTimer>
#include <QtDebug>
@ -174,6 +175,10 @@ float Resource::getLoadPriority() {
}
void Resource::allReferencesCleared() {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "allReferencesCleared");
return;
}
if (_cache) {
// create and reinsert new shared pointer
QSharedPointer<Resource> self(this, &Resource::allReferencesCleared);
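The thread hop matters because resource pointers are handed out with Resource::allReferencesCleared as their custom deleter (see TextureCache and AnimationCache above), so the callback can fire on whatever thread dropped the last reference. A sketch of the pattern:

// how resource pointers are created throughout the caches:
QSharedPointer<Resource> self(new Animation(url), &Resource::allReferencesCleared);
// if the last copy of 'self' is released on, say, a script thread, the deleter
// runs there; the queued re-invocation above bounces it to the resource's own
// thread before the unused-resource bookkeeping in _cache is touched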

View file

@ -123,7 +123,7 @@ public:
void setCache(ResourceCache* cache) { _cache = cache; }
void allReferencesCleared();
Q_INVOKABLE void allReferencesCleared();
protected slots:

View file

@ -51,6 +51,10 @@ float randFloatInRange (float min,float max) {
return min + ((rand() % 10000)/10000.f * (max-min));
}
float randomSign() {
return randomBoolean() ? -1.0f : 1.0f;
}
unsigned char randomColorValue(int minimum) {
return minimum + (rand() % (256 - minimum));
}
@ -659,3 +663,125 @@ glm::vec3 safeEulerAngles(const glm::quat& q) {
}
}
// Helper function returns the positive angle (in radians) between two 3D vectors
float angleBetween(const glm::vec3& v1, const glm::vec3& v2) {
return acosf((glm::dot(v1, v2)) / (glm::length(v1) * glm::length(v2)));
}
// Helper function return the rotation from the first vector onto the second
glm::quat rotationBetween(const glm::vec3& v1, const glm::vec3& v2) {
float angle = angleBetween(v1, v2);
if (glm::isnan(angle) || angle < EPSILON) {
return glm::quat();
}
glm::vec3 axis;
if (angle > 179.99f * RADIANS_PER_DEGREE) { // 180 degree rotation; must use another axis
axis = glm::cross(v1, glm::vec3(1.0f, 0.0f, 0.0f));
float axisLength = glm::length(axis);
if (axisLength < EPSILON) { // parallel to x; y will work
axis = glm::normalize(glm::cross(v1, glm::vec3(0.0f, 1.0f, 0.0f)));
} else {
axis /= axisLength;
}
} else {
axis = glm::normalize(glm::cross(v1, v2));
}
return glm::angleAxis(angle, axis);
}
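A quick check of the degenerate case handled above (values chosen for illustration): rotating +x onto -x has no unique axis, so an axis perpendicular to v1 is picked:

glm::vec3 a(1.0f, 0.0f, 0.0f);
glm::vec3 b(-1.0f, 0.0f, 0.0f);    // exactly opposite: angle ~= pi
glm::quat q = rotationBetween(a, b);
// glm::cross(a, b) is ~zero; a is parallel to x, so the y-axis fallback
// yields the z axis, and q rotates a onto b
glm::vec3 rotated = q * a;         // ~(-1, 0, 0)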
glm::vec3 extractTranslation(const glm::mat4& matrix) {
return glm::vec3(matrix[3][0], matrix[3][1], matrix[3][2]);
}
void setTranslation(glm::mat4& matrix, const glm::vec3& translation) {
matrix[3][0] = translation.x;
matrix[3][1] = translation.y;
matrix[3][2] = translation.z;
}
glm::quat extractRotation(const glm::mat4& matrix, bool assumeOrthogonal) {
// uses the iterative polar decomposition algorithm described by Ken Shoemake at
// http://www.cs.wisc.edu/graphics/Courses/838-s2002/Papers/polar-decomp.pdf
// code adapted from Clyde, https://github.com/threerings/clyde/blob/master/src/main/java/com/threerings/math/Matrix4f.java
// start with the contents of the upper 3x3 portion of the matrix
glm::mat3 upper = glm::mat3(matrix);
if (!assumeOrthogonal) {
for (int i = 0; i < 10; i++) {
// store the results of the previous iteration
glm::mat3 previous = upper;
// compute average of the matrix with its inverse transpose
float sd00 = previous[1][1] * previous[2][2] - previous[2][1] * previous[1][2];
float sd10 = previous[0][1] * previous[2][2] - previous[2][1] * previous[0][2];
float sd20 = previous[0][1] * previous[1][2] - previous[1][1] * previous[0][2];
float det = previous[0][0] * sd00 + previous[2][0] * sd20 - previous[1][0] * sd10;
if (fabs(det) == 0.0f) {
// determinant is zero; matrix is not invertible
break;
}
float hrdet = 0.5f / det;
upper[0][0] = +sd00 * hrdet + previous[0][0] * 0.5f;
upper[1][0] = -sd10 * hrdet + previous[1][0] * 0.5f;
upper[2][0] = +sd20 * hrdet + previous[2][0] * 0.5f;
upper[0][1] = -(previous[1][0] * previous[2][2] - previous[2][0] * previous[1][2]) * hrdet + previous[0][1] * 0.5f;
upper[1][1] = +(previous[0][0] * previous[2][2] - previous[2][0] * previous[0][2]) * hrdet + previous[1][1] * 0.5f;
upper[2][1] = -(previous[0][0] * previous[1][2] - previous[1][0] * previous[0][2]) * hrdet + previous[2][1] * 0.5f;
upper[0][2] = +(previous[1][0] * previous[2][1] - previous[2][0] * previous[1][1]) * hrdet + previous[0][2] * 0.5f;
upper[1][2] = -(previous[0][0] * previous[2][1] - previous[2][0] * previous[0][1]) * hrdet + previous[1][2] * 0.5f;
upper[2][2] = +(previous[0][0] * previous[1][1] - previous[1][0] * previous[0][1]) * hrdet + previous[2][2] * 0.5f;
// compute the difference; if it's small enough, we're done
glm::mat3 diff = upper - previous;
if (diff[0][0] * diff[0][0] + diff[1][0] * diff[1][0] + diff[2][0] * diff[2][0] + diff[0][1] * diff[0][1] +
diff[1][1] * diff[1][1] + diff[2][1] * diff[2][1] + diff[0][2] * diff[0][2] + diff[1][2] * diff[1][2] +
diff[2][2] * diff[2][2] < EPSILON) {
break;
}
}
}
// now that we have a nice orthogonal matrix, we can extract the rotation quaternion
// using the method described in http://en.wikipedia.org/wiki/Rotation_matrix#Conversions
float x2 = fabs(1.0f + upper[0][0] - upper[1][1] - upper[2][2]);
float y2 = fabs(1.0f - upper[0][0] + upper[1][1] - upper[2][2]);
float z2 = fabs(1.0f - upper[0][0] - upper[1][1] + upper[2][2]);
float w2 = fabs(1.0f + upper[0][0] + upper[1][1] + upper[2][2]);
return glm::normalize(glm::quat(0.5f * sqrtf(w2),
0.5f * sqrtf(x2) * (upper[1][2] >= upper[2][1] ? 1.0f : -1.0f),
0.5f * sqrtf(y2) * (upper[2][0] >= upper[0][2] ? 1.0f : -1.0f),
0.5f * sqrtf(z2) * (upper[0][1] >= upper[1][0] ? 1.0f : -1.0f)));
}
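As a sanity-check sketch of extractRotation(): composing a rotation with a non-uniform scale and extracting it back should recover the original quaternion (up to sign), since the iteration converges on the orthogonal factor of the polar decomposition M = R * S:

glm::quat original = glm::angleAxis(0.7f, glm::normalize(glm::vec3(1.0f, 2.0f, 3.0f)));
glm::mat4 matrix = glm::mat4_cast(original) * glm::scale(glm::vec3(2.0f, 0.5f, 1.5f));
glm::quat recovered = extractRotation(matrix);   // iterative path
// recovered ~= original (or its negation, the same rotation); pass
// assumeOrthogonal = true only when the upper 3x3 is already a pure
// rotation, which skips the iteration entirely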
glm::vec3 extractScale(const glm::mat4& matrix) {
return glm::vec3(glm::length(matrix[0]), glm::length(matrix[1]), glm::length(matrix[2]));
}
float extractUniformScale(const glm::mat4& matrix) {
return extractUniformScale(extractScale(matrix));
}
float extractUniformScale(const glm::vec3& scale) {
return (scale.x + scale.y + scale.z) / 3.0f;
}
bool isNaN(float value) {
return value != value;
}
bool isSimilarOrientation(const glm::quat& orientationA, const glm::quat& orientationB, float similarEnough) {
// Compute the angular distance between the two orientations
float angleOrientation = orientationA == orientationB ? 0.0f : glm::degrees(glm::angle(orientationA * glm::inverse(orientationB)));
if (isNaN(angleOrientation)) {
angleOrientation = 0.0f;
}
return (angleOrientation <= similarEnough);
}
bool isSimilarPosition(const glm::vec3& positionA, const glm::vec3& positionB, float similarEnough) {
// Compute the distance between the two points
float positionDistance = glm::distance(positionA, positionB);
return (positionDistance <= similarEnough);
}
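And a small usage sketch for the similarity helpers, using the default thresholds declared in SharedUtil.h:

glm::quat a = glm::angleAxis(glm::radians(0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
glm::quat b = glm::angleAxis(glm::radians(3.0f), glm::vec3(0.0f, 1.0f, 0.0f));
bool similar = isSimilarOrientation(a, b);   // true: 3 degrees <= 5 degrees
bool nearby = isSimilarPosition(glm::vec3(0.0f), glm::vec3(0.05f, 0.0f, 0.0f)); // true: 0.05m <= 0.1m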

View file

@ -73,6 +73,7 @@ void usecTimestampNowForceClockSkew(int clockSkew);
float randFloat();
int randIntInRange (int min, int max);
float randFloatInRange (float min,float max);
float randomSign(); /// \return -1.0 or 1.0
unsigned char randomColorValue(int minimum);
bool randomBoolean();
@ -166,4 +167,30 @@ int unpackFloatVec3FromSignedTwoByteFixed(const unsigned char* sourceBuffer, glm
/// \return vec3 with euler angles in radians
glm::vec3 safeEulerAngles(const glm::quat& q);
float angleBetween(const glm::vec3& v1, const glm::vec3& v2);
glm::quat rotationBetween(const glm::vec3& v1, const glm::vec3& v2);
glm::vec3 extractTranslation(const glm::mat4& matrix);
void setTranslation(glm::mat4& matrix, const glm::vec3& translation);
glm::quat extractRotation(const glm::mat4& matrix, bool assumeOrthogonal = false);
glm::vec3 extractScale(const glm::mat4& matrix);
float extractUniformScale(const glm::mat4& matrix);
float extractUniformScale(const glm::vec3& scale);
/// \return bool are two orientations similar to each other
const float ORIENTATION_SIMILAR_ENOUGH = 5.0f; // within 5 degrees of angular difference
bool isSimilarOrientation(const glm::quat& orientationA, const glm::quat& orientationB,
float similarEnough = ORIENTATION_SIMILAR_ENOUGH);
const float POSITION_SIMILAR_ENOUGH = 0.1f; // 0.1 meter
bool isSimilarPosition(const glm::vec3& positionA, const glm::vec3& positionB, float similarEnough = POSITION_SIMILAR_ENOUGH);
/// \return bool is the float NaN
bool isNaN(float value);
#endif // hifi_SharedUtil_h

View file

@ -75,6 +75,9 @@ QScriptValue rayToVoxelIntersectionResultToScriptValue(QScriptEngine* engine, co
case MAX_Z_FACE:
faceName = "MAX_Z_FACE";
break;
case UNKNOWN_FACE:
faceName = "UNKNOWN_FACE";
break;
}
obj.setProperty("face", faceName);