Merge branch 'master' of https://github.com/highfidelity/hifi into placement_tool
Conflicts: interface/src/ui/Stats.cpp
Commit 8a7a590602
21 changed files with 567 additions and 102 deletions
@@ -847,8 +847,9 @@ bool DomainServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url
// this is a script upload - ask the HTTPConnection to parse the form data
QList<FormData> formData = connection->parseFormData();

// check how many instances of this assignment the user wants by checking the ASSIGNMENT-INSTANCES header
// check optional headers for # of instances and pool
const QString ASSIGNMENT_INSTANCES_HEADER = "ASSIGNMENT-INSTANCES";
const QString ASSIGNMENT_POOL_HEADER = "ASSIGNMENT-POOL";

QByteArray assignmentInstancesValue = connection->requestHeaders().value(ASSIGNMENT_INSTANCES_HEADER.toLocal8Bit());

@@ -860,25 +861,34 @@ bool DomainServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url
numInstances = assignmentInstancesValue.toInt();
}

QString assignmentPool = emptyPool;
QByteArray assignmentPoolValue = connection->requestHeaders().value(ASSIGNMENT_POOL_HEADER.toLocal8Bit());

if (!assignmentPoolValue.isEmpty()) {
// specific pool requested, set that on the created assignment
assignmentPool = QString(assignmentPoolValue);
}

const char ASSIGNMENT_SCRIPT_HOST_LOCATION[] = "resources/web/assignment";

for (int i = 0; i < numInstances; i++) {

// create an assignment for this saved script
Assignment* scriptAssignment = new Assignment(Assignment::CreateCommand, Assignment::AgentType);
Assignment* scriptAssignment = new Assignment(Assignment::CreateCommand, Assignment::AgentType, assignmentPool);

QString newPath(ASSIGNMENT_SCRIPT_HOST_LOCATION);
newPath += "/";
// append the UUID for this script as the new filename, remove the curly braces
newPath += uuidStringWithoutCurlyBraces(scriptAssignment->getUUID());

// create a file with the GUID of the assignment in the script host locaiton
// create a file with the GUID of the assignment in the script host location
QFile scriptFile(newPath);
scriptFile.open(QIODevice::WriteOnly);
scriptFile.write(formData[0].second);

qDebug("Saved a script for assignment at %s", qPrintable(newPath));
qDebug() << qPrintable(QString("Saved a script for assignment at %1%2")
.arg(newPath).arg(assignmentPool == emptyPool ? "" : " - pool is " + assignmentPool));

// add the script assignment to the assignment queue
_assignmentQueue.enqueue(SharedAssignmentPointer(scriptAssignment));
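
For context, a client drives this path by POSTing multipart form data carrying the script body, with the two optional headers shown above controlling instance count and pool. A minimal Qt sketch under stated assumptions - the URL, port, form-field name, and script body below are placeholders, not values taken from this diff:

    #include <QtNetwork/QNetworkAccessManager>
    #include <QtNetwork/QNetworkRequest>
    #include <QtNetwork/QNetworkReply>
    #include <QtNetwork/QHttpMultiPart>

    void uploadScript(QNetworkAccessManager& manager) {
        // hypothetical address and path - the real endpoint is outside this diff
        QNetworkRequest request(QUrl("http://localhost:40100/assignment"));
        request.setRawHeader("ASSIGNMENT-INSTANCES", "3");   // ask for three agent instances
        request.setRawHeader("ASSIGNMENT-POOL", "demo-pool"); // optional pool name

        QHttpMultiPart* multiPart = new QHttpMultiPart(QHttpMultiPart::FormDataType);
        QHttpPart scriptPart;
        scriptPart.setHeader(QNetworkRequest::ContentDispositionHeader,
                             QVariant("form-data; name=\"script\"; filename=\"script.js\""));
        scriptPart.setBody("print(\"hello from an assigned script\");");
        multiPart->append(scriptPart);

        QNetworkReply* reply = manager.post(request, multiPart);
        multiPart->setParent(reply); // the multipart is deleted along with the reply
    }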

@@ -19,6 +19,8 @@ var reflectiveScale = 100.0;
var diffusionScale = 100.0;
var absorptionScale = 100.0;
var combFilterScale = 50.0;
var originalScale = 2.0;
var echoesScale = 2.0;

// these three properties are bound together, if you change one, the others will also change
var reflectiveRatio = AudioReflector.getReflectiveRatio();

@@ -421,6 +423,84 @@ var absorptionThumb = Overlays.addOverlay("image", {
alpha: 1
});

var originalY = topY;
topY += sliderHeight;

var originalLabel = Overlays.addOverlay("text", {
x: 40,
y: originalY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Original\nMix:"
});

var originalSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: originalY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});

var originalMinThumbX = 110;
var originalMaxThumbX = originalMinThumbX + 110;
var originalThumbX = originalMinThumbX + ((originalMaxThumbX - originalMinThumbX) * (AudioReflector.getOriginalSourceAttenuation() / originalScale));
var originalThumb = Overlays.addOverlay("image", {
x: originalThumbX,
y: originalY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 128, green: 128, blue: 0},
alpha: 1
});

var echoesY = topY;
topY += sliderHeight;

var echoesLabel = Overlays.addOverlay("text", {
x: 40,
y: echoesY,
width: 60,
height: sliderHeight,
color: { red: 0, green: 0, blue: 0},
textColor: { red: 255, green: 255, blue: 255},
topMargin: 6,
leftMargin: 5,
text: "Echoes\nMix:"
});

var echoesSlider = Overlays.addOverlay("image", {
// alternate form of expressing bounds
bounds: { x: 100, y: echoesY, width: 150, height: sliderHeight},
subImage: { x: 46, y: 0, width: 200, height: 71 },
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
color: { red: 255, green: 255, blue: 255},
alpha: 1
});

var echoesMinThumbX = 110;
var echoesMaxThumbX = echoesMinThumbX + 110;
var echoesThumbX = echoesMinThumbX + ((echoesMaxThumbX - echoesMinThumbX) * (AudioReflector.getEchoesAttenuation() / echoesScale));
var echoesThumb = Overlays.addOverlay("image", {
x: echoesThumbX,
y: echoesY+9,
width: 18,
height: 17,
imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
color: { red: 128, green: 128, blue: 0},
alpha: 1
});
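
A note on the mapping these sliders share: each thumb position is a linear function of its parameter value v (with scale constant s), and the mouse handlers below apply the inverse:

    \[ x_{thumb} = x_{min} + (x_{max} - x_{min}) \cdot \frac{v}{s}, \qquad v = s \cdot \frac{x_{thumb} - x_{min}}{x_{max} - x_{min}} \]

Dragging a thumb across its 110-pixel range therefore sweeps v from 0 to s, so the two mix sliders above cover 0 to 2.0.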

// When our script shuts down, we should clean up all of our overlays
function scriptEnding() {

@@ -460,6 +540,14 @@ function scriptEnding() {
Overlays.deleteOverlay(absorptionThumb);
Overlays.deleteOverlay(absorptionSlider);

Overlays.deleteOverlay(echoesLabel);
Overlays.deleteOverlay(echoesThumb);
Overlays.deleteOverlay(echoesSlider);

Overlays.deleteOverlay(originalLabel);
Overlays.deleteOverlay(originalThumb);
Overlays.deleteOverlay(originalSlider);

}
Script.scriptEnding.connect(scriptEnding);

@@ -483,6 +571,8 @@ var movingSliderLocalFactor = false;
var movingSliderReflective = false;
var movingSliderDiffusion = false;
var movingSliderAbsorption = false;
var movingSliderOriginal = false;
var movingSliderEchoes = false;

var thumbClickOffsetX = 0;
function mouseMoveEvent(event) {

@@ -546,7 +636,6 @@ function mouseMoveEvent(event) {
var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
AudioReflector.setCombFilterWindow(combFilter);
}

if (movingSliderLocalFactor) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < localFactorMinThumbX) {

@@ -598,6 +687,30 @@ function mouseMoveEvent(event) {
var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
setDiffusionRatio(diffusion);
}
if (movingSliderEchoes) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < echoesMinThumbX) {
newThumbX = echoesMinThumbX;
}
if (newThumbX > echoesMaxThumbX) {
newThumbX = echoesMaxThumbX;
}
Overlays.editOverlay(echoesThumb, { x: newThumbX } );
var echoes = ((newThumbX - echoesMinThumbX) / (echoesMaxThumbX - echoesMinThumbX)) * echoesScale;
AudioReflector.setEchoesAttenuation(echoes);
}
if (movingSliderOriginal) {
newThumbX = event.x - thumbClickOffsetX;
if (newThumbX < originalMinThumbX) {
newThumbX = originalMinThumbX;
}
if (newThumbX > originalMaxThumbX) {
newThumbX = originalMaxThumbX;
}
Overlays.editOverlay(originalThumb, { x: newThumbX } );
var original = ((newThumbX - originalMinThumbX) / (originalMaxThumbX - originalMinThumbX)) * originalScale;
AudioReflector.setOriginalSourceAttenuation(original);
}

}

@@ -640,7 +753,16 @@ function mousePressEvent(event) {
movingSliderReflective = true;
thumbClickOffsetX = event.x - reflectiveThumbX;
}
if (clickedOverlay == originalThumb) {
movingSliderOriginal = true;
thumbClickOffsetX = event.x - originalThumbX;
}
if (clickedOverlay == echoesThumb) {
movingSliderEchoes = true;
thumbClickOffsetX = event.x - echoesThumbX;
}
}

function mouseReleaseEvent(event) {
if (movingSliderDelay) {
movingSliderDelay = false;

@@ -672,14 +794,12 @@ function mouseReleaseEvent(event) {
AudioReflector.setCombFilterWindow(combFilter);
combFilterThumbX = newThumbX;
}

if (movingSliderLocalFactor) {
movingSliderLocalFactor = false;
var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale;
AudioReflector.setLocalAudioAttenuationFactor(localFactor);
localFactorThumbX = newThumbX;
}

if (movingSliderReflective) {
movingSliderReflective = false;
var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale;

@@ -687,7 +807,6 @@ function mouseReleaseEvent(event) {
reflectiveThumbX = newThumbX;
updateRatioSliders();
}

if (movingSliderDiffusion) {
movingSliderDiffusion = false;
var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;

@@ -695,7 +814,6 @@ function mouseReleaseEvent(event) {
diffusionThumbX = newThumbX;
updateRatioSliders();
}

if (movingSliderAbsorption) {
movingSliderAbsorption = false;
var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale;

@@ -703,6 +821,18 @@ function mouseReleaseEvent(event) {
absorptionThumbX = newThumbX;
updateRatioSliders();
}
if (movingSliderEchoes) {
movingSliderEchoes = false;
var echoes = ((newThumbX - echoesMinThumbX) / (echoesMaxThumbX - echoesMinThumbX)) * echoesScale;
AudioReflector.setEchoesAttenuation(echoes);
echoesThumbX = newThumbX;
}
if (movingSliderOriginal) {
movingSliderOriginal = false;
var original = ((newThumbX - originalMinThumbX) / (originalMaxThumbX - originalMinThumbX)) * originalScale;
AudioReflector.setOriginalSourceAttenuation(original);
originalThumbX = newThumbX;
}
}

Controller.mouseMoveEvent.connect(mouseMoveEvent);

@@ -11,7 +11,7 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

var animation = AnimationCache.getAnimation("FBX_URL");
var animation = AnimationCache.getAnimation("http://www.fungibleinsight.com/faces/gangnam_style_2.fbx");

Avatar.skeletonModelURL = "http://www.fungibleinsight.com/faces/beta.fst";

@@ -27,7 +27,7 @@ Script.update.connect(function(deltaTime) {
if (!jointMapping) {
var avatarJointNames = Avatar.jointNames;
var animationJointNames = animation.jointNames;
if (avatarJointNames === 0 || animationJointNames.length === 0) {
if (avatarJointNames.length === 0 || animationJointNames.length === 0) {
return;
}
jointMapping = new Array(avatarJointNames.length);

@@ -356,6 +356,8 @@ Application::Application(int& argc, char** argv, timeval &startup_time) :
QMutexLocker locker(&_settingsMutex);
_previousScriptLocation = _settings->value("LastScriptLocation", QVariant("")).toString();
}
// When -url is given on the command line, teleport to that location
urlGoTo(argc, constArgv);
}

Application::~Application() {

@@ -1679,8 +1681,12 @@ void Application::init() {
_audioReflector.setMyAvatar(getAvatar());
_audioReflector.setVoxels(_voxels.getTree());
_audioReflector.setAudio(getAudio());
_audioReflector.setAvatarManager(&_avatarManager);

connect(getAudio(), &Audio::processInboundAudio, &_audioReflector, &AudioReflector::processInboundAudio, Qt::DirectConnection);
connect(getAudio(), &Audio::processLocalAudio, &_audioReflector, &AudioReflector::processLocalAudio, Qt::DirectConnection);
connect(getAudio(), &Audio::preProcessOriginalInboundAudio, &_audioReflector,
&AudioReflector::preProcessOriginalInboundAudio, Qt::DirectConnection);

// save settings when avatar changes
connect(_myAvatar, &MyAvatar::transformChanged, this, &Application::bumpSettings);

@@ -3020,6 +3026,7 @@ void Application::resetSensors() {
_mouseX = _glWidget->width() / 2;
_mouseY = _glWidget->height() / 2;

_faceplus.reset();
_faceshift.reset();
_visage.reset();

@@ -3561,3 +3568,38 @@ void Application::takeSnapshot() {

Snapshot::saveSnapshot(_glWidget, _myAvatar);
}

void Application::urlGoTo(int argc, const char * constArgv[]) {
// Gets the url (hifi://domain/destination/orientation)
QString customUrl = getCmdOption(argc, constArgv, "-url");

if (customUrl.startsWith("hifi://")) {
QStringList urlParts = customUrl.remove(0, CUSTOM_URL_SCHEME.length() + 2).split('/', QString::SkipEmptyParts);
if (urlParts.count() > 1) {
// if url has 2 or more parts, the first one is domain name
QString domain = urlParts[0];

// second part is either a destination coordinate or
// a place name
QString destination = urlParts[1];

// any third part is an avatar orientation.
QString orientation = urlParts.count() > 2 ? urlParts[2] : QString();

Menu::goToDomain(domain);

// goto either @user, #place, or x-xx,y-yy,z-zz
// style co-ordinate.
Menu::goTo(destination);

if (!orientation.isEmpty()) {
// location orientation
Menu::goToOrientation(orientation);
}
} else if (urlParts.count() == 1) {
// location coordinates or place name
QString destination = urlParts[0];
Menu::goTo(destination);
}
}
}
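
Concretely, per the comments above, the -url argument accepts shapes like these (the domain and names here are hypothetical examples, not values from the diff):

    -url hifi://somedomain/@someuser
    -url hifi://somedomain/#someplace
    -url hifi://somedomain/25.5,0.1,300.3/orientation

A single-part URL such as hifi://@someuser skips the domain switch entirely and is handed straight to Menu::goTo as a destination.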

@@ -129,6 +129,7 @@ public:
void initializeGL();
void paintGL();
void resizeGL(int width, int height);
void urlGoTo(int argc, const char * constArgv[]);

void keyPressEvent(QKeyEvent* event);
void keyReleaseEvent(QKeyEvent* event);

@@ -784,6 +784,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
_ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
// Accumulate direct transmission of audio from sender to receiver
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
}

@@ -99,6 +99,7 @@ public slots:

signals:
bool muteToggled();
void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format);
void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);

@@ -25,6 +25,8 @@ const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on

const float DEFAULT_ABSORPTION_RATIO = 0.125; // 12.5% is absorbed
const float DEFAULT_DIFFUSION_RATIO = 0.125; // 12.5% is diffused
const float DEFAULT_ORIGINAL_ATTENUATION = 1.0f;
const float DEFAULT_ECHO_ATTENUATION = 1.0f;

AudioReflector::AudioReflector(QObject* parent) :
QObject(parent),

@@ -36,6 +38,8 @@ AudioReflector::AudioReflector(QObject* parent) :
_diffusionFanout(DEFAULT_DIFFUSION_FANOUT),
_absorptionRatio(DEFAULT_ABSORPTION_RATIO),
_diffusionRatio(DEFAULT_DIFFUSION_RATIO),
_originalSourceAttenuation(DEFAULT_ORIGINAL_ATTENUATION),
_allEchoesAttenuation(DEFAULT_ECHO_ATTENUATION),
_withDiffusion(false),
_lastPreDelay(DEFAULT_PRE_DELAY),
_lastSoundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),

@@ -43,20 +47,29 @@ AudioReflector::AudioReflector(QObject* parent) :
_lastLocalAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
_lastDiffusionFanout(DEFAULT_DIFFUSION_FANOUT),
_lastAbsorptionRatio(DEFAULT_ABSORPTION_RATIO),
_lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO)
_lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO),
_lastDontDistanceAttenuate(false),
_lastAlternateDistanceAttenuate(false)
{
_reflections = 0;
_diffusionPathCount = 0;
_averageAttenuation = 0.0f;
_maxAttenuation = 0.0f;
_minAttenuation = 0.0f;
_averageDelay = 0;
_maxDelay = 0;
_minDelay = 0;
_officialAverageAttenuation = _averageAttenuation = 0.0f;
_officialMaxAttenuation = _maxAttenuation = 0.0f;
_officialMinAttenuation = _minAttenuation = 0.0f;
_officialAverageDelay = _averageDelay = 0;
_officialMaxDelay = _maxDelay = 0;
_officialMinDelay = _minDelay = 0;
_inboundEchoesCount = 0;
_inboundEchoesSuppressedCount = 0;
_localEchoesCount = 0;
_localEchoesSuppressedCount = 0;
}

bool AudioReflector::haveAttributesChanged() {
bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
bool dontDistanceAttenuate = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingDontDistanceAttenuate);
bool alternateDistanceAttenuate = Menu::getInstance()->isOptionChecked(
MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);

bool attributesChange = (_withDiffusion != withDiffusion
|| _lastPreDelay != _preDelay

@@ -64,7 +77,9 @@ bool AudioReflector::haveAttributesChanged() {
|| _lastDistanceAttenuationScalingFactor != _distanceAttenuationScalingFactor
|| _lastDiffusionFanout != _diffusionFanout
|| _lastAbsorptionRatio != _absorptionRatio
|| _lastDiffusionRatio != _diffusionRatio);
|| _lastDiffusionRatio != _diffusionRatio
|| _lastDontDistanceAttenuate != dontDistanceAttenuate
|| _lastAlternateDistanceAttenuate != alternateDistanceAttenuate);

if (attributesChange) {
_withDiffusion = withDiffusion;

@@ -74,6 +89,8 @@ bool AudioReflector::haveAttributesChanged() {
_lastDiffusionFanout = _diffusionFanout;
_lastAbsorptionRatio = _absorptionRatio;
_lastDiffusionRatio = _diffusionRatio;
_lastDontDistanceAttenuate = dontDistanceAttenuate;
_lastAlternateDistanceAttenuate = alternateDistanceAttenuate;
}

return attributesChange;

@@ -107,19 +124,47 @@ float AudioReflector::getDelayFromDistance(float distance) {

// attenuation = from the Audio Mixer
float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
const float DISTANCE_SCALE = 2.5f;
const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
const float DISTANCE_LOG_BASE = 2.5f;
const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);

bool doDistanceAttenuation = !Menu::getInstance()->isOptionChecked(
MenuOption::AudioSpatialProcessingDontDistanceAttenuate);

bool originalFormula = !Menu::getInstance()->isOptionChecked(
MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);

float distanceSquareToSource = distance * distance;

float distanceCoefficient = 1.0f;

if (doDistanceAttenuation) {

if (originalFormula) {
const float DISTANCE_SCALE = 2.5f;
const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
const float DISTANCE_LOG_BASE = 2.5f;
const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);

float distanceSquareToSource = distance * distance;

// calculate the distance coefficient using the distance to this node
float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
DISTANCE_SCALE_LOG +
(0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);

distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor());
// calculate the distance coefficient using the distance to this node
distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
DISTANCE_SCALE_LOG +
(0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor());
} else {

// From Fred: If we wanted something that would produce a tail that could go up to 5 seconds in a
// really big room, that would suggest the sound still has to be audible after traveling about
// 1500 meters. If it's a sound of average volume, we probably have about 30 dB, or 5 base-2 orders
// of magnitude we can drop down before the sound becomes inaudible. (That's approximate headroom
// based on a few sloppy assumptions.) So we could try a factor like 1 / (2^(D/300)) for starters.
// 1 / (2^(D/300))
const float DISTANCE_BASE = 2.0f;
const float DISTANCE_DENOMINATOR = 300.0f;
const float DISTANCE_NUMERATOR = 300.0f;
distanceCoefficient = DISTANCE_NUMERATOR / powf(DISTANCE_BASE, (distance / DISTANCE_DENOMINATOR));
distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor());
}
}

return distanceCoefficient;
}
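
Worth spelling out: the comment proposes 1 / 2^(D/300), but with DISTANCE_NUMERATOR = 300 the committed expression is (s being the UI scaling factor)

    \[ c(D) = \min\left(1,\; s \cdot \frac{300}{2^{D/300}}\right) \]

so for s = 1 attenuation only begins once 2^(D/300) exceeds 300, i.e. past roughly 300 * log2(300), about 2470 m. At the 1500 m the comment mentions, 2^5 = 32 and the clamped coefficient is still 1, whereas the comment's own 1 / 2^(D/300) would give 1/32, about 0.031 (roughly -30 dB).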

@@ -236,11 +281,13 @@ void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint&
rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
}

attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] =
leftSample * leftEarAttenuation * _allEchoesAttenuation;
attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;

attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] =
rightSample * rightEarAttenuation * _allEchoesAttenuation;
}

// now inject the attenuated array with the appropriate delay

@@ -249,9 +296,25 @@ void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint&

_audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
_audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);

_injectedEchoes++;
}
}

void AudioReflector::preProcessOriginalInboundAudio(unsigned int sampleTime,
QByteArray& samples, const QAudioFormat& format) {

if (_originalSourceAttenuation != 1.0f) {
int numberOfSamples = (samples.size() / sizeof(int16_t));
int16_t* sampleData = (int16_t*)samples.data();
for (int i = 0; i < numberOfSamples; i++) {
sampleData[i] = sampleData[i] * _originalSourceAttenuation;
}
}

}
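
One caution worth noting: the slider script above lets both wet/dry gains reach 2.0, and multiplying int16_t samples by a gain above 1.0 can overflow and wrap. A minimal sketch of a clamped variant (my addition, not part of the commit; the helper name is hypothetical):

    #include <cstdint>
    #include <algorithm>

    // scale one 16-bit sample by gain, saturating instead of wrapping on overflow
    inline int16_t scaleSampleClamped(int16_t sample, float gain) {
        float scaled = sample * gain;
        scaled = std::max(-32768.0f, std::min(32767.0f, scaled));
        return static_cast<int16_t>(scaled);
    }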

void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) {
const int NUM_CHANNELS_INPUT = 1;

@@ -272,6 +335,8 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray
_localAudioDelays.clear();
_localEchoesSuppressed.clear();
echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat);
_localEchoesCount = _localAudioDelays.size();
_localEchoesSuppressedCount = _localEchoesSuppressed.size();
}
}
}

@@ -280,9 +345,13 @@ void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArr
_inboundAudioDelays.clear();
_inboundEchoesSuppressed.clear();
echoAudio(INBOUND_AUDIO, sampleTime, samples, format);
_inboundEchoesCount = _inboundAudioDelays.size();
_inboundEchoesSuppressedCount = _inboundEchoesSuppressed.size();
}

void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
QMutexLocker locker(&_mutex);

_maxDelay = 0;
_maxAttenuation = 0.0f;
_minDelay = std::numeric_limits<int>::max();

@@ -292,14 +361,20 @@ void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, cons
_totalAttenuation = 0.0f;
_attenuationCount = 0;

QMutexLocker locker(&_mutex);

// depending on if we're processing local or external audio, pick the correct points vector
QVector<AudiblePoint>& audiblePoints = source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;

int injectCalls = 0;
_injectedEchoes = 0;
foreach(const AudiblePoint& audiblePoint, audiblePoints) {
injectCalls++;
injectAudiblePoint(source, audiblePoint, samples, sampleTime, format.sampleRate());
}

/*
qDebug() << "injectCalls=" << injectCalls;
qDebug() << "_injectedEchoes=" << _injectedEchoes;
*/

_averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount;
_averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount;

@@ -308,6 +383,14 @@ void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, cons
_minDelay = 0.0f;
_minAttenuation = 0.0f;
}

_officialMaxDelay = _maxDelay;
_officialMinDelay = _minDelay;
_officialMaxAttenuation = _maxAttenuation;
_officialMinAttenuation = _minAttenuation;
_officialAverageDelay = _averageDelay;
_officialAverageAttenuation = _averageAttenuation;

}

void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) {

@@ -359,6 +442,19 @@ void AudioReflector::addAudioPath(AudioSource source, const glm::vec3& origin, c
audioPaths.push_back(path);
}

// NOTE: This is a prototype of an eventual utility that will identify the speaking sources for the inbound audio
// stream. It's not currently called but will be added soon.
void AudioReflector::identifyAudioSources() {
// looking for audio sources....
foreach (const AvatarSharedPointer& avatarPointer, _avatarManager->getAvatarHash()) {
Avatar* avatar = static_cast<Avatar*>(avatarPointer.data());
if (!avatar->isInitialized()) {
continue;
}
qDebug() << "avatar[" << avatar << "] loudness:" << avatar->getAudioLoudness();
}
}

void AudioReflector::calculateAllReflections() {
// only recalculate when we've moved, or if the attributes have changed
// TODO: what about case where new voxels are added in front of us???

@@ -15,6 +15,7 @@

#include "Audio.h"
#include "avatar/MyAvatar.h"
#include "avatar/AvatarManager.h"

enum AudioSource {
LOCAL_AUDIO,

@@ -69,25 +70,27 @@ public:
void setVoxels(VoxelTree* voxels) { _voxels = voxels; }
void setMyAvatar(MyAvatar* myAvatar) { _myAvatar = myAvatar; }
void setAudio(Audio* audio) { _audio = audio; }
void setAvatarManager(AvatarManager* avatarManager) { _avatarManager = avatarManager; }

void render(); /// must be called in the application render loop

void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format);
void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);

public slots:
// statistics
int getReflections() const { return _reflections; }
float getAverageDelayMsecs() const { return _averageDelay; }
float getAverageAttenuation() const { return _averageAttenuation; }
float getMaxDelayMsecs() const { return _maxDelay; }
float getMaxAttenuation() const { return _maxAttenuation; }
float getMinDelayMsecs() const { return _minDelay; }
float getMinAttenuation() const { return _minAttenuation; }
float getAverageDelayMsecs() const { return _officialAverageDelay; }
float getAverageAttenuation() const { return _officialAverageAttenuation; }
float getMaxDelayMsecs() const { return _officialMaxDelay; }
float getMaxAttenuation() const { return _officialMaxAttenuation; }
float getMinDelayMsecs() const { return _officialMinDelay; }
float getMinAttenuation() const { return _officialMinAttenuation; }
float getDelayFromDistance(float distance);
int getDiffusionPathCount() const { return _diffusionPathCount; }
int getEchoesInjected() const { return _inboundAudioDelays.size() + _localAudioDelays.size(); }
int getEchoesSuppressed() const { return _inboundEchoesSuppressed.size() + _localEchoesSuppressed.size(); }
int getEchoesInjected() const { return _inboundEchoesCount + _localEchoesCount; }
int getEchoesSuppressed() const { return _inboundEchoesSuppressedCount + _localEchoesSuppressedCount; }

/// ms of delay added to all echoes
float getPreDelay() const { return _preDelay; }

@@ -126,12 +129,19 @@ public slots:
float getReflectiveRatio() const { return (1.0f - (_absorptionRatio + _diffusionRatio)); }
void setReflectiveRatio(float ratio);

// wet/dry mix - these don't affect any reflection calculations, only the final mix volumes
float getOriginalSourceAttenuation() const { return _originalSourceAttenuation; }
void setOriginalSourceAttenuation(float value) { _originalSourceAttenuation = value; }
float getEchoesAttenuation() const { return _allEchoesAttenuation; }
void setEchoesAttenuation(float value) { _allEchoesAttenuation = value; }

signals:

private:
VoxelTree* _voxels; // used to access voxel scene
MyAvatar* _myAvatar; // access to listener
Audio* _audio; // access to audio API
AvatarManager* _avatarManager; // access to avatar manager API

// Helpers for drawing
void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);

@@ -147,11 +157,18 @@ private:
float _averageDelay;
float _maxDelay;
float _minDelay;
float _officialAverageDelay;
float _officialMaxDelay;
float _officialMinDelay;
int _attenuationCount;
float _totalAttenuation;
float _averageAttenuation;
float _maxAttenuation;
float _minAttenuation;
float _officialAverageAttenuation;
float _officialMaxAttenuation;
float _officialMinAttenuation;

glm::vec3 _listenerPosition;
glm::vec3 _origin;

@@ -161,11 +178,15 @@ private:
QVector<AudiblePoint> _inboundAudiblePoints; /// the audible points that have been calculated from the inbound audio paths
QMap<float, float> _inboundAudioDelays; /// delay times for currently injected audio points
QVector<float> _inboundEchoesSuppressed; /// delay times for currently injected audio points
int _inboundEchoesCount;
int _inboundEchoesSuppressedCount;

QVector<AudioPath*> _localAudioPaths; /// audio paths we're processing for local audio
QVector<AudiblePoint> _localAudiblePoints; /// the audible points that have been calculated from the local audio paths
QMap<float, float> _localAudioDelays; /// delay times for currently injected audio points
QVector<float> _localEchoesSuppressed; /// delay times for currently injected audio points
int _localEchoesCount;
int _localEchoesSuppressedCount;

// adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties,
// as well as diffusion sound sources

@@ -182,6 +203,7 @@ private:
void calculateAllReflections();
int countDiffusionPaths();
glm::vec3 getFaceNormal(BoxFace face);
void identifyAudioSources();

void injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
void echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);

@@ -197,13 +219,16 @@ private:
float _distanceAttenuationScalingFactor;
float _localAudioAttenuationFactor;
float _combFilterWindow;

int _diffusionFanout; // number of points of diffusion from each reflection point

// all elements have the same material for now...
float _absorptionRatio;
float _diffusionRatio;
float _reflectiveRatio;

// wet/dry mix - these don't affect any reflection calculations, only the final mix volumes
float _originalSourceAttenuation; /// each sample of original signal will be multiplied by this
float _allEchoesAttenuation; /// each sample of all echo signals will be multiplied by this

// remember the last known values at calculation
bool haveAttributesChanged();

@@ -216,6 +241,10 @@ private:
int _lastDiffusionFanout;
float _lastAbsorptionRatio;
float _lastDiffusionRatio;
bool _lastDontDistanceAttenuate;
bool _lastAlternateDistanceAttenuate;

int _injectedEchoes;
};

@@ -45,7 +45,7 @@ Camera::Camera() :
_idealPosition(0.0f, 0.0f, 0.0f),
_targetPosition(0.0f, 0.0f, 0.0f),
_fieldOfView(DEFAULT_FIELD_OF_VIEW_DEGREES),
_aspectRatio(16.f/9.f),
_aspectRatio(16.0f/9.0f),
_nearClip(0.08f), // default
_farClip(50.0f * TREE_SCALE), // default
_upShift(0.0f),

@@ -94,8 +94,8 @@ void Camera::updateFollowMode(float deltaTime) {

// derive t from tightness
float t = _tightness * _modeShift * deltaTime;
if (t > 1.0) {
t = 1.0;
if (t > 1.0f) {
t = 1.0f;
}

// handle keepLookingAt

@@ -429,6 +429,14 @@ Menu::Menu() :
Qt::CTRL | Qt::SHIFT | Qt::Key_A,
true);

addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingDontDistanceAttenuate,
Qt::CTRL | Qt::SHIFT | Qt::Key_Y,
false);

addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate,
Qt::CTRL | Qt::SHIFT | Qt::Key_U,
false);

addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
Qt::CTRL | Qt::SHIFT | Qt::Key_V,
this,

@@ -135,10 +135,10 @@ public:

void removeAction(QMenu* menu, const QString& actionName);

bool goToDestination(QString destination);
void goToOrientation(QString orientation);
void goToDomain(const QString newDomain);
void goTo(QString destination);
bool static goToDestination(QString destination);
void static goToOrientation(QString orientation);
void static goToDomain(const QString newDomain);
void static goTo(QString destination);

public slots:

@@ -268,6 +268,10 @@ namespace MenuOption {
const QString AudioSpatialProcessingSlightlyRandomSurfaces = "Slightly Random Surfaces";
const QString AudioSpatialProcessingStereoSource = "Stereo Source";
const QString AudioSpatialProcessingWithDiffusions = "With Diffusions";
const QString AudioSpatialProcessingDontDistanceAttenuate = "Don't calculate distance attenuation";
const QString AudioSpatialProcessingAlternateDistanceAttenuate = "Alternate distance attenuation";

const QString Avatars = "Avatars";
const QString Bandwidth = "Bandwidth Display";

@@ -41,8 +41,15 @@ void Faceplus::init() {
updateEnabled();
}

void Faceplus::setState(const glm::quat& headRotation, float estimatedEyePitch, float estimatedEyeYaw,
const QVector<float>& blendshapeCoefficients) {
void Faceplus::reset() {
if (_enabled) {
QMetaObject::invokeMethod(_reader, "reset");
}
}

void Faceplus::setState(const glm::vec3& headTranslation, const glm::quat& headRotation,
float estimatedEyePitch, float estimatedEyeYaw, const QVector<float>& blendshapeCoefficients) {
_headTranslation = headTranslation;
_headRotation = headRotation;
_estimatedEyePitch = estimatedEyePitch;
_estimatedEyeYaw = estimatedEyeYaw;

@@ -150,7 +157,7 @@ FaceplusReader::~FaceplusReader() {

void FaceplusReader::init() {
#ifdef HAVE_FACEPLUS
if (!faceplus_init("VGA")) {
if (!faceplus_init("hHD")) {
qDebug() << "Failed to initialize Faceplus.";
return;
}

@@ -191,7 +198,8 @@ void FaceplusReader::init() {
}
}
_blendshapeCoefficients.resize(maxIndex + 1);

_referenceInitialized = false;

QMetaObject::invokeMethod(this, "update", Qt::QueuedConnection);
#endif
}

@@ -203,10 +211,24 @@ void FaceplusReader::shutdown() {

void FaceplusReader::update() {
#ifdef HAVE_FACEPLUS
if (!(faceplus_synchronous_track() && faceplus_current_output_vector(_outputVector.data()))) {
float x, y, rotation, scale;
if (!(faceplus_synchronous_track() && faceplus_current_face_location(&x, &y, &rotation, &scale) && !glm::isnan(x) &&
faceplus_current_output_vector(_outputVector.data()))) {
QMetaObject::invokeMethod(this, "update", Qt::QueuedConnection);
return;
}
if (!_referenceInitialized) {
_referenceX = x;
_referenceY = y;
_referenceScale = scale;
_referenceInitialized = true;
}
const float TRANSLATION_SCALE = 10.0f;
const float REFERENCE_DISTANCE = 10.0f;
float depthScale = _referenceScale / scale;
float z = REFERENCE_DISTANCE * (depthScale - 1.0f);
glm::vec3 headTranslation((x - _referenceX) * depthScale * TRANSLATION_SCALE,
(y - _referenceY) * depthScale * TRANSLATION_SCALE, z);
glm::quat headRotation(glm::radians(glm::vec3(-_outputVector.at(_headRotationIndices[0]),
_outputVector.at(_headRotationIndices[1]), -_outputVector.at(_headRotationIndices[2]))));
float estimatedEyePitch = (_outputVector.at(_leftEyeRotationIndices[0]) +

@@ -222,10 +244,16 @@ void FaceplusReader::update() {
}
}

QMetaObject::invokeMethod(Application::getInstance()->getFaceplus(), "setState", Q_ARG(const glm::quat&, headRotation),
Q_ARG(float, estimatedEyePitch), Q_ARG(float, estimatedEyeYaw), Q_ARG(const QVector<float>&, _blendshapeCoefficients));
QMetaObject::invokeMethod(Application::getInstance()->getFaceplus(), "setState", Q_ARG(const glm::vec3&, headTranslation),
Q_ARG(const glm::quat&, headRotation), Q_ARG(float, estimatedEyePitch), Q_ARG(float, estimatedEyeYaw),
Q_ARG(const QVector<float>&, _blendshapeCoefficients));

QMetaObject::invokeMethod(this, "update", Qt::QueuedConnection);
#endif
}

void FaceplusReader::reset() {
#ifdef HAVE_FACEPLUS
_referenceInitialized = false;
#endif
}
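
The translation math above derives depth from apparent face scale relative to the first tracked frame:

    \[ z = \mathrm{REFERENCE\_DISTANCE} \cdot \left(\frac{s_{ref}}{s} - 1\right) \]

For example, a face tracked at half its reference scale (roughly twice as far from the camera) gives depthScale = 2 and z = 10 units, while the reference pose gives z = 0. The x/y offsets are multiplied by the same depthScale, presumably to convert shrinking screen-space offsets back toward physical units as the face recedes.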

@@ -30,11 +30,12 @@ public:
virtual ~Faceplus();

void init();
void reset();

bool isActive() const { return _active; }

Q_INVOKABLE void setState(const glm::quat& headRotation, float estimatedEyePitch, float estimatedEyeYaw,
const QVector<float>& blendshapeCoefficients);
Q_INVOKABLE void setState(const glm::vec3& headTranslation, const glm::quat& headRotation,
float estimatedEyePitch, float estimatedEyeYaw, const QVector<float>& blendshapeCoefficients);

public slots:

@@ -63,6 +64,7 @@ public:
Q_INVOKABLE void init();
Q_INVOKABLE void shutdown();
Q_INVOKABLE void update();
Q_INVOKABLE void reset();

private:

@@ -72,6 +74,10 @@ private:
int _headRotationIndices[3];
int _leftEyeRotationIndices[2];
int _rightEyeRotationIndices[2];
float _referenceX;
float _referenceY;
float _referenceScale;
bool _referenceInitialized;
QVector<float> _blendshapeCoefficients;
#endif
};

@@ -60,11 +60,11 @@ Model::SkinLocations Model::_skinNormalMapLocations;
Model::SkinLocations Model::_skinShadowLocations;

void Model::setScale(const glm::vec3& scale) {
glm::vec3 deltaScale = _scale - scale;
float scaleLength = glm::length(_scale);
float relativeDeltaScale = glm::length(_scale - scale) / scaleLength;

// decreased epsilon because this wasn't handling scale changes of 0.01
const float SMALLER_EPSILON = EPSILON * 0.0001f;
if (glm::length2(deltaScale) > SMALLER_EPSILON) {
const float ONE_PERCENT = 0.01f;
if (relativeDeltaScale > ONE_PERCENT || scaleLength < EPSILON) {
_scale = scale;
rebuildShapes();
}

@@ -468,20 +468,56 @@ void Model::clearShapes() {

void Model::rebuildShapes() {
clearShapes();

if (_jointStates.isEmpty()) {

if (!_geometry) {
return;
}

// make sure all the joints are updated correctly before we try to create their shapes
for (int i = 0; i < _jointStates.size(); i++) {
updateJointState(i);
}

const FBXGeometry& geometry = _geometry->getFBXGeometry();

if (geometry.joints.isEmpty()) {
return;
}

int numJoints = geometry.joints.size();
QVector<glm::mat4> transforms;
transforms.fill(glm::mat4(), numJoints);
QVector<glm::quat> combinedRotations;
combinedRotations.fill(glm::quat(), numJoints);
QVector<bool> shapeIsSet;
shapeIsSet.fill(false, numJoints);
int rootIndex = 0;

float uniformScale = extractUniformScale(_scale);
glm::quat inverseRotation = glm::inverse(_rotation);
glm::vec3 rootPosition(0.f);
int numShapesSet = 0;
int lastNumShapesSet = -1;
while (numShapesSet < numJoints && numShapesSet != lastNumShapesSet) {
lastNumShapesSet = numShapesSet;
for (int i = 0; i < numJoints; ++i) {
if (shapeIsSet[i]) {
continue;
}
const FBXJoint& joint = geometry.joints[i];
int parentIndex = joint.parentIndex;
if (parentIndex == -1) {
rootIndex = i;
glm::mat4 baseTransform = glm::mat4_cast(_rotation) * uniformScale * glm::translate(_offset);
glm::quat combinedRotation = joint.preRotation * joint.rotation * joint.postRotation;
transforms[i] = baseTransform * geometry.offset * glm::translate(joint.translation) * joint.preTransform *
glm::mat4_cast(combinedRotation) * joint.postTransform;
combinedRotations[i] = _rotation * combinedRotation;
++numShapesSet;
shapeIsSet[i] = true;
} else if (shapeIsSet[parentIndex]) {
glm::quat combinedRotation = joint.preRotation * joint.rotation * joint.postRotation;
transforms[i] = transforms[parentIndex] * glm::translate(joint.translation) * joint.preTransform *
glm::mat4_cast(combinedRotation) * joint.postTransform;
combinedRotations[i] = combinedRotations[parentIndex] * combinedRotation;
++numShapesSet;
shapeIsSet[i] = true;
}
}
}
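
The while loop above is a fixed-point pass over the joint tree: each sweep sets every joint whose parent is already set, so it terminates after at most depth-of-tree sweeps (or when a sweep makes no progress, which guards against malformed parent indices). The same parent-before-child order can be computed in a single pass with a queue; a standalone sketch under assumed inputs (the helper name and bare parent-index array are mine, not from the commit):

    #include <vector>
    #include <queue>

    // returns joint indices ordered so every parent appears before its children
    std::vector<int> parentFirstOrder(const std::vector<int>& parentIndex) {
        int numJoints = (int)parentIndex.size();
        std::vector<std::vector<int>> children(numJoints);
        std::queue<int> pending;
        for (int i = 0; i < numJoints; ++i) {
            if (parentIndex[i] == -1) {
                pending.push(i); // a root joint
            } else {
                children[parentIndex[i]].push_back(i);
            }
        }
        std::vector<int> order;
        while (!pending.empty()) {
            int joint = pending.front();
            pending.pop();
            order.push_back(joint);
            for (int child : children[joint]) {
                pending.push(child);
            }
        }
        return order; // joints with cyclic or invalid parents are simply skipped
    }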

// joint shapes
Extents totalExtents;

@@ -489,48 +525,70 @@ void Model::rebuildShapes() {
for (int i = 0; i < _jointStates.size(); i++) {
const FBXJoint& joint = geometry.joints[i];

glm::vec3 jointToShapeOffset = uniformScale * (_jointStates[i].combinedRotation * joint.shapePosition);
glm::vec3 worldPosition = extractTranslation(_jointStates[i].transform) + jointToShapeOffset + _translation;
glm::vec3 worldPosition = extractTranslation(transforms[i]);
Extents shapeExtents;
shapeExtents.reset();

if (joint.parentIndex == -1) {
rootPosition = worldPosition;
}

float radius = uniformScale * joint.boneRadius;
float halfHeight = 0.5f * uniformScale * joint.distanceToParent;
if (joint.shapeType == Shape::CAPSULE_SHAPE && halfHeight > EPSILON) {
Shape::Type type = joint.shapeType;
if (type == Shape::CAPSULE_SHAPE && halfHeight < EPSILON) {
// this capsule is effectively a sphere
type = Shape::SPHERE_SHAPE;
}
if (type == Shape::CAPSULE_SHAPE) {
CapsuleShape* capsule = new CapsuleShape(radius, halfHeight);
capsule->setPosition(worldPosition);
capsule->setRotation(_jointStates[i].combinedRotation * joint.shapeRotation);
capsule->setRotation(combinedRotations[i] * joint.shapeRotation);
_jointShapes.push_back(capsule);

glm::vec3 endPoint;
capsule->getEndPoint(endPoint);
glm::vec3 startPoint;
capsule->getStartPoint(startPoint);
glm::vec3 axis = (halfHeight + radius) * glm::normalize(endPoint - startPoint);

// add some points that bound a sphere at the center of the capsule
glm::vec3 axis = glm::vec3(radius);
shapeExtents.addPoint(worldPosition + axis);
shapeExtents.addPoint(worldPosition - axis);
} else {

// add the two furthest surface points of the capsule
axis = (halfHeight + radius) * glm::normalize(endPoint - startPoint);
shapeExtents.addPoint(worldPosition + axis);
shapeExtents.addPoint(worldPosition - axis);

totalExtents.addExtents(shapeExtents);
} else if (type == Shape::SPHERE_SHAPE) {
SphereShape* sphere = new SphereShape(radius, worldPosition);
_jointShapes.push_back(sphere);

glm::vec3 axis = glm::vec3(radius);
shapeExtents.addPoint(worldPosition + axis);
shapeExtents.addPoint(worldPosition - axis);
totalExtents.addExtents(shapeExtents);
} else {
// this shape type is not handled and the joint shouldn't collide,
// however we must have a shape for each joint,
// so we make a bogus sphere with zero radius.
// TODO: implement collision groups for more control over what collides with what
SphereShape* sphere = new SphereShape(0.f, worldPosition);
_jointShapes.push_back(sphere);
}
totalExtents.addExtents(shapeExtents);
}

// bounding shape
// NOTE: we assume that the longest side of totalExtents is the yAxis
glm::vec3 diagonal = totalExtents.maximum - totalExtents.minimum;
float capsuleRadius = 0.25f * (diagonal.x + diagonal.z); // half the average of x and z
// the radius is half the RMS of the X and Z sides:
float capsuleRadius = 0.5f * sqrtf(0.5f * (diagonal.x * diagonal.x + diagonal.z * diagonal.z));
_boundingShape.setRadius(capsuleRadius);
_boundingShape.setHalfHeight(0.5f * diagonal.y - capsuleRadius);

glm::quat inverseRotation = glm::inverse(_rotation);
glm::vec3 rootPosition = extractTranslation(transforms[rootIndex]);
_boundingShapeLocalOffset = inverseRotation * (0.5f * (totalExtents.maximum + totalExtents.minimum) - rootPosition);
_boundingShape.setPosition(_translation - _rotation * _boundingShapeLocalOffset);
_boundingShape.setRotation(_rotation);
}
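
The radius change swaps the arithmetic mean of the X and Z extents for their root-mean-square, which by the QM-AM inequality can only grow the bounding capsule:

    \[ r_{new} = \frac{1}{2}\sqrt{\frac{x^2 + z^2}{2}} \;\ge\; \frac{x + z}{4} = r_{old} \]

A worked example: for diagonal.x = 0.3 and diagonal.z = 0.4, the old radius is 0.25 * (0.3 + 0.4) = 0.175 while the new one is 0.5 * sqrt(0.5 * (0.09 + 0.16)) ≈ 0.177, and the gap widens as the two sides diverge, making the capsule more conservative for flat cross sections.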

void Model::updateShapePositions() {

@@ -557,6 +615,7 @@ void Model::updateShapePositions() {
_boundingRadius = sqrtf(_boundingRadius);
_shapesAreDirty = false;
_boundingShape.setPosition(rootPosition + _rotation * _boundingShapeLocalOffset);
_boundingShape.setRotation(_rotation);
}
}

@@ -346,7 +346,7 @@ void Stats::display(

lines = _expanded ? 12 : 3;
if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
lines += 8; // spatial audio processing adds 1 spacing line and 7 extra lines of info
lines += 9; // spatial audio processing adds 1 spacing line and 8 extra lines of info
}

drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10);

@@ -542,12 +542,21 @@ void Stats::display(
audioReflector->getSoundMsPerMeter());

verticalOffset += STATS_PELS_PER_LINE;

drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);

bool distanceAttenuationDisabled = Menu::getInstance()->isOptionChecked(
MenuOption::AudioSpatialProcessingDontDistanceAttenuate);

sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, Factor: %5.3f",
bool alternateDistanceAttenuationEnabled = Menu::getInstance()->isOptionChecked(
MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate);

sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, %s: %5.3f",
audioReflector->getAverageAttenuation(),
audioReflector->getMaxAttenuation(),
audioReflector->getMinAttenuation(),
(distanceAttenuationDisabled ? "Distance Factor [DISABLED]" :
alternateDistanceAttenuationEnabled ? "Distance Factor [ALTERNATE]" : "Distance Factor [STANDARD]"),
audioReflector->getDistanceAttenuationScalingFactor());

verticalOffset += STATS_PELS_PER_LINE;

@@ -588,6 +597,13 @@ void Stats::display(
verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, scale, rotation, font, reflectionsStatus, color);

sprintf(reflectionsStatus, "Wet/Dry Mix: Original: %5.3f Echoes: %5.3f",
audioReflector->getOriginalSourceAttenuation(),
audioReflector->getEchoesAttenuation());

verticalOffset += STATS_PELS_PER_LINE;
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);

}

}

@@ -67,21 +67,25 @@ Sound::Sound(float volume, float frequency, float duration, float decay, QObject
}

Sound::Sound(const QUrl& sampleURL, QObject* parent) :
QObject(parent)
QObject(parent),
_hasDownloaded(false)
{
// assume we have a QApplication or QCoreApplication instance and use the
// QNetworkAccessManager to grab the raw audio file at the given URL

QNetworkAccessManager *manager = new QNetworkAccessManager(this);
connect(manager, SIGNAL(finished(QNetworkReply*)),
this, SLOT(replyFinished(QNetworkReply*)));

qDebug() << "Requesting audio file" << sampleURL.toDisplayString();
manager->get(QNetworkRequest(sampleURL));

QNetworkReply* soundDownload = manager->get(QNetworkRequest(sampleURL));
connect(soundDownload, &QNetworkReply::finished, this, &Sound::replyFinished);
connect(soundDownload, SIGNAL(error(QNetworkReply::NetworkError)), this, SLOT(replyError(QNetworkReply::NetworkError)));
}

void Sound::replyFinished(QNetworkReply* reply) {
void Sound::replyFinished() {

QNetworkReply* reply = reinterpret_cast<QNetworkReply*>(sender());

// replace our byte array with the downloaded data
QByteArray rawAudioByteArray = reply->readAll();

@@ -108,6 +112,13 @@ void Sound::replyFinished(QNetworkReply* reply) {
} else {
qDebug() << "Network reply without 'Content-Type'.";
}

_hasDownloaded = true;
}

void Sound::replyError(QNetworkReply::NetworkError code) {
QNetworkReply* reply = reinterpret_cast<QNetworkReply*>(sender());
qDebug() << "Error downloading sound file at" << reply->url().toString() << "-" << reply->errorString();
}
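
An aside on the new slots (my note, not part of the commit): reinterpret_cast on sender() compiles but performs no type check; the usual Qt idiom is qobject_cast, which returns nullptr when the signal came from something other than a QNetworkReply:

    QNetworkReply* reply = qobject_cast<QNetworkReply*>(sender());
    if (!reply) {
        return; // unexpected sender - bail out instead of misreading memory
    }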

@@ -13,25 +13,30 @@
#define hifi_Sound_h

#include <QtCore/QObject>

class QNetworkReply;
#include <QtNetwork/QNetworkReply>

class Sound : public QObject {
Q_OBJECT

Q_PROPERTY(bool downloaded READ hasDownloaded)
public:
Sound(const QUrl& sampleURL, QObject* parent = NULL);
Sound(float volume, float frequency, float duration, float decay, QObject* parent = NULL);

bool hasDownloaded() const { return _hasDownloaded; }

const QByteArray& getByteArray() { return _byteArray; }

private:
QByteArray _byteArray;

bool _hasDownloaded;

void downSample(const QByteArray& rawAudioByteArray);
void interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);

private slots:
void replyFinished(QNetworkReply* reply);
void replyFinished();
void replyError(QNetworkReply::NetworkError code);
};

#endif // hifi_Sound_h

@@ -97,13 +97,14 @@ class AvatarData : public QObject {
Q_PROPERTY(float audioLoudness READ getAudioLoudness WRITE setAudioLoudness)
Q_PROPERTY(float audioAverageLoudness READ getAudioAverageLoudness WRITE setAudioAverageLoudness)

Q_PROPERTY(QString displayName READ getDisplayName WRITE setDisplayName)
Q_PROPERTY(QString faceModelURL READ getFaceModelURLFromScript WRITE setFaceModelURLFromScript)
Q_PROPERTY(QString skeletonModelURL READ getSkeletonModelURLFromScript WRITE setSkeletonModelURLFromScript)
Q_PROPERTY(QString billboardURL READ getBillboardURL WRITE setBillboardFromURL)

Q_PROPERTY(QStringList jointNames READ getJointNames)

Q_PROPERTY(QUuid sessionUUID READ getSessionUUID);
Q_PROPERTY(QUuid sessionUUID READ getSessionUUID)
public:
AvatarData();
virtual ~AvatarData();

@@ -1624,7 +1624,6 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
}
float radiusScale = extractUniformScale(joint.transform * fbxCluster.inverseBindMatrix);
JointShapeInfo& jointShapeInfo = jointShapeInfos[jointIndex];
jointShapeInfo.boneBegin = rotateMeshToJoint * (radiusScale * (boneBegin - boneEnd));

float totalWeight = 0.0f;
for (int j = 0; j < cluster.indices.size(); j++) {

@@ -1686,7 +1685,6 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
}
}
float radiusScale = extractUniformScale(joint.transform * firstFBXCluster.inverseBindMatrix);
jointShapeInfo.boneBegin = rotateMeshToJoint * (radiusScale * (boneBegin - boneEnd));

glm::vec3 averageVertex(0.f);
foreach (const glm::vec3& vertex, extracted.mesh.vertices) {

@@ -1722,6 +1720,14 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
FBXJoint& joint = geometry.joints[i];
JointShapeInfo& jointShapeInfo = jointShapeInfos[i];

if (joint.parentIndex == -1) {
jointShapeInfo.boneBegin = glm::vec3(0.0f);
} else {
const FBXJoint& parentJoint = geometry.joints[joint.parentIndex];
glm::quat inverseRotation = glm::inverse(extractRotation(joint.transform));
jointShapeInfo.boneBegin = inverseRotation * (extractTranslation(parentJoint.transform) - extractTranslation(joint.transform));
}

// we use a capsule if ANY of the joint's mesh vertices successfully projected onto the bone
// AND its boneRadius is not too close to zero
bool collideLikeCapsule = jointShapeInfo.numProjectedVertices > 0

@@ -1733,12 +1739,12 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
joint.shapeType = Shape::CAPSULE_SHAPE;
} else {
// collide the joint like a sphere
joint.shapeType = Shape::SPHERE_SHAPE;
if (jointShapeInfo.numVertices > 0) {
jointShapeInfo.averageVertex /= (float)jointShapeInfo.numVertices;
joint.shapePosition = jointShapeInfo.averageVertex;
} else {
joint.shapePosition = glm::vec3(0.f);
joint.shapeType = Shape::SPHERE_SHAPE;
}
if (jointShapeInfo.numProjectedVertices == 0
&& jointShapeInfo.numVertices > 0) {

@@ -1747,6 +1753,15 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping)
jointShapeInfo.averageRadius /= (float)jointShapeInfo.numVertices;
joint.boneRadius = jointShapeInfo.averageRadius;
}

float distanceFromEnd = glm::length(joint.shapePosition);
float distanceFromBegin = glm::distance(joint.shapePosition, jointShapeInfo.boneBegin);
if (distanceFromEnd > joint.distanceToParent && distanceFromBegin > joint.distanceToParent) {
// The shape is further from both joint endpoints than the endpoints are from each other
// which probably means the model has a bad transform somewhere. We disable this shape
// by setting its type to UNKNOWN_SHAPE.
joint.shapeType = Shape::UNKNOWN_SHAPE;
}
}
}
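
The sanity check has a clean geometric reading. In the joint frame the bone runs from boneBegin B to the joint itself at the origin, with length L = distanceToParent (approximately |B|); a shape position P is rejected when

    \[ \lVert P \rVert > L \quad \text{and} \quad \lVert P - B \rVert > L \]

that is, when P lies outside both balls of radius L centered on the bone's endpoints. Such a point cannot sit anywhere near the segment a joint capsule is meant to wrap, a strong hint of a broken bind-pose transform, so the shape is disabled rather than trusted.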

geometry.palmDirection = parseVec3(mapping.value("palmDirection", "0, -1, 0").toString());

@@ -18,6 +18,8 @@
#include <QVariant>
#include <QVector>

#include <Shape.h>

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

@@ -91,7 +93,7 @@ public:
QString name;
glm::vec3 shapePosition; // in joint frame
glm::quat shapeRotation; // in joint frame
int shapeType;
Shape::Type shapeType;
};