Merge remote-tracking branch 'upstream/master' into moreshelfstuff

James B. Pollack 2016-06-03 14:24:33 -07:00
commit e87cb2bd54
53 changed files with 803 additions and 332 deletions

View file

@ -339,21 +339,18 @@ bool AudioMixer::prepareMixForListeningNode(Node* node) {
}
});
int nonZeroSamples = 0;
// use the per-listener AudioLimiter to render the mixed data...
listenerNodeData->audioLimiter.render(_mixedSamples, _clampedSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
// enumerate the mixed samples and clamp any samples outside the min/max
// also check if we ended up with a silent frame
// check for silent audio after the peak limiter has converted the samples
bool hasAudio = false;
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
_clampedSamples[i] = int16_t(glm::clamp(int(_mixedSamples[i] * AudioConstants::MAX_SAMPLE_VALUE),
AudioConstants::MIN_SAMPLE_VALUE,
AudioConstants::MAX_SAMPLE_VALUE));
if (_clampedSamples[i] != 0.0f) {
++nonZeroSamples;
if (_clampedSamples[i] != 0) {
hasAudio = true;
break;
}
}
return (nonZeroSamples > 0);
return hasAudio;
}
void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {

View file

@ -25,6 +25,7 @@
AudioMixerClientData::AudioMixerClientData(const QUuid& nodeID) :
NodeData(nodeID),
audioLimiter(AudioConstants::SAMPLE_RATE, AudioConstants::STEREO),
_outgoingMixedAudioSequenceNumber(0),
_downstreamAudioStreamStats()
{

View file

@ -16,11 +16,13 @@
#include <AABox.h>
#include <AudioHRTF.h>
#include <AudioLimiter.h>
#include <UUIDHasher.h>
#include "PositionalAudioStream.h"
#include "AvatarAudioStream.h"
class AudioMixerClientData : public NodeData {
Q_OBJECT
public:
@ -61,6 +63,8 @@ public:
// uses randomization to have the AudioMixer send a stats packet to this node around every second
bool shouldSendStats(int frameNumber);
AudioLimiter audioLimiter;
signals:
void injectorStreamFinished(const QUuid& streamIdentifier);

View file

@ -111,7 +111,7 @@
{
"name": "maximum_user_capacity",
"label": "Maximum User Capacity",
"help": "The limit on how many avatars can be connected at once. 0 means no limit.",
"help": "The limit on how many users can be connected at once (0 means no limit). Avatars connected from the same machine will not count towards this limit.",
"placeholder": "0",
"default": "0",
"advanced": false

View file

@ -715,9 +715,13 @@ void DomainServer::processListRequestPacket(QSharedPointer<ReceivedMessage> mess
unsigned int DomainServer::countConnectedUsers() {
unsigned int result = 0;
auto nodeList = DependencyManager::get<LimitedNodeList>();
nodeList->eachNode([&](const SharedNodePointer& otherNode){
if (otherNode->getType() == NodeType::Agent) {
result++;
nodeList->eachNode([&](const SharedNodePointer& node){
// only count unassigned agents (i.e., users)
if (node->getType() == NodeType::Agent) {
auto nodeData = static_cast<DomainServerNodeData*>(node->getLinkedData());
if (nodeData && !nodeData->wasAssigned()) {
result++;
}
}
});
return result;

View file

@ -26,10 +26,10 @@ import "fileDialog"
ModalWindow {
id: root
resizable: true
implicitWidth: 640
implicitHeight: 480
implicitWidth: 480
implicitHeight: 360
minSize: Qt.vector2d(300, 240)
minSize: Qt.vector2d(360, 240)
draggable: true
HifiConstants { id: hifi }
@ -79,6 +79,9 @@ ModalWindow {
fileTableModel.folder = initialFolder;
iconText = root.title !== "" ? hifi.glyphs.scriptUpload : "";
// Clear selection when clicking on the external frame.
frameClicked.connect(function() { d.clearSelection(); });
}
Item {
@ -87,6 +90,13 @@ ModalWindow {
height: pane.height
anchors.margins: 0
MouseArea {
// Clear selection when clicking on an internal unused area.
anchors.fill: parent
drag.target: root
onClicked: d.clearSelection()
}
Row {
id: navControls
anchors {
@ -202,6 +212,8 @@ ModalWindow {
function update() {
var row = fileTableView.currentRow;
openButton.text = root.selectDirectory && row === -1 ? "Choose" : "Open"
if (row === -1) {
return;
}
@ -226,6 +238,12 @@ ModalWindow {
fileTableModel.folder = homeDestination;
return true;
}
function clearSelection() {
fileTableView.selection.clear();
fileTableView.currentRow = -1;
update();
}
}
FolderListModel {
@ -389,6 +407,8 @@ ModalWindow {
rows++;
}
d.clearSelection();
}
}
@ -633,8 +653,15 @@ ModalWindow {
Action {
id: okAction
text: root.saveDialog ? "Save" : (root.selectDirectory ? "Choose" : "Open")
enabled: currentSelection.text ? true : false
onTriggered: okActionTimer.start();
enabled: currentSelection.text || !root.selectDirectory && d.currentSelectionIsFolder ? true : false
onTriggered: {
if (!root.selectDirectory && !d.currentSelectionIsFolder
|| root.selectDirectory && fileTableView.currentRow === -1) {
okActionTimer.start();
} else {
fileTableView.navigateToCurrentRow();
}
}
}
Timer {

View file

@ -27,6 +27,8 @@ Frame {
readonly property int frameMarginTop: hifi.dimensions.modalDialogMargin.y + (frameContent.hasTitle ? hifi.dimensions.modalDialogTitleHeight + 10 : 0)
readonly property int frameMarginBottom: hifi.dimensions.modalDialogMargin.y
signal frameClicked();
anchors {
fill: parent
topMargin: -frameMarginTop
@ -47,6 +49,7 @@ Frame {
anchors.fill: parent
drag.target: window
enabled: window.draggable
onClicked: window.frameClicked();
}
Item {

View file

@ -22,5 +22,7 @@ Window {
property int colorScheme: hifi.colorSchemes.light
property bool draggable: false
signal frameClicked();
anchors.centerIn: draggable ? undefined : parent
}

View file

@ -994,7 +994,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
if (_keyboardFocusedItem != entityItemID) {
_keyboardFocusedItem = UNKNOWN_ENTITY_ID;
auto properties = entityScriptingInterface->getEntityProperties(entityItemID);
if (EntityTypes::Web == properties.getType() && !properties.getLocked()) {
if (EntityTypes::Web == properties.getType() && !properties.getLocked() && properties.getVisible()) {
auto entity = entityScriptingInterface->getEntityTree()->findEntityByID(entityItemID);
RenderableWebEntityItem* webEntity = dynamic_cast<RenderableWebEntityItem*>(entity.get());
if (webEntity) {
@ -1049,6 +1049,13 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer) :
}
});
connect(this, &Application::aboutToQuit, [=]() {
_keyboardFocusedItem = UNKNOWN_ENTITY_ID;
if (_keyboardFocusHighlight) {
_keyboardFocusHighlight->setVisible(false);
}
});
// Make sure we don't time out during slow operations at startup
updateHeartbeat();
@ -1503,7 +1510,13 @@ void Application::paintGL() {
// FIXME not needed anymore?
_offscreenContext->makeCurrent();
displayPlugin->beginFrameRender(_frameCount);
// If a display plugin loses its underlying support, it
// needs to be able to signal us to not use it
if (!displayPlugin->beginFrameRender(_frameCount)) {
_inPaint = false;
updateDisplayMode();
return;
}
// update the avatar with a fresh HMD pose
getMyAvatar()->updateFromHMDSensorMatrix(getHMDSensorPose());
@ -5098,9 +5111,17 @@ void Application::updateDisplayMode() {
foreach(auto displayPlugin, standard) {
addDisplayPluginToMenu(displayPlugin, first);
auto displayPluginName = displayPlugin->getName();
QObject::connect(displayPlugin.get(), &DisplayPlugin::recommendedFramebufferSizeChanged, [this](const QSize & size) {
resizeGL();
});
QObject::connect(displayPlugin.get(), &DisplayPlugin::outputDeviceLost, [this, displayPluginName] {
PluginManager::getInstance()->disableDisplayPlugin(displayPluginName);
auto menu = Menu::getInstance();
if (menu->menuItemExists(MenuOption::OutputMenu, displayPluginName)) {
menu->removeMenuItem(MenuOption::OutputMenu, displayPluginName);
}
});
first = false;
}
@ -5116,6 +5137,10 @@ void Application::updateDisplayMode() {
foreach(DisplayPluginPointer displayPlugin, PluginManager::getInstance()->getDisplayPlugins()) {
QString name = displayPlugin->getName();
QAction* action = menu->getActionForOption(name);
// Menu item might have been removed if the display plugin was lost
if (!action) {
continue;
}
if (action->isChecked()) {
newDisplayPlugin = displayPlugin;
break;

View file

@ -322,7 +322,7 @@ void AvatarManager::handleCollisionEvents(const CollisionEvents& collisionEvents
const auto characterController = myAvatar->getCharacterController();
const float avatarVelocityChange = (characterController ? glm::length(characterController->getVelocityChange()) : 0.0f);
const float velocityChange = glm::length(collision.velocityChange) + avatarVelocityChange;
const float MIN_AVATAR_COLLISION_ACCELERATION = 0.01f;
const float MIN_AVATAR_COLLISION_ACCELERATION = 2.4f; // walking speed
const bool isSound = (collision.type == CONTACT_EVENT_TYPE_START) && (velocityChange > MIN_AVATAR_COLLISION_ACCELERATION);
if (!isSound) {
@ -330,14 +330,24 @@ void AvatarManager::handleCollisionEvents(const CollisionEvents& collisionEvents
}
// Your avatar sound is personal to you, so let's say the "mass" part of the kinetic energy is already accounted for.
const float energy = velocityChange * velocityChange;
const float COLLISION_ENERGY_AT_FULL_VOLUME = 0.5f;
const float COLLISION_ENERGY_AT_FULL_VOLUME = 10.0f;
const float energyFactorOfFull = fmin(1.0f, energy / COLLISION_ENERGY_AT_FULL_VOLUME);
// For general entity collisionSoundURL, playSound supports changing the pitch for the sound based on the size of the object,
// but most avatars are roughly the same size, so let's not be so fancy yet.
const float AVATAR_STRETCH_FACTOR = 1.0f;
AudioInjector::playSound(collisionSound, energyFactorOfFull, AVATAR_STRETCH_FACTOR, myAvatar->getPosition());
_collisionInjectors.remove_if([](QPointer<AudioInjector>& injector) {
return !injector || injector->isFinished();
});
static const int MAX_INJECTOR_COUNT = 3;
if (_collisionInjectors.size() < MAX_INJECTOR_COUNT) {
auto injector = AudioInjector::playSound(collisionSound, energyFactorOfFull, AVATAR_STRETCH_FACTOR,
myAvatar->getPosition());
_collisionInjectors.emplace_back(injector);
}
myAvatar->collisionWithEntity(collision);
return;
}
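Worked out from the new constants above: with MIN_AVATAR_COLLISION_ACCELERATION raised to 2.4 m/s (walking speed), the quietest collision that still triggers a sound carries energy 2.4 * 2.4 = 5.76, a volume factor of 5.76 / 10.0 ≈ 0.58 against COLLISION_ENERGY_AT_FULL_VOLUME, and full volume needs a velocity change of about sqrt(10) ≈ 3.16 m/s; the remove_if plus MAX_INJECTOR_COUNT guard then caps concurrent avatar collision sounds at three.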

View file

@ -25,6 +25,7 @@
#include "AvatarMotionState.h"
class MyAvatar;
class AudioInjector;
class AvatarManager : public AvatarHashMap {
Q_OBJECT
@ -94,6 +95,8 @@ private:
bool _shouldShowReceiveStats = false;
std::list<QPointer<AudioInjector>> _collisionInjectors;
SetOfAvatarMotionStates _motionStatesThatMightUpdate;
SetOfMotionStates _motionStatesToAddToPhysics;
VectorOfMotionStates _motionStatesToRemoveFromPhysics;

View file

@ -18,6 +18,9 @@
namespace AudioConstants {
const int SAMPLE_RATE = 24000;
const int MONO = 1;
const int STEREO = 2;
typedef int16_t AudioSample;

View file

@ -25,24 +25,43 @@ layout(location = 2) out vec4 _fragColor2;
// the alpha threshold
uniform float alphaThreshold;
uniform sampler2D normalFittingMap;
vec3 bestFitNormal(vec3 normal) {
vec3 absNorm = abs(normal);
float maxNAbs = max(absNorm.z, max(absNorm.x, absNorm.y));
vec2 signNotZero(vec2 v) {
return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);
}
vec2 texcoord = (absNorm.z < maxNAbs ?
(absNorm.y < maxNAbs ? absNorm.yz : absNorm.xz) :
absNorm.xy);
texcoord = (texcoord.x < texcoord.y ? texcoord.yx : texcoord.xy);
texcoord.y /= texcoord.x;
vec3 cN = normal / maxNAbs;
float fittingScale = texture(normalFittingMap, texcoord).a;
cN *= fittingScale;
return (cN * 0.5 + 0.5);
vec2 float32x3_to_oct(in vec3 v) {
vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));
return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);
}
vec3 oct_to_float32x3(in vec2 e) {
vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));
if (v.z < 0) {
v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);
}
return normalize(v);
}
vec3 snorm12x2_to_unorm8x3(vec2 f) {
vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));
float t = floor(u.y / 256.0);
return floor(vec3(
u.x / 16.0,
fract(u.x / 16.0) * 256.0 + t,
u.y - t * 256.0
)) / 255.0;
}
vec2 unorm8x3_to_snorm12x2(vec3 u) {
u *= 255.0;
u.y *= (1.0 / 16.0);
vec2 s = vec2( u.x * 16.0 + floor(u.y),
fract(u.y) * (16.0 * 256.0) + u.z);
return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));
}
float mod289(float x) {
return x - floor(x * (1.0 / 289.0)) * 289.0;
@ -322,7 +341,7 @@ void main(void) {
}
vec4 diffuse = vec4(_color.rgb, alpha);
vec4 normal = vec4(normalize(bestFitNormal(_normal)), 0.5);
vec4 normal = vec4(packNormal(normalize(_normal)), 0.5);
_fragColor0 = diffuse;
_fragColor1 = normal;
@ -355,7 +374,7 @@ void main(void) {
float emissiveAmount = getProceduralColors(diffuse, specular, shininess);
_fragColor0 = vec4(diffuse.rgb, 1.0);
_fragColor1 = vec4(bestFitNormal(normalize(_normal.xyz)), 1.0 - (emissiveAmount / 2.0));
_fragColor1 = vec4(packNormal(normalize(_normal.xyz)), 1.0 - (emissiveAmount / 2.0));
_fragColor2 = vec4(specular, shininess / 128.0);
}
)SCRIBE";

View file

@ -226,10 +226,15 @@ void RenderableWebEntityItem::setSourceUrl(const QString& value) {
}
void RenderableWebEntityItem::setProxyWindow(QWindow* proxyWindow) {
_webSurface->setProxyWindow(proxyWindow);
if (_webSurface) {
_webSurface->setProxyWindow(proxyWindow);
}
}
QObject* RenderableWebEntityItem::getEventHandler() {
if (!_webSurface) {
return nullptr;
}
return _webSurface->getEventHandler();
}

View file

@ -687,15 +687,80 @@ int EntityItem::readEntityDataFromBuffer(const unsigned char* data, int bytesLef
}
}
{ // When we own the simulation we don't accept updates to the entity's transform/velocities
// but since we're using macros below we have to temporarily modify overwriteLocalData.
bool oldOverwrite = overwriteLocalData;
overwriteLocalData = overwriteLocalData && !weOwnSimulation;
READ_ENTITY_PROPERTY(PROP_POSITION, glm::vec3, updatePositionFromNetwork);
READ_ENTITY_PROPERTY(PROP_ROTATION, glm::quat, updateRotationFromNetwork);
READ_ENTITY_PROPERTY(PROP_VELOCITY, glm::vec3, updateVelocityFromNetwork);
READ_ENTITY_PROPERTY(PROP_ANGULAR_VELOCITY, glm::vec3, updateAngularVelocityFromNetwork);
READ_ENTITY_PROPERTY(PROP_ACCELERATION, glm::vec3, setAcceleration);
overwriteLocalData = oldOverwrite;
// we also want to ignore any duplicate packets that have the same "recently updated" values
// as a packet we've already received. This is because we want multiple edits of the same
// information to be idempotent, but if we applied the new physics properties we'd re-simulate
// with small differences in results.
// Because the regular streaming property "setters" only have access to the new value, we've
// made these lambdas that can access other details about the previous updates to suppress
// any duplicates.
// Note: duplicate packets are expected and not wrong. They may be sent for any number of
// reasons and the contract is that the client handles them in an idempotent manner.
auto lastEdited = lastEditedFromBufferAdjusted;
auto customUpdatePositionFromNetwork = [this, lastEdited, overwriteLocalData, weOwnSimulation](glm::vec3 value){
bool simulationChanged = lastEdited > _lastUpdatedPositionTimestamp;
bool valueChanged = value != _lastUpdatedPositionValue;
bool shouldUpdate = overwriteLocalData && !weOwnSimulation && simulationChanged && valueChanged;
if (shouldUpdate) {
updatePositionFromNetwork(value);
_lastUpdatedPositionTimestamp = lastEdited;
_lastUpdatedPositionValue = value;
}
};
auto customUpdateRotationFromNetwork = [this, lastEdited, overwriteLocalData, weOwnSimulation](glm::quat value){
bool simulationChanged = lastEdited > _lastUpdatedRotationTimestamp;
bool valueChanged = value != _lastUpdatedRotationValue;
bool shouldUpdate = overwriteLocalData && !weOwnSimulation && simulationChanged && valueChanged;
if (shouldUpdate) {
updateRotationFromNetwork(value);
_lastUpdatedRotationTimestamp = lastEdited;
_lastUpdatedRotationValue = value;
}
};
auto customUpdateVelocityFromNetwork = [this, lastEdited, overwriteLocalData, weOwnSimulation](glm::vec3 value){
bool simulationChanged = lastEdited > _lastUpdatedVelocityTimestamp;
bool valueChanged = value != _lastUpdatedVelocityValue;
bool shouldUpdate = overwriteLocalData && !weOwnSimulation && simulationChanged && valueChanged;
if (shouldUpdate) {
updateVelocityFromNetwork(value);
_lastUpdatedVelocityTimestamp = lastEdited;
_lastUpdatedVelocityValue = value;
}
};
auto customUpdateAngularVelocityFromNetwork = [this, lastEdited, overwriteLocalData, weOwnSimulation](glm::vec3 value){
bool simulationChanged = lastEdited > _lastUpdatedAngularVelocityTimestamp;
bool valueChanged = value != _lastUpdatedAngularVelocityValue;
bool shouldUpdate = overwriteLocalData && !weOwnSimulation && simulationChanged && valueChanged;
if (shouldUpdate) {
updateAngularVelocityFromNetwork(value);
_lastUpdatedAngularVelocityTimestamp = lastEdited;
_lastUpdatedAngularVelocityValue = value;
}
};
auto customSetAcceleration = [this, lastEdited, overwriteLocalData, weOwnSimulation](glm::vec3 value){
bool simulationChanged = lastEdited > _lastUpdatedAccelerationTimestamp;
bool valueChanged = value != _lastUpdatedAccelerationValue;
bool shouldUpdate = overwriteLocalData && !weOwnSimulation && simulationChanged && valueChanged;
if (shouldUpdate) {
setAcceleration(value);
_lastUpdatedAccelerationTimestamp = lastEdited;
_lastUpdatedAccelerationValue = value;
}
};
READ_ENTITY_PROPERTY(PROP_POSITION, glm::vec3, customUpdatePositionFromNetwork);
READ_ENTITY_PROPERTY(PROP_ROTATION, glm::quat, customUpdateRotationFromNetwork);
READ_ENTITY_PROPERTY(PROP_VELOCITY, glm::vec3, customUpdateVelocityFromNetwork);
READ_ENTITY_PROPERTY(PROP_ANGULAR_VELOCITY, glm::vec3, customUpdateAngularVelocityFromNetwork);
READ_ENTITY_PROPERTY(PROP_ACCELERATION, glm::vec3, customSetAcceleration);
}
READ_ENTITY_PROPERTY(PROP_DIMENSIONS, glm::vec3, updateDimensions);
@ -922,13 +987,11 @@ void EntityItem::simulate(const quint64& now) {
qCDebug(entities) << " ********** EntityItem::simulate() .... SETTING _lastSimulated=" << _lastSimulated;
#endif
if (!hasActions()) {
if (!stepKinematicMotion(timeElapsed)) {
// this entity is no longer moving
// flag it to transition from KINEMATIC to STATIC
_dirtyFlags |= Simulation::DIRTY_MOTION_TYPE;
setAcceleration(Vectors::ZERO);
}
if (!stepKinematicMotion(timeElapsed)) {
// this entity is no longer moving
// flag it to transition from KINEMATIC to STATIC
_dirtyFlags |= Simulation::DIRTY_MOTION_TYPE;
setAcceleration(Vectors::ZERO);
}
_lastSimulated = now;
}
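The custom update lambdas added to readEntityDataFromBuffer above all repeat the same guard. A minimal sketch of that shared pattern, for reading purposes only (the helper name applyIfNewer is hypothetical and not something this commit introduces; quint64 is Qt's 64-bit unsigned type, as in the surrounding code):

template <typename T, typename Setter>
void applyIfNewer(const T& value, quint64 lastEdited, bool allowed,
                  quint64& lastAppliedTimestamp, T& lastAppliedValue, Setter&& setter) {
    // apply only when overwriting is allowed, this edit is newer than the last one
    // we applied, and the value actually differs -- duplicate packets become no-ops
    if (allowed && lastEdited > lastAppliedTimestamp && value != lastAppliedValue) {
        setter(value);
        lastAppliedTimestamp = lastEdited;
        lastAppliedValue = value;
    }
}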

View file

@ -550,6 +550,22 @@ protected:
bool _clientOnly { false };
QUuid _owningAvatarID;
// physics related changes from the network to suppress any duplicates and make
// sure redundant applications are idempotent
glm::vec3 _lastUpdatedPositionValue;
glm::quat _lastUpdatedRotationValue;
glm::vec3 _lastUpdatedVelocityValue;
glm::vec3 _lastUpdatedAngularVelocityValue;
glm::vec3 _lastUpdatedAccelerationValue;
quint64 _lastUpdatedPositionTimestamp { 0 };
quint64 _lastUpdatedRotationTimestamp { 0 };
quint64 _lastUpdatedVelocityTimestamp { 0 };
quint64 _lastUpdatedAngularVelocityTimestamp { 0 };
quint64 _lastUpdatedAccelerationTimestamp { 0 };
};
#endif // hifi_EntityItem_h

View file

@ -119,6 +119,15 @@ TransformObject getTransformObject() {
}
<@endfunc@>
<@func transformModelToWorldDir(cameraTransform, objectTransform, modelDir, worldDir)@>
{ // transformModelToWorldDir
vec3 mr0 = <$objectTransform$>._modelInverse[0].xyz;
vec3 mr1 = <$objectTransform$>._modelInverse[1].xyz;
vec3 mr2 = <$objectTransform$>._modelInverse[2].xyz;
<$worldDir$> = vec3(dot(mr0, <$modelDir$>), dot(mr1, <$modelDir$>), dot(mr2, <$modelDir$>));
}
<@endfunc@>
<@func transformModelToEyeDir(cameraTransform, objectTransform, modelDir, eyeDir)@>
{ // transformModelToEyeDir
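A note on the new macro: taking dot products against the columns of _modelInverse computes worldDir = transpose(modelInverse) * modelDir, i.e. the inverse-transpose of the model matrix applied to the direction, which keeps normals and tangents correct under non-uniform scale. The vertex shaders below switch from transformModelToEyeDir to transformModelToWorldDir so normals reach the G-buffer in world space, consistent with the lighting shaders further down dropping their invViewMat rotation of frag.normal.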

View file

@ -401,11 +401,10 @@ glm::vec3 CharacterController::getLinearVelocity() const {
}
glm::vec3 CharacterController::getVelocityChange() const {
glm::vec3 velocity(0.0f);
if (_rigidBody) {
velocity = bulletToGLM(_rigidBody->getLinearVelocity());
return bulletToGLM(_velocityChange);
}
return velocity;
return glm::vec3(0.0f);
}
void CharacterController::clearMotors() {

View file

@ -137,7 +137,7 @@ public:
}
// will query the underlying hmd api to compute the most recent head pose
virtual void beginFrameRender(uint32_t frameIndex) {}
virtual bool beginFrameRender(uint32_t frameIndex) { return true; }
// returns a copy of the most recent head pose, computed via updateHeadPose
virtual glm::mat4 getHeadPose() const {
@ -170,6 +170,10 @@ public:
signals:
void recommendedFramebufferSizeChanged(const QSize & size);
// Indicates that this display plugin is no longer valid for use.
// For instance if a user exits Oculus Home or Steam VR while
// using the corresponding plugin, that plugin should be disabled.
void outputDeviceLost();
protected:
void incrementPresentCount();

View file

@ -62,11 +62,10 @@ PluginManager::PluginManager() {
extern DisplayPluginList getDisplayPlugins();
extern InputPluginList getInputPlugins();
extern void saveInputPluginSettings(const InputPluginList& plugins);
static DisplayPluginList displayPlugins;
const DisplayPluginList& PluginManager::getDisplayPlugins() {
static DisplayPluginList displayPlugins;
static std::once_flag once;
std::call_once(once, [&] {
// Grab the built in plugins
displayPlugins = ::getDisplayPlugins();
@ -90,6 +89,16 @@ const DisplayPluginList& PluginManager::getDisplayPlugins() {
return displayPlugins;
}
void PluginManager::disableDisplayPlugin(const QString& name) {
for (size_t i = 0; i < displayPlugins.size(); ++i) {
if (displayPlugins[i]->getName() == name) {
displayPlugins.erase(displayPlugins.begin() + i);
break;
}
}
}
const InputPluginList& PluginManager::getInputPlugins() {
static InputPluginList inputPlugins;
static std::once_flag once;

View file

@ -17,6 +17,7 @@ public:
PluginManager();
const DisplayPluginList& getDisplayPlugins();
void disableDisplayPlugin(const QString& name);
const InputPluginList& getInputPlugins();
void saveSettings();
};

View file

@ -51,4 +51,70 @@ float packUnlit() {
return FRAG_PACK_UNLIT;
}
vec2 signNotZero(vec2 v) {
return vec2((v.x >= 0.0) ? +1.0 : -1.0, (v.y >= 0.0) ? +1.0 : -1.0);
}
vec2 float32x3_to_oct(in vec3 v) {
vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));
return ((v.z <= 0.0) ? ((1.0 - abs(p.yx)) * signNotZero(p)) : p);
}
vec3 oct_to_float32x3(in vec2 e) {
vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));
if (v.z < 0) {
v.xy = (1.0 - abs(v.yx)) * signNotZero(v.xy);
}
return normalize(v);
}
vec3 snorm12x2_to_unorm8x3(vec2 f) {
vec2 u = vec2(round(clamp(f, -1.0, 1.0) * 2047.0 + 2047.0));
float t = floor(u.y / 256.0);
return floor(vec3(
u.x / 16.0,
fract(u.x / 16.0) * 256.0 + t,
u.y - t * 256.0
)) / 255.0;
}
vec2 unorm8x3_to_snorm12x2(vec3 u) {
u *= 255.0;
u.y *= (1.0 / 16.0);
vec2 s = vec2( u.x * 16.0 + floor(u.y),
fract(u.y) * (16.0 * 256.0) + u.z);
return clamp(s * (1.0 / 2047.0) - 1.0, vec2(-1.0), vec2(1.0));
}
uniform sampler2D normalFittingMap;
vec3 bestFitNormal(vec3 normal) {
vec3 absNorm = abs(normal);
float maxNAbs = max(absNorm.z, max(absNorm.x, absNorm.y));
vec2 texcoord = (absNorm.z < maxNAbs ?
(absNorm.y < maxNAbs ? absNorm.yz : absNorm.xz) :
absNorm.xy);
texcoord = (texcoord.x < texcoord.y ? texcoord.yx : texcoord.xy);
texcoord.y /= texcoord.x;
vec3 cN = normal / maxNAbs;
float fittingScale = texture(normalFittingMap, texcoord).a;
cN *= fittingScale;
return (cN * 0.5 + 0.5);
}
vec3 packNormal(in vec3 n) {
return snorm12x2_to_unorm8x3(float32x3_to_oct(n));
}
vec3 unpackNormal(in vec3 p) {
return oct_to_float32x3(unorm8x3_to_snorm12x2(p));
}
<@endif@>

View file

@ -100,7 +100,7 @@ DeferredFragment unpackDeferredFragmentNoPosition(vec2 texcoord) {
frag.obscurance = texture(obscuranceMap, texcoord).x;
// Unpack the normal from the map
frag.normal = normalize(frag.normalVal.xyz * 2.0 - vec3(1.0));
frag.normal = unpackNormal(frag.normalVal.xyz);
frag.roughness = frag.normalVal.a;
// Diffuse color and unpack the mode and the metallicness

View file

@ -17,24 +17,6 @@ layout(location = 0) out vec4 _fragColor0;
layout(location = 1) out vec4 _fragColor1;
layout(location = 2) out vec4 _fragColor2;
uniform sampler2D normalFittingMap;
vec3 bestFitNormal(vec3 normal) {
vec3 absNorm = abs(normal);
float maxNAbs = max(absNorm.z, max(absNorm.x, absNorm.y));
vec2 texcoord = (absNorm.z < maxNAbs ?
(absNorm.y < maxNAbs ? absNorm.yz : absNorm.xz) :
absNorm.xy);
texcoord = (texcoord.x < texcoord.y ? texcoord.yx : texcoord.xy);
texcoord.y /= texcoord.x;
vec3 cN = normal / maxNAbs;
float fittingScale = texture(normalFittingMap, texcoord).a;
cN *= fittingScale;
return (cN * 0.5 + 0.5);
}
// the alpha threshold
const float alphaThreshold = 0.5;
@ -55,7 +37,7 @@ void packDeferredFragment(vec3 normal, float alpha, vec3 albedo, float roughness
discard;
}
_fragColor0 = vec4(albedo, packShadedMetallic(metallic));
_fragColor1 = vec4(bestFitNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor2 = vec4(emissive, occlusion);
}
@ -65,7 +47,7 @@ void packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 albedo, float r
discard;
}
_fragColor0 = vec4(albedo, packLightmappedMetallic(metallic));
_fragColor1 = vec4(bestFitNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor1 = vec4(packNormal(normal), clamp(roughness, 0.0, 1.0));
_fragColor2 = vec4(emissive, 1.0);
}
@ -74,7 +56,7 @@ void packDeferredFragmentUnlit(vec3 normal, float alpha, vec3 color) {
discard;
}
_fragColor0 = vec4(color, packUnlit());
_fragColor1 = vec4(bestFitNormal(normal), 1.0);
_fragColor1 = vec4(packNormal(normal), 1.0);
//_fragColor2 = vec4(vec3(0.0), 1.0); // If unlit, do not worry about the emissive color target
}

View file

@ -66,7 +66,8 @@ vec3 evalGlobalSpecularIrradiance(Light light, vec3 fragEyeDir, vec3 fragNormal,
// prepareGlobalLight
// Transform directions to worldspace
vec3 fragNormal = vec3(invViewMat * vec4(normal, 0.0));
// vec3 fragNormal = vec3(invViewMat * vec4(normal, 0.0));
vec3 fragNormal = vec3((normal));
vec3 fragEyeVector = vec3(invViewMat * vec4(-position, 0.0));
vec3 fragEyeDir = normalize(fragEyeVector);

View file

@ -59,8 +59,8 @@ void FramebufferCache::createPrimaryFramebuffer() {
_deferredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
_deferredFramebufferDepthColor = gpu::FramebufferPointer(gpu::Framebuffer::create());
// auto colorFormat = gpu::Element::COLOR_RGBA_32;
auto colorFormat = gpu::Element::COLOR_SRGBA_32;
auto linearFormat = gpu::Element::COLOR_RGBA_32;
auto width = _frameBufferSize.width();
auto height = _frameBufferSize.height();
@ -70,7 +70,8 @@ void FramebufferCache::createPrimaryFramebuffer() {
_primaryFramebuffer->setRenderBuffer(0, _primaryColorTexture);
_deferredColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
_deferredNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
_deferredNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(linearFormat, width, height, defaultSampler));
_deferredSpecularTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
_deferredFramebuffer->setRenderBuffer(0, _deferredColorTexture);

View file

@ -36,5 +36,5 @@ void main(void) {
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
<$transformModelToWorldDir(cam, obj, inNormal.xyz, _normal)$>
}

View file

@ -39,6 +39,6 @@ void main(void) {
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
<$transformModelToWorldDir(cam, obj, inNormal.xyz, _normal)$>
}

View file

@ -39,6 +39,6 @@ void main(void) {
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
<$transformModelToEyeDir(cam, obj, inTangent.xyz, _tangent)$>
<$transformModelToWorldDir(cam, obj, inNormal.xyz, _normal)$>
<$transformModelToWorldDir(cam, obj, inTangent.xyz, _tangent)$>
}

View file

@ -39,6 +39,6 @@ void main(void) {
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
<$transformModelToEyeDir(cam, obj, inTangent.xyz, _tangent)$>
<$transformModelToWorldDir(cam, obj, inNormal.xyz, _normal)$>
<$transformModelToWorldDir(cam, obj, inTangent.xyz, _tangent)$>
}

View file

@ -32,5 +32,5 @@ void main(void) {
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
<$transformModelToWorldDir(cam, obj, inNormal.xyz, _normal)$>
}

View file

@ -61,7 +61,7 @@ void main(void) {
vec3 fragLightDir = fragLightVec / fragLightDistance;
// Eval shading
vec3 fragNormal = vec3(invViewMat * vec4(frag.normal, 0.0));
vec3 fragNormal = vec3(frag.normal);
vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);
vec3 fragEyeDir = normalize(fragEyeVector.xyz);
vec4 shading = evalFragShading(fragNormal, fragLightDir, fragEyeDir, frag.metallic, frag.specular, frag.roughness);

View file

@ -27,5 +27,5 @@ void main() {
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal.xyz)$>
<$transformModelToWorldDir(cam, obj, inNormal.xyz, _normal.xyz)$>
}

View file

@ -34,5 +34,5 @@ void main(void) {
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
<$transformModelToWorldDir(cam, obj, inNormal.xyz, _normal)$>
}

View file

@ -45,6 +45,5 @@ void main(void) {
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, position, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, interpolatedNormal.xyz, interpolatedNormal.xyz)$>
_normal = interpolatedNormal.xyz;
<$transformModelToWorldDir(cam, obj, interpolatedNormal.xyz, _normal.xyz)$>
}

View file

@ -50,8 +50,8 @@ void main(void) {
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, position, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, interpolatedNormal.xyz, interpolatedNormal.xyz)$>
<$transformModelToEyeDir(cam, obj, interpolatedTangent.xyz, interpolatedTangent.xyz)$>
<$transformModelToWorldDir(cam, obj, interpolatedNormal.xyz, interpolatedNormal.xyz)$>
<$transformModelToWorldDir(cam, obj, interpolatedTangent.xyz, interpolatedTangent.xyz)$>
_normal = interpolatedNormal.xyz;
_tangent = interpolatedTangent.xyz;

View file

@ -68,7 +68,7 @@ void main(void) {
}
// Eval shading
vec3 fragNormal = vec3(invViewMat * vec4(frag.normal, 0.0));
vec3 fragNormal = vec3(frag.normal);
vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);
vec3 fragEyeDir = normalize(fragEyeVector.xyz);
vec4 shading = evalFragShading(fragNormal, fragLightDir, fragEyeDir, frag.metallic, frag.specular, frag.roughness);

View file

@ -30,6 +30,6 @@ void main(void) {
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, varNormal)$>
<$transformModelToWorldDir(cam, obj, inNormal.xyz, varNormal)$>
varPosition = inPosition.xyz;
}

View file

@ -17,7 +17,7 @@ void OculusBaseDisplayPlugin::resetSensors() {
_currentRenderFrameInfo.renderPose = glm::mat4(); // identity
}
void OculusBaseDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
bool OculusBaseDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
_currentRenderFrameInfo = FrameInfo();
_currentRenderFrameInfo.sensorSampleTime = ovr_GetTimeInSeconds();
_currentRenderFrameInfo.predictedDisplayTime = ovr_GetPredictedDisplayTime(_session, frameIndex);
@ -26,6 +26,7 @@ void OculusBaseDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
_currentRenderFrameInfo.presentPose = _currentRenderFrameInfo.renderPose;
Lock lock(_mutex);
_frameInfos[frameIndex] = _currentRenderFrameInfo;
return true;
}
bool OculusBaseDisplayPlugin::isSupported() const {

View file

@ -16,11 +16,11 @@
class OculusBaseDisplayPlugin : public HmdDisplayPlugin {
using Parent = HmdDisplayPlugin;
public:
virtual bool isSupported() const override;
bool isSupported() const override;
// Stereo specific methods
virtual void resetSensors() override final;
virtual void beginFrameRender(uint32_t frameIndex) override;
void resetSensors() override final;
bool beginFrameRender(uint32_t frameIndex) override;
float getTargetFrameRate() const override { return _hmdDesc.DisplayRefreshRate; }

View file

@ -40,13 +40,14 @@ void OculusLegacyDisplayPlugin::resetSensors() {
ovrHmd_RecenterPose(_hmd);
}
void OculusLegacyDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
bool OculusLegacyDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
_currentRenderFrameInfo = FrameInfo();
_currentRenderFrameInfo.predictedDisplayTime = _currentRenderFrameInfo.sensorSampleTime = ovr_GetTimeInSeconds();
_trackingState = ovrHmd_GetTrackingState(_hmd, _currentRenderFrameInfo.predictedDisplayTime);
_currentRenderFrameInfo.rawRenderPose = _currentRenderFrameInfo.renderPose = toGlm(_trackingState.HeadPose.ThePose);
Lock lock(_mutex);
_frameInfos[frameIndex] = _currentRenderFrameInfo;
return true;
}
bool OculusLegacyDisplayPlugin::isSupported() const {

View file

@ -27,7 +27,7 @@ public:
// Stereo specific methods
void resetSensors() override;
void beginFrameRender(uint32_t frameIndex) override;
bool beginFrameRender(uint32_t frameIndex) override;
float getTargetFrameRate() const override;

View file

@ -121,7 +121,12 @@ void OpenVrDisplayPlugin::resetSensors() {
}
void OpenVrDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
bool OpenVrDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
handleOpenVrEvents();
if (openVrQuitRequested()) {
emit outputDeviceLost();
return false;
}
double displayFrequency = _system->GetFloatTrackedDeviceProperty(vr::k_unTrackedDeviceIndex_Hmd, vr::Prop_DisplayFrequency_Float);
double frameDuration = 1.f / displayFrequency;
double vsyncToPhotons = _system->GetFloatTrackedDeviceProperty(vr::k_unTrackedDeviceIndex_Hmd, vr::Prop_SecondsFromVsyncToPhotons_Float);
@ -148,6 +153,7 @@ void OpenVrDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
Lock lock(_mutex);
_frameInfos[frameIndex] = _currentRenderFrameInfo;
return true;
}
void OpenVrDisplayPlugin::hmdPresent() {

View file

@ -18,16 +18,16 @@ const float TARGET_RATE_OpenVr = 90.0f; // FIXME: get from sdk tracked device p
class OpenVrDisplayPlugin : public HmdDisplayPlugin {
using Parent = HmdDisplayPlugin;
public:
virtual bool isSupported() const override;
virtual const QString& getName() const override { return NAME; }
bool isSupported() const override;
const QString& getName() const override { return NAME; }
virtual float getTargetFrameRate() const override { return TARGET_RATE_OpenVr; }
float getTargetFrameRate() const override { return TARGET_RATE_OpenVr; }
virtual void customizeContext() override;
void customizeContext() override;
// Stereo specific methods
virtual void resetSensors() override;
virtual void beginFrameRender(uint32_t frameIndex) override;
void resetSensors() override;
bool beginFrameRender(uint32_t frameIndex) override;
void cycleDebugOutput() override { _lockCurrentTexture = !_lockCurrentTexture; }
protected:

View file

@ -26,6 +26,11 @@ using Lock = std::unique_lock<Mutex>;
static int refCount { 0 };
static Mutex mutex;
static vr::IVRSystem* activeHmd { nullptr };
static bool _openVrQuitRequested { false };
bool openVrQuitRequested() {
return _openVrQuitRequested;
}
static const uint32_t RELEASE_OPENVR_HMD_DELAY_MS = 5000;
@ -56,17 +61,17 @@ vr::IVRSystem* acquireOpenVrSystem() {
if (hmdPresent) {
Lock lock(mutex);
if (!activeHmd) {
qCDebug(displayplugins) << "openvr: No vr::IVRSystem instance active, building";
qCDebug(displayplugins) << "OpenVR: No vr::IVRSystem instance active, building";
vr::EVRInitError eError = vr::VRInitError_None;
activeHmd = vr::VR_Init(&eError, vr::VRApplication_Scene);
qCDebug(displayplugins) << "openvr display: HMD is " << activeHmd << " error is " << eError;
qCDebug(displayplugins) << "OpenVR display: HMD is " << activeHmd << " error is " << eError;
}
if (activeHmd) {
qCDebug(displayplugins) << "openvr: incrementing refcount";
qCDebug(displayplugins) << "OpenVR: incrementing refcount";
++refCount;
}
} else {
qCDebug(displayplugins) << "openvr: no hmd present";
qCDebug(displayplugins) << "OpenVR: no hmd present";
}
return activeHmd;
}
@ -74,12 +79,38 @@ vr::IVRSystem* acquireOpenVrSystem() {
void releaseOpenVrSystem() {
if (activeHmd) {
Lock lock(mutex);
qCDebug(displayplugins) << "openvr: decrementing refcount";
qCDebug(displayplugins) << "OpenVR: decrementing refcount";
--refCount;
if (0 == refCount) {
qCDebug(displayplugins) << "openvr: zero refcount, deallocate VR system";
qCDebug(displayplugins) << "OpenVR: zero refcount, deallocate VR system";
vr::VR_Shutdown();
_openVrQuitRequested = false;
activeHmd = nullptr;
}
}
}
void handleOpenVrEvents() {
if (!activeHmd) {
return;
}
Lock lock(mutex);
if (!activeHmd) {
return;
}
vr::VREvent_t event;
while (activeHmd->PollNextEvent(&event, sizeof(event))) {
switch (event.eventType) {
case vr::VREvent_Quit:
_openVrQuitRequested = true;
activeHmd->AcknowledgeQuit_Exiting();
break;
default:
break;
}
qDebug() << "OpenVR: Event " << event.eventType;
}
}

View file

@ -16,6 +16,8 @@ bool openVrSupported();
vr::IVRSystem* acquireOpenVrSystem();
void releaseOpenVrSystem();
void handleOpenVrEvents();
bool openVrQuitRequested();
template<typename F>
void openvr_for_each_eye(F f) {

View file

@ -214,6 +214,11 @@ void ViveControllerManager::renderHand(const controller::Pose& pose, gpu::Batch&
void ViveControllerManager::pluginUpdate(float deltaTime, const controller::InputCalibrationData& inputCalibrationData) {
auto userInputMapper = DependencyManager::get<controller::UserInputMapper>();
handleOpenVrEvents();
if (openVrQuitRequested()) {
deactivate();
return;
}
// because update mutates the internal state we need to lock
userInputMapper->withLock([&, this]() {

View file

@ -0,0 +1,19 @@
var audioOptions = {
volume: 1.0,
loop: true,
position: MyAvatar.position
}
//var sineWave = Script.resolvePath("./1760sine.wav"); // use relative file
var sineWave = "https://s3-us-west-1.amazonaws.com/highfidelity-dev/1760sine.wav"; // use file from S3
var sound = SoundCache.getSound(sineWave);
var injectorCount = 0;
var MAX_INJECTOR_COUNT = 40;
Script.update.connect(function() {
if (sound.downloaded && injectorCount < MAX_INJECTOR_COUNT) {
injectorCount++;
print("stating injector:" + injectorCount);
Audio.playSound(sound, audioOptions);
}
});

View file

@ -9,10 +9,12 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <QVector>
#include "VHACDUtil.h"
const float COLLISION_TETRAHEDRON_SCALE = 0.25f;
#include <unordered_map>
#include <QVector>
#include <NumericalConstants.h>
// FBXReader jumbles the order of the meshes by reading them back out of a hashtable. This will put
@ -27,13 +29,16 @@ void reSortFBXGeometryMeshes(FBXGeometry& geometry) {
// Read all the meshes from provided FBX file
bool vhacd::VHACDUtil::loadFBX(const QString filename, FBXGeometry& result) {
if (_verbose) {
qDebug() << "reading FBX file =" << filename << "...";
}
// open the fbx file
QFile fbx(filename);
if (!fbx.open(QIODevice::ReadOnly)) {
qWarning() << "unable to open FBX file =" << filename;
return false;
}
std::cout << "Reading FBX.....\n";
try {
QByteArray fbxContents = fbx.readAll();
FBXGeometry* geom;
@ -42,14 +47,14 @@ bool vhacd::VHACDUtil::loadFBX(const QString filename, FBXGeometry& result) {
} else if (filename.toLower().endsWith(".fbx")) {
geom = readFBX(fbxContents, QVariantHash(), filename);
} else {
qDebug() << "unknown file extension";
qWarning() << "file has unknown extension" << filename;
return false;
}
result = *geom;
reSortFBXGeometryMeshes(result);
} catch (const QString& error) {
qDebug() << "Error reading " << filename << ": " << error;
qWarning() << "error reading" << filename << ":" << error;
return false;
}
@ -57,68 +62,62 @@ bool vhacd::VHACDUtil::loadFBX(const QString filename, FBXGeometry& result) {
}
unsigned int getTrianglesInMeshPart(const FBXMeshPart &meshPart, std::vector<int>& triangles) {
// append all the triangles (and converted quads) from this mesh-part to triangles
std::vector<int> meshPartTriangles = meshPart.triangleIndices.toStdVector();
triangles.insert(triangles.end(), meshPartTriangles.begin(), meshPartTriangles.end());
// convert quads to triangles
unsigned int triangleCount = meshPart.triangleIndices.size() / 3;
unsigned int quadCount = meshPart.quadIndices.size() / 4;
for (unsigned int i = 0; i < quadCount; i++) {
unsigned int p0Index = meshPart.quadIndices[i * 4];
unsigned int p1Index = meshPart.quadIndices[i * 4 + 1];
unsigned int p2Index = meshPart.quadIndices[i * 4 + 2];
unsigned int p3Index = meshPart.quadIndices[i * 4 + 3];
// split each quad into two triangles
triangles.push_back(p0Index);
triangles.push_back(p1Index);
triangles.push_back(p2Index);
triangles.push_back(p0Index);
triangles.push_back(p2Index);
triangles.push_back(p3Index);
triangleCount += 2;
void getTrianglesInMeshPart(const FBXMeshPart &meshPart, std::vector<int>& triangleIndices) {
// append triangle indices
triangleIndices.reserve(triangleIndices.size() + (size_t)meshPart.triangleIndices.size());
for (auto index : meshPart.triangleIndices) {
triangleIndices.push_back(index);
}
return triangleCount;
// convert quads to triangles
const uint32_t QUAD_STRIDE = 4;
uint32_t numIndices = (uint32_t)meshPart.quadIndices.size();
for (uint32_t i = 0; i < numIndices; i += QUAD_STRIDE) {
uint32_t p0Index = meshPart.quadIndices[i];
uint32_t p1Index = meshPart.quadIndices[i + 1];
uint32_t p2Index = meshPart.quadIndices[i + 2];
uint32_t p3Index = meshPart.quadIndices[i + 3];
// split each quad into two triangles
triangleIndices.push_back(p0Index);
triangleIndices.push_back(p1Index);
triangleIndices.push_back(p2Index);
triangleIndices.push_back(p0Index);
triangleIndices.push_back(p2Index);
triangleIndices.push_back(p3Index);
}
}
void vhacd::VHACDUtil::fattenMeshes(const FBXMesh& mesh, FBXMesh& result,
unsigned int& meshPartCount,
unsigned int startMeshIndex, unsigned int endMeshIndex) const {
void vhacd::VHACDUtil::fattenMesh(const FBXMesh& mesh, const glm::mat4& geometryOffset, FBXMesh& result) const {
// this is used to make meshes generated from a heightfield collidable. Each triangle
// is converted into a tetrahedron and made into its own mesh-part.
std::vector<int> triangles;
std::vector<int> triangleIndices;
foreach (const FBXMeshPart &meshPart, mesh.parts) {
if (meshPartCount < startMeshIndex || meshPartCount >= endMeshIndex) {
meshPartCount++;
continue;
}
getTrianglesInMeshPart(meshPart, triangles);
getTrianglesInMeshPart(meshPart, triangleIndices);
}
auto triangleCount = triangles.size() / 3;
if (triangleCount == 0) {
if (triangleIndices.size() == 0) {
return;
}
int indexStartOffset = result.vertices.size();
// new mesh gets the transformed points from the original
glm::mat4 totalTransform = geometryOffset * mesh.modelTransform;
for (int i = 0; i < mesh.vertices.size(); i++) {
// apply the source mesh's transform to the points
glm::vec4 v = mesh.modelTransform * glm::vec4(mesh.vertices[i], 1.0f);
glm::vec4 v = totalTransform * glm::vec4(mesh.vertices[i], 1.0f);
result.vertices += glm::vec3(v);
}
// turn each triangle into a tetrahedron
for (unsigned int i = 0; i < triangleCount; i++) {
int index0 = triangles[i * 3] + indexStartOffset;
int index1 = triangles[i * 3 + 1] + indexStartOffset;
int index2 = triangles[i * 3 + 2] + indexStartOffset;
const uint32_t TRIANGLE_STRIDE = 3;
const float COLLISION_TETRAHEDRON_SCALE = 0.25f;
for (uint32_t i = 0; i < triangleIndices.size(); i += TRIANGLE_STRIDE) {
int index0 = triangleIndices[i] + indexStartOffset;
int index1 = triangleIndices[i + 1] + indexStartOffset;
int index2 = triangleIndices[i + 2] + indexStartOffset;
// TODO: skip triangles with a normal that points more negative-y than positive-y
@ -155,156 +154,304 @@ void vhacd::VHACDUtil::fattenMeshes(const FBXMesh& mesh, FBXMesh& result,
}
}
AABox getAABoxForMeshPart(const FBXMesh& mesh, const FBXMeshPart &meshPart) {
AABox aaBox;
unsigned int triangleCount = meshPart.triangleIndices.size() / 3;
for (unsigned int i = 0; i < triangleCount; ++i) {
aaBox += mesh.vertices[meshPart.triangleIndices[i * 3]];
aaBox += mesh.vertices[meshPart.triangleIndices[i * 3 + 1]];
aaBox += mesh.vertices[meshPart.triangleIndices[i * 3 + 2]];
const int TRIANGLE_STRIDE = 3;
for (int i = 0; i < meshPart.triangleIndices.size(); i += TRIANGLE_STRIDE) {
aaBox += mesh.vertices[meshPart.triangleIndices[i]];
aaBox += mesh.vertices[meshPart.triangleIndices[i + 1]];
aaBox += mesh.vertices[meshPart.triangleIndices[i + 2]];
}
unsigned int quadCount = meshPart.quadIndices.size() / 4;
for (unsigned int i = 0; i < quadCount; ++i) {
aaBox += mesh.vertices[meshPart.quadIndices[i * 4]];
aaBox += mesh.vertices[meshPart.quadIndices[i * 4 + 1]];
aaBox += mesh.vertices[meshPart.quadIndices[i * 4 + 2]];
aaBox += mesh.vertices[meshPart.quadIndices[i * 4 + 3]];
const int QUAD_STRIDE = 4;
for (int i = 0; i < meshPart.quadIndices.size(); i += QUAD_STRIDE) {
aaBox += mesh.vertices[meshPart.quadIndices[i]];
aaBox += mesh.vertices[meshPart.quadIndices[i + 1]];
aaBox += mesh.vertices[meshPart.quadIndices[i + 2]];
aaBox += mesh.vertices[meshPart.quadIndices[i + 3]];
}
return aaBox;
}
class TriangleEdge {
public:
TriangleEdge() {}
TriangleEdge(uint32_t A, uint32_t B) {
setIndices(A, B);
}
void setIndices(uint32_t A, uint32_t B) {
if (A < B) {
_indexA = A;
_indexB = B;
} else {
_indexA = B;
_indexB = A;
}
}
bool operator==(const TriangleEdge& other) const {
return _indexA == other._indexA && _indexB == other._indexB;
}
uint32_t getIndexA() const { return _indexA; }
uint32_t getIndexB() const { return _indexB; }
private:
uint32_t _indexA { (uint32_t)(-1) };
uint32_t _indexB { (uint32_t)(-1) };
};
namespace std {
template <>
struct hash<TriangleEdge> {
std::size_t operator()(const TriangleEdge& edge) const {
// use Cantor's pairing function to generate a hash of ZxZ --> Z
uint32_t ab = edge.getIndexA() + edge.getIndexB();
return hash<int>()((ab * (ab + 1)) / 2 + edge.getIndexB());
}
};
}
// returns false if any edge has only one adjacent triangle
bool isClosedManifold(const std::vector<int>& triangleIndices) {
using EdgeList = std::unordered_map<TriangleEdge, int>;
EdgeList edges;
// count the triangles for each edge
const uint32_t TRIANGLE_STRIDE = 3;
for (uint32_t i = 0; i < triangleIndices.size(); i += TRIANGLE_STRIDE) {
TriangleEdge edge;
// the triangle's indices are stored in sequential order
for (uint32_t j = 0; j < 3; ++j) {
edge.setIndices(triangleIndices[i + j], triangleIndices[i + ((j + 1) % 3)]);
EdgeList::iterator edgeEntry = edges.find(edge);
if (edgeEntry == edges.end()) {
edges.insert(std::pair<TriangleEdge, uint32_t>(edge, 1));
} else {
edgeEntry->second += 1;
}
}
}
// scan for outside edge
for (auto& edgeEntry : edges) {
if (edgeEntry.second == 1) {
return false;
}
}
return true;
}
void vhacd::VHACDUtil::getConvexResults(VHACD::IVHACD* convexifier, FBXMesh& resultMesh) const {
// Number of hulls for this input meshPart
uint32_t numHulls = convexifier->GetNConvexHulls();
if (_verbose) {
qDebug() << " hulls =" << numHulls;
}
// create an output meshPart for each convex hull
const uint32_t TRIANGLE_STRIDE = 3;
const uint32_t POINT_STRIDE = 3;
for (uint32_t j = 0; j < numHulls; j++) {
VHACD::IVHACD::ConvexHull hull;
convexifier->GetConvexHull(j, hull);
resultMesh.parts.append(FBXMeshPart());
FBXMeshPart& resultMeshPart = resultMesh.parts.last();
int hullIndexStart = resultMesh.vertices.size();
resultMesh.vertices.reserve(hullIndexStart + hull.m_nPoints);
uint32_t numIndices = hull.m_nPoints * POINT_STRIDE;
for (uint32_t i = 0; i < numIndices; i += POINT_STRIDE) {
float x = hull.m_points[i];
float y = hull.m_points[i + 1];
float z = hull.m_points[i + 2];
resultMesh.vertices.append(glm::vec3(x, y, z));
}
numIndices = hull.m_nTriangles * TRIANGLE_STRIDE;
resultMeshPart.triangleIndices.reserve(resultMeshPart.triangleIndices.size() + numIndices);
for (uint32_t i = 0; i < numIndices; i += TRIANGLE_STRIDE) {
resultMeshPart.triangleIndices.append(hull.m_triangles[i] + hullIndexStart);
resultMeshPart.triangleIndices.append(hull.m_triangles[i + 1] + hullIndexStart);
resultMeshPart.triangleIndices.append(hull.m_triangles[i + 2] + hullIndexStart);
}
if (_verbose) {
qDebug() << " hull" << j << " vertices =" << hull.m_nPoints
<< " triangles =" << hull.m_nTriangles
<< " FBXMeshVertices =" << resultMesh.vertices.size();
}
}
}
float computeDt(uint64_t start) {
return (float)(usecTimestampNow() - start) / (float)USECS_PER_SECOND;
}
bool vhacd::VHACDUtil::computeVHACD(FBXGeometry& geometry,
VHACD::IVHACD::Parameters params,
FBXGeometry& result,
int startMeshIndex,
int endMeshIndex,
float minimumMeshSize, float maximumMeshSize) {
if (_verbose) {
qDebug() << "meshes =" << geometry.meshes.size();
}
// count the mesh-parts
int meshCount = 0;
int numParts = 0;
foreach (const FBXMesh& mesh, geometry.meshes) {
meshCount += mesh.parts.size();
numParts += mesh.parts.size();
}
if (_verbose) {
qDebug() << "total parts =" << numParts;
}
VHACD::IVHACD * interfaceVHACD = VHACD::CreateVHACD();
if (startMeshIndex < 0) {
startMeshIndex = 0;
}
if (endMeshIndex < 0) {
endMeshIndex = meshCount;
}
std::cout << "Performing V-HACD computation on " << endMeshIndex - startMeshIndex << " meshes ..... " << std::endl;
VHACD::IVHACD * convexifier = VHACD::CreateVHACD();
result.meshExtents.reset();
result.meshes.append(FBXMesh());
FBXMesh &resultMesh = result.meshes.last();
int count = 0;
const uint32_t POINT_STRIDE = 3;
const uint32_t TRIANGLE_STRIDE = 3;
int meshIndex = 0;
int validPartsFound = 0;
foreach (const FBXMesh& mesh, geometry.meshes) {
// find duplicate points
int numDupes = 0;
std::vector<int> dupeIndexMap;
dupeIndexMap.reserve(mesh.vertices.size());
for (int i = 0; i < mesh.vertices.size(); ++i) {
dupeIndexMap.push_back(i);
for (int j = 0; j < i; ++j) {
float distance = glm::distance2(mesh.vertices[i], mesh.vertices[j]);
const float MAX_DUPE_DISTANCE_SQUARED = 0.000001f;
if (distance < MAX_DUPE_DISTANCE_SQUARED) {
dupeIndexMap[i] = j;
++numDupes;
break;
}
}
}
// each mesh has its own transform to move it to model-space
std::vector<glm::vec3> vertices;
glm::mat4 totalTransform = geometry.offset * mesh.modelTransform;
foreach (glm::vec3 vertex, mesh.vertices) {
vertices.push_back(glm::vec3(mesh.modelTransform * glm::vec4(vertex, 1.0f)));
vertices.push_back(glm::vec3(totalTransform * glm::vec4(vertex, 1.0f)));
}
uint32_t numVertices = (uint32_t)vertices.size();
if (_verbose) {
qDebug() << "mesh" << meshIndex << ": "
<< " parts =" << mesh.parts.size() << " clusters =" << mesh.clusters.size()
<< " vertices =" << numVertices;
}
++meshIndex;
std::vector<int> openParts;
int partIndex = 0;
std::vector<int> triangleIndices;
foreach (const FBXMeshPart &meshPart, mesh.parts) {
if (count < startMeshIndex || count >= endMeshIndex) {
count ++;
continue;
}
qDebug() << "--------------------";
std::vector<int> triangles;
unsigned int triangleCount = getTrianglesInMeshPart(meshPart, triangles);
triangleIndices.clear();
getTrianglesInMeshPart(meshPart, triangleIndices);
// only process mesh parts with triangles
if (triangles.size() <= 0) {
qDebug() << " Skipping (no triangles)...";
count++;
if (triangleIndices.size() <= 0) {
if (_verbose) {
qDebug() << " skip part" << partIndex << "(zero triangles)";
}
++partIndex;
continue;
}
auto nPoints = vertices.size();
// collapse dupe indices
for (auto& index : triangleIndices) {
index = dupeIndexMap[index];
}
AABox aaBox = getAABoxForMeshPart(mesh, meshPart);
const float largestDimension = aaBox.getLargestDimension();
qDebug() << "Mesh " << count << " -- " << nPoints << " points, " << triangleCount << " triangles, "
<< "size =" << largestDimension;
if (largestDimension < minimumMeshSize) {
qDebug() << " Skipping (too small)...";
count++;
if (_verbose) {
qDebug() << " skip part" << partIndex << ": dimension =" << largestDimension << "(too small)";
}
++partIndex;
continue;
}
if (maximumMeshSize > 0.0f && largestDimension > maximumMeshSize) {
qDebug() << " Skipping (too large)...";
count++;
if (_verbose) {
qDebug() << " skip part" << partIndex << ": dimension =" << largestDimension << "(too large)";
}
++partIndex;
continue;
}
// figure out if the mesh is a closed manifold or not
bool closed = isClosedManifold(triangleIndices);
if (closed) {
uint32_t triangleCount = (uint32_t)(triangleIndices.size()) / TRIANGLE_STRIDE;
if (_verbose) {
qDebug() << " process closed part" << partIndex << ": " << " triangles =" << triangleCount;
}
// compute approximate convex decomposition
bool success = convexifier->Compute(&vertices[0].x, POINT_STRIDE, numVertices,
&triangleIndices[0], TRIANGLE_STRIDE, triangleCount, params);
if (success) {
getConvexResults(convexifier, resultMesh);
} else if (_verbose) {
qDebug() << " failed to convexify";
}
} else {
if (_verbose) {
qDebug() << " postpone open part" << partIndex;
}
openParts.push_back(partIndex);
}
++partIndex;
++validPartsFound;
}
if (! openParts.empty()) {
// combine open meshes in an attempt to produce a closed mesh
triangleIndices.clear();
for (auto index : openParts) {
const FBXMeshPart &meshPart = mesh.parts[index];
getTrianglesInMeshPart(meshPart, triangleIndices);
}
// collapse dupe indices
for (auto& index : triangleIndices) {
index = dupeIndexMap[index];
}
// this time we don't care if the parts are closed or not
uint32_t triangleCount = (uint32_t)(triangleIndices.size()) / TRIANGLE_STRIDE;
if (_verbose) {
qDebug() << " process remaining open parts =" << openParts.size() << ": "
<< " triangles =" << triangleCount;
}
// compute approximate convex decomposition
bool res = interfaceVHACD->Compute(&vertices[0].x, 3, (uint)nPoints, &triangles[0], 3, triangleCount, params);
if (!res){
qDebug() << "V-HACD computation failed for Mesh : " << count;
count++;
continue;
bool success = convexifier->Compute(&vertices[0].x, POINT_STRIDE, numVertices,
&triangleIndices[0], TRIANGLE_STRIDE, triangleCount, params);
if (success) {
getConvexResults(convexifier, resultMesh);
} else if (_verbose) {
qDebug() << " failed to convexify";
}
// Number of hulls for this input meshPart
unsigned int nConvexHulls = interfaceVHACD->GetNConvexHulls();
// create an output meshPart for each convex hull
for (unsigned int j = 0; j < nConvexHulls; j++) {
VHACD::IVHACD::ConvexHull hull;
interfaceVHACD->GetConvexHull(j, hull);
resultMesh.parts.append(FBXMeshPart());
FBXMeshPart &resultMeshPart = resultMesh.parts.last();
int hullIndexStart = resultMesh.vertices.size();
for (unsigned int i = 0; i < hull.m_nPoints; i++) {
float x = hull.m_points[i * 3];
float y = hull.m_points[i * 3 + 1];
float z = hull.m_points[i * 3 + 2];
resultMesh.vertices.append(glm::vec3(x, y, z));
}
for (unsigned int i = 0; i < hull.m_nTriangles; i++) {
int index0 = hull.m_triangles[i * 3] + hullIndexStart;
int index1 = hull.m_triangles[i * 3 + 1] + hullIndexStart;
int index2 = hull.m_triangles[i * 3 + 2] + hullIndexStart;
resultMeshPart.triangleIndices.append(index0);
resultMeshPart.triangleIndices.append(index1);
resultMeshPart.triangleIndices.append(index2);
}
}
count++;
}
}
//release memory
interfaceVHACD->Clean();
interfaceVHACD->Release();
convexifier->Clean();
convexifier->Release();
if (count > 0){
return true;
}
else{
return false;
}
return validPartsFound > 0;
}
vhacd::VHACDUtil:: ~VHACDUtil(){
@ -319,16 +466,9 @@ void vhacd::ProgressCallback::Update(const double overallProgress,
const char* const operation) {
int progress = (int)(overallProgress + 0.5);
if (progress < 10){
std::cout << "\b\b";
}
else{
std::cout << "\b\b\b";
}
std::cout << progress << "%";
if (progress >= 100){
std::cout << "\b\b\b";
std::cout << progress << "%" << std::flush;
if (progress >= 100) {
std::cout << std::endl;
}
}
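Two notes on the helpers added above. The TriangleEdge hash uses Cantor's pairing function pi(a, b) = (a + b)(a + b + 1) / 2 + b on the sorted index pair, so the edge (3, 5) feeds 8 * 9 / 2 + 5 = 41 into std::hash<int> regardless of the vertex order in which a triangle supplied it. And a small hypothetical check of isClosedManifold(), not part of this commit (assumes <vector> and <cassert>):

void checkIsClosedManifold() {
    // a lone triangle leaves each of its three edges with only one adjacent face
    std::vector<int> loneTriangle { 0, 1, 2 };
    // a tetrahedron shares every one of its six edges between exactly two faces
    std::vector<int> tetrahedron { 0, 1, 2,   0, 3, 1,   0, 2, 3,   1, 3, 2 };
    assert(!isClosedManifold(loneTriangle));
    assert(isClosedManifold(tetrahedron));
}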

View file

@ -25,18 +25,23 @@
namespace vhacd {
class VHACDUtil {
public:
void setVerbose(bool verbose) { _verbose = verbose; }
bool loadFBX(const QString filename, FBXGeometry& result);
void fattenMeshes(const FBXMesh& mesh, FBXMesh& result,
unsigned int& meshPartCount,
unsigned int startMeshIndex, unsigned int endMeshIndex) const;
    void fattenMesh(const FBXMesh& mesh, const glm::mat4& geometryOffset, FBXMesh& result) const;
bool computeVHACD(FBXGeometry& geometry,
VHACD::IVHACD::Parameters params,
FBXGeometry& result,
int startMeshIndex, int endMeshIndex,
float minimumMeshSize, float maximumMeshSize);
void getConvexResults(VHACD::IVHACD* convexifier, FBXMesh& resultMesh) const;
~VHACDUtil();
private:
bool _verbose { false };
};
class ProgressCallback : public VHACD::IVHACD::IUserCallback {
@ -45,7 +50,7 @@ namespace vhacd {
~ProgressCallback();
// Couldn't follow coding guideline here due to virtual function declared in IUserCallback
    void Update(const double overallProgress, const double stageProgress, const double operationProgress,
const char * const stage, const char * const operation);
};
}
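With the start/end mesh-index arguments dropped, a typical call sequence against this interface would presumably look like the sketch below; the include paths and the exact meaning of the size bounds are assumptions, and the call shape mirrors the five-argument computeVHACD used later in the tool:

#include <QString>
#include <VHACD.h>
#include <FBXReader.h>
#include "VHACDUtil.h"

// Hedged sketch: load an FBX and convexify it with default V-HACD parameters.
bool convexifyFile(const QString& inputFilename, float minimumMeshSize,
                   float maximumMeshSize, FBXGeometry& result) {
    vhacd::VHACDUtil util;
    util.setVerbose(true);

    FBXGeometry geometry;
    if (!util.loadFBX(inputFilename, geometry)) {
        return false;
    }

    VHACD::IVHACD::Parameters params; // library defaults
    return util.computeVHACD(geometry, params, result, minimumMeshSize, maximumMeshSize);
}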

View file

@ -19,7 +19,6 @@ using namespace std;
using namespace VHACD;
QString formatFloat(double n) {
// limit precision to 6, but don't output trailing zeros.
QString s = QString::number(n, 'f', 6);
@ -33,14 +32,15 @@ QString formatFloat(double n) {
}
bool writeOBJ(QString outFileName, FBXGeometry& geometry, bool outputCentimeters, int whichMeshPart = -1) {
bool VHACDUtilApp::writeOBJ(QString outFileName, FBXGeometry& geometry, bool outputCentimeters, int whichMeshPart) {
QFile file(outFileName);
if (!file.open(QIODevice::WriteOnly)) {
qDebug() << "Unable to write to " << outFileName;
qWarning() << "unable to write to" << outFileName;
_returnCode = VHACD_RETURN_CODE_FAILURE_TO_WRITE;
return false;
}
    QTextStream out(&file);
if (outputCentimeters) {
out << "# This file uses centimeters as units\n\n";
}
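For reference on the output side: an OBJ file is just "v x y z" records followed by "f" records, and face indices are 1-based, which is why index offsets matter when parts are concatenated into one vertex list. A stripped-down sketch, not the member function above, with the mesh fields reduced to plain vectors:

#include <QFile>
#include <QString>
#include <QTextStream>
#include <QVector>
#include <glm/glm.hpp>

// Hedged sketch: minimal OBJ emission with QTextStream.
bool writeSimpleOBJ(const QString& outFileName,
                    const QVector<glm::vec3>& vertices,
                    const QVector<int>& triangleIndices) {
    QFile file(outFileName);
    if (!file.open(QIODevice::WriteOnly)) {
        return false;
    }
    QTextStream out(&file);
    for (const glm::vec3& v : vertices) {
        out << "v " << v.x << " " << v.y << " " << v.z << "\n";
    }
    for (int i = 0; i + 2 < triangleIndices.size(); i += 3) {
        out << "f " << triangleIndices[i] + 1 << " "
            << triangleIndices[i + 1] + 1 << " "
            << triangleIndices[i + 2] + 1 << "\n";
    }
    return true;
}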
@ -105,6 +105,9 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
const QCommandLineOption helpOption = parser.addHelpOption();
const QCommandLineOption verboseOutput("v", "verbose output");
parser.addOption(verboseOutput);
const QCommandLineOption splitOption("split", "split input-file into one mesh per output-file");
parser.addOption(splitOption);
@ -123,12 +126,6 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
const QCommandLineOption outputCentimetersOption("c", "output units are centimeters");
parser.addOption(outputCentimetersOption);
const QCommandLineOption startMeshIndexOption("s", "start-mesh index", "0");
parser.addOption(startMeshIndexOption);
const QCommandLineOption endMeshIndexOption("e", "end-mesh index", "0");
parser.addOption(endMeshIndexOption);
const QCommandLineOption minimumMeshSizeOption("m", "minimum mesh (diagonal) size to consider", "0");
parser.addOption(minimumMeshSizeOption);
@ -195,8 +192,10 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
Q_UNREACHABLE();
}
bool outputCentimeters = parser.isSet(outputCentimetersOption);
bool verbose = parser.isSet(verboseOutput);
vUtil.setVerbose(verbose);
bool outputCentimeters = parser.isSet(outputCentimetersOption);
bool fattenFaces = parser.isSet(fattenFacesOption);
bool generateHulls = parser.isSet(generateHullsOption);
bool splitModel = parser.isSet(splitOption);
@ -225,16 +224,6 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
Q_UNREACHABLE();
}
int startMeshIndex = -1;
if (parser.isSet(startMeshIndexOption)) {
startMeshIndex = parser.value(startMeshIndexOption).toInt();
}
int endMeshIndex = -1;
if (parser.isSet(endMeshIndexOption)) {
endMeshIndex = parser.value(endMeshIndexOption).toInt();
}
float minimumMeshSize = 0.0f;
if (parser.isSet(minimumMeshSizeOption)) {
minimumMeshSize = parser.value(minimumMeshSizeOption).toFloat();
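The option handling here follows the usual QCommandLineParser split between flag options read with isSet() and valued options read with value() plus a conversion. A compact, self-contained sketch of that pattern with placeholder option names:

#include <QCoreApplication>
#include <QCommandLineOption>
#include <QCommandLineParser>

// Hedged sketch: one boolean flag and one valued option with a default.
int main(int argc, char* argv[]) {
    QCoreApplication app(argc, argv);

    QCommandLineParser parser;
    parser.addHelpOption();

    QCommandLineOption verboseOption("v", "verbose output");
    parser.addOption(verboseOption);

    QCommandLineOption minSizeOption("m", "minimum mesh (diagonal) size", "size", "0");
    parser.addOption(minSizeOption);

    parser.process(app);

    bool verbose = parser.isSet(verboseOption);
    float minimumMeshSize = parser.value(minSizeOption).toFloat();
    Q_UNUSED(verbose);
    Q_UNUSED(minimumMeshSize);
    return 0;
}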
@ -301,17 +290,20 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
Q_UNREACHABLE();
}
    // load the mesh
FBXGeometry fbx;
auto begin = std::chrono::high_resolution_clock::now();
if (!vUtil.loadFBX(inputFilename, fbx)){
cout << "Error in opening FBX file....";
_returnCode = VHACD_RETURN_CODE_FAILURE_TO_READ;
return;
}
auto end = std::chrono::high_resolution_clock::now();
auto loadDuration = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
if (verbose) {
auto loadDuration = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
const double NANOSECS_PER_SECOND = 1.0e9;
qDebug() << "load time =" << (double)loadDuration / NANOSECS_PER_SECOND << "seconds";
}
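The pattern above (clock before and after, a nanoseconds cast, divide by 1e9) is repeated for the compute step further down; the same idiom wrapped in a small helper, as a sketch rather than anything the tool defines:

#include <chrono>
#include <utility>

// Hedged sketch: time any callable and return the elapsed wall-clock seconds.
template <typename Callable>
double timedSeconds(Callable&& work) {
    auto begin = std::chrono::high_resolution_clock::now();
    std::forward<Callable>(work)();
    auto end = std::chrono::high_resolution_clock::now();
    auto nanos = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
    const double NANOSECS_PER_SECOND = 1.0e9;
    return (double)nanos / NANOSECS_PER_SECOND;
}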
if (splitModel) {
QVector<QString> infileExtensions = {"fbx", "obj"};
@ -329,10 +321,14 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
if (generateHulls) {
VHACD::IVHACD::Parameters params;
vhacd::ProgressCallback pCallBack;
vhacd::ProgressCallback progressCallback;
//set parameters for V-HACD
params.m_callback = &pCallBack; //progress callback
if (verbose) {
params.m_callback = &progressCallback; //progress callback
} else {
params.m_callback = nullptr;
}
params.m_resolution = vHacdResolution;
params.m_depth = vHacdDepth;
params.m_concavity = vHacdConcavity;
@ -346,44 +342,51 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
params.m_mode = 0; // 0: voxel-based (recommended), 1: tetrahedron-based
params.m_maxNumVerticesPerCH = vHacdMaxVerticesPerCH;
params.m_minVolumePerCH = 0.0001; // 0.0001
params.m_callback = 0; // 0
params.m_logger = 0; // 0
params.m_logger = nullptr;
params.m_convexhullApproximation = true; // true
params.m_oclAcceleration = true; // true
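params.m_callback expects a pointer to an implementation of VHACD::IVHACD::IUserCallback, or nullptr to silence progress output, which is what the verbose branch above toggles. A hedged sketch of such a callback, reusing the Update() signature declared in the header earlier; whether the base class marks it pure virtual is an assumption:

#include <iostream>
#include <VHACD.h>

// Hedged sketch: a minimal progress callback handed to V-HACD via Parameters.
class PrintingCallback : public VHACD::IVHACD::IUserCallback {
public:
    void Update(const double overallProgress, const double stageProgress,
                const double operationProgress, const char* const stage,
                const char* const operation) {
        std::cout << "\r" << (int)(overallProgress + 0.5) << "%" << std::flush;
    }
};

// usage (sketch): only attach the callback when progress output is wanted
//   PrintingCallback progressCallback;
//   params.m_callback = verbose ? &progressCallback : nullptr;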
//perform vhacd computation
if (verbose) {
qDebug() << "running V-HACD algorithm ...";
}
begin = std::chrono::high_resolution_clock::now();
FBXGeometry result;
if (!vUtil.computeVHACD(fbx, params, result, startMeshIndex, endMeshIndex,
minimumMeshSize, maximumMeshSize)) {
cout << "Compute Failed...";
}
bool success = vUtil.computeVHACD(fbx, params, result, minimumMeshSize, maximumMeshSize);
end = std::chrono::high_resolution_clock::now();
auto computeDuration = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
if (verbose) {
qDebug() << "run time =" << (double)computeDuration / 1000000000.00 << " seconds";
}
if (!success) {
if (verbose) {
qDebug() << "failed to convexify model";
}
_returnCode = VHACD_RETURN_CODE_FAILURE_TO_CONVEXIFY;
return;
}
int totalVertices = 0;
int totalTriangles = 0;
int totalMeshParts = 0;
foreach (const FBXMesh& mesh, result.meshes) {
totalVertices += mesh.vertices.size();
foreach (const FBXMeshPart &meshPart, mesh.parts) {
totalTriangles += meshPart.triangleIndices.size() / 3;
// each quad was made into two triangles
totalTriangles += 2 * meshPart.quadIndices.size() / 4;
totalMeshParts++;
}
}
int totalHulls = result.meshes[0].parts.size();
cout << endl << "Summary of V-HACD Computation..................." << endl;
cout << "File Path : " << inputFilename.toStdString() << endl;
cout << "Number Of Meshes : " << totalMeshParts << endl;
cout << "Total vertices : " << totalVertices << endl;
cout << "Total Triangles : " << totalTriangles << endl;
cout << "Total Convex Hulls : " << totalHulls << endl;
cout << "Total FBX load time: " << (double)loadDuration / 1000000000.00 << " seconds" << endl;
cout << "V-HACD Compute time: " << (double)computeDuration / 1000000000.00 << " seconds" << endl;
if (verbose) {
int totalHulls = result.meshes[0].parts.size();
qDebug() << "output file =" << outputFilename;
qDebug() << "vertices =" << totalVertices;
qDebug() << "triangles =" << totalTriangles;
qDebug() << "hulls =" << totalHulls;
}
writeOBJ(outputFilename, result, outputCentimeters);
}
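One detail in the summary arithmetic above: quadIndices stores four indices per quad and each quad was split into two triangles, hence the 2 * size() / 4 term — a part with 12 quad indices describes 3 quads and so adds 6 triangles. As a hypothetical helper, not something the tool defines:

// Hedged sketch: triangle count for a part holding both triangle and quad index lists.
int countRenderedTriangles(int numTriangleIndices, int numQuadIndices) {
    return numTriangleIndices / 3 + 2 * (numQuadIndices / 4);
}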
@ -398,17 +401,9 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
meshCount += mesh.parts.size();
}
if (startMeshIndex < 0) {
startMeshIndex = 0;
}
if (endMeshIndex < 0) {
endMeshIndex = meshCount;
}
unsigned int meshPartCount = 0;
result.modelTransform = glm::mat4(); // Identity matrix
foreach (const FBXMesh& mesh, fbx.meshes) {
vUtil.fattenMeshes(mesh, result, meshPartCount, startMeshIndex, endMeshIndex);
vUtil.fattenMesh(mesh, fbx.offset, result);
}
newFbx.meshes.append(result);

View file

@ -15,12 +15,25 @@
#include <QApplication>
#include <FBXReader.h>
const int VHACD_RETURN_CODE_FAILURE_TO_READ = 1;
const int VHACD_RETURN_CODE_FAILURE_TO_WRITE = 2;
const int VHACD_RETURN_CODE_FAILURE_TO_CONVEXIFY = 3;
class VHACDUtilApp : public QCoreApplication {
Q_OBJECT
public:
VHACDUtilApp(int argc, char* argv[]);
~VHACDUtilApp();
bool writeOBJ(QString outFileName, FBXGeometry& geometry, bool outputCentimeters, int whichMeshPart = -1);
int getReturnCode() const { return _returnCode; }
private:
int _returnCode { 0 };
};

View file

@ -23,5 +23,5 @@ using namespace VHACD;
int main(int argc, char * argv[]) {
VHACDUtilApp app(argc, argv);
return 0;
return app.getReturnCode();
}