From df5684c4375193108fb64d2eee748d1ddd8db18b Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 10 Sep 2019 11:00:36 -0700 Subject: [PATCH 001/121] Introduce hfm::Shape object --- libraries/hfm/src/hfm/HFM.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 484a10aa3b..291d0fd6e3 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -287,6 +287,21 @@ public: bool shouldInitCollisions() const { return _collisionsConfig.size() > 0; } }; +class ShapeTransform { + std::vector clusters; + Extents meshExtents; + Transform modelTransform; +}; + +// The lightweight model part description. +class Shape { +public: + uint32_t mesh; + uint32_t meshPart; + uint32_t material; + uint32_t shapeTransform; +}; + /// The runtime model format. class Model { public: @@ -297,6 +312,8 @@ public: QString author; QString applicationName; ///< the name of the application that generated the model + std::vector shapes; + QVector joints; QHash jointIndices; ///< 1-based, so as to more easily detect missing indices bool hasSkeletonJoints; From c4db6c78d8ec70ae4dfea3ff5f05773b308faf5c Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 10 Sep 2019 11:32:12 -0700 Subject: [PATCH 002/121] std::vector-ize meshes/mesh parts in hfm::Model --- libraries/fbx/src/FBXSerializer.cpp | 2 +- libraries/fbx/src/GLTFSerializer.cpp | 8 +++---- libraries/fbx/src/OBJSerializer.cpp | 24 +++++++++---------- libraries/hfm/src/hfm/HFM.cpp | 2 +- libraries/hfm/src/hfm/HFM.h | 6 ++--- .../model-baker/src/model-baker/Baker.cpp | 4 ++-- libraries/render-utils/src/Model.cpp | 16 ++++++------- 7 files changed, 31 insertions(+), 31 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index f8339ddd31..de6ab545e5 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1624,7 +1624,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } - hfmModel.meshes.append(extracted.mesh); + hfmModel.meshes.push_back(extracted.mesh); int meshIndex = hfmModel.meshes.size() - 1; meshIDsToMeshIndices.insert(it.key(), meshIndex); } diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index dca9e9fefa..e42b516559 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1018,7 +1018,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& if (node.defined["mesh"]) { - hfmModel.meshes.append(HFMMesh()); + hfmModel.meshes.push_back(HFMMesh()); HFMMesh& mesh = hfmModel.meshes[hfmModel.meshes.size() - 1]; if (!hfmModel.hasSkeletonJoints) { HFMCluster cluster; @@ -2038,9 +2038,9 @@ void GLTFSerializer::hfmDebugDump(const HFMModel& hfmModel) { qCDebug(modelformat) << " jointIndices.size() =" << hfmModel.jointIndices.size(); qCDebug(modelformat) << " joints.count() =" << hfmModel.joints.count(); qCDebug(modelformat) << "---------------- Meshes ----------------"; - qCDebug(modelformat) << " meshes.count() =" << hfmModel.meshes.count(); + qCDebug(modelformat) << " meshes.count() =" << hfmModel.meshes.size(); qCDebug(modelformat) << " blendshapeChannelNames = " << hfmModel.blendshapeChannelNames; - foreach(HFMMesh mesh, hfmModel.meshes) { + for (const HFMMesh& mesh : hfmModel.meshes) { qCDebug(modelformat) << "\n"; qCDebug(modelformat) << " meshpointer =" << mesh._mesh.get(); qCDebug(modelformat) << " meshindex =" << mesh.meshIndex; @@ 
-2054,7 +2054,7 @@ void GLTFSerializer::hfmDebugDump(const HFMModel& hfmModel) { qCDebug(modelformat) << " clusterIndices.count() =" << mesh.clusterIndices.count(); qCDebug(modelformat) << " clusterWeights.count() =" << mesh.clusterWeights.count(); qCDebug(modelformat) << " modelTransform =" << mesh.modelTransform; - qCDebug(modelformat) << " parts.count() =" << mesh.parts.count(); + qCDebug(modelformat) << " parts.count() =" << mesh.parts.size(); qCDebug(modelformat) << "---------------- Meshes (blendshapes)--------"; foreach(HFMBlendshape bshape, mesh.blendshapes) { qCDebug(modelformat) << "\n"; diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index 416f343a47..5f60fe7927 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -492,8 +492,8 @@ bool OBJSerializer::parseOBJGroup(OBJTokenizer& tokenizer, const hifi::VariantHa float& scaleGuess, bool combineParts) { FaceGroup faces; HFMMesh& mesh = hfmModel.meshes[0]; - mesh.parts.append(HFMMeshPart()); - HFMMeshPart& meshPart = mesh.parts.last(); + mesh.parts.push_back(HFMMeshPart()); + HFMMeshPart& meshPart = mesh.parts.back(); bool sawG = false; bool result = true; int originalFaceCountForDebugging = 0; @@ -501,7 +501,7 @@ bool OBJSerializer::parseOBJGroup(OBJTokenizer& tokenizer, const hifi::VariantHa bool anyVertexColor { false }; int vertexCount { 0 }; - setMeshPartDefaults(meshPart, QString("dontknow") + QString::number(mesh.parts.count())); + setMeshPartDefaults(meshPart, QString("dontknow") + QString::number(mesh.parts.size())); while (true) { int tokenType = tokenizer.nextToken(); @@ -676,7 +676,7 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V _url = url; bool combineParts = mapping.value("combineParts").toBool(); hfmModel.meshExtents.reset(); - hfmModel.meshes.append(HFMMesh()); + hfmModel.meshes.push_back(HFMMesh()); try { // call parseOBJGroup as long as it's returning true. Each successful call will @@ -706,8 +706,8 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V mesh.clusters.append(cluster); QMap materialMeshIdMap; - QVector hfmMeshParts; - for (int i = 0, meshPartCount = 0; i < mesh.parts.count(); i++, meshPartCount++) { + std::vector hfmMeshParts; + for (uint32_t i = 0, meshPartCount = 0; i < (uint32_t)mesh.parts.size(); i++, meshPartCount++) { HFMMeshPart& meshPart = mesh.parts[i]; FaceGroup faceGroup = faceGroups[meshPartCount]; bool specifiesUV = false; @@ -718,8 +718,8 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V // Create a new HFMMesh for this material mapping. materialMeshIdMap.insert(face.materialName, materialMeshIdMap.count()); - hfmMeshParts.append(HFMMeshPart()); - HFMMeshPart& meshPartNew = hfmMeshParts.last(); + hfmMeshParts.push_back(HFMMeshPart()); + HFMMeshPart& meshPartNew = hfmMeshParts.back(); meshPartNew.quadIndices = QVector(meshPart.quadIndices); // Copy over quad indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway]. meshPartNew.quadTrianglesIndices = QVector(meshPart.quadTrianglesIndices); // Copy over quad triangulated indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway]. meshPartNew.triangleIndices = QVector(meshPart.triangleIndices); // Copy over triangle indices. @@ -752,9 +752,9 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V } // clean up old mesh parts. 
- int unmodifiedMeshPartCount = mesh.parts.count(); + int unmodifiedMeshPartCount = mesh.parts.size(); mesh.parts.clear(); - mesh.parts = QVector(hfmMeshParts); + mesh.parts = hfmMeshParts; for (int i = 0, meshPartCount = 0; i < unmodifiedMeshPartCount; i++, meshPartCount++) { FaceGroup faceGroup = faceGroups[meshPartCount]; @@ -1003,7 +1003,7 @@ void hfmDebugDump(const HFMModel& hfmModel) { qCDebug(modelformat) << "---------------- hfmModel ----------------"; qCDebug(modelformat) << " hasSkeletonJoints =" << hfmModel.hasSkeletonJoints; qCDebug(modelformat) << " offset =" << hfmModel.offset; - qCDebug(modelformat) << " meshes.count() =" << hfmModel.meshes.count(); + qCDebug(modelformat) << " meshes.count() =" << hfmModel.meshes.size(); foreach (HFMMesh mesh, hfmModel.meshes) { qCDebug(modelformat) << " vertices.count() =" << mesh.vertices.count(); qCDebug(modelformat) << " colors.count() =" << mesh.colors.count(); @@ -1021,7 +1021,7 @@ void hfmDebugDump(const HFMModel& hfmModel) { qCDebug(modelformat) << " clusterWeights.count() =" << mesh.clusterWeights.count(); qCDebug(modelformat) << " meshExtents =" << mesh.meshExtents; qCDebug(modelformat) << " modelTransform =" << mesh.modelTransform; - qCDebug(modelformat) << " parts.count() =" << mesh.parts.count(); + qCDebug(modelformat) << " parts.count() =" << mesh.parts.size(); foreach (HFMMeshPart meshPart, mesh.parts) { qCDebug(modelformat) << " quadIndices.count() =" << meshPart.quadIndices.count(); qCDebug(modelformat) << " triangleIndices.count() =" << meshPart.triangleIndices.count(); diff --git a/libraries/hfm/src/hfm/HFM.cpp b/libraries/hfm/src/hfm/HFM.cpp index 236445bfda..b297db4bcb 100644 --- a/libraries/hfm/src/hfm/HFM.cpp +++ b/libraries/hfm/src/hfm/HFM.cpp @@ -76,7 +76,7 @@ QStringList HFMModel::getJointNames() const { } bool HFMModel::hasBlendedMeshes() const { - if (!meshes.isEmpty()) { + if (!meshes.empty()) { foreach (const HFMMesh& mesh, meshes) { if (!mesh.blendshapes.isEmpty()) { return true; diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 291d0fd6e3..fcbdd32150 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -228,7 +228,7 @@ public: class Mesh { public: - QVector parts; + std::vector parts; QVector vertices; QVector normals; @@ -314,11 +314,11 @@ public: std::vector shapes; + std::vector meshes; + QVector joints; QHash jointIndices; ///< 1-based, so as to more easily detect missing indices bool hasSkeletonJoints; - - QVector meshes; QVector scripts; QHash materials; diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index c896613df5..e1b2ff97cf 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -32,7 +32,7 @@ namespace baker { void run(const BakeContextPointer& context, const Input& input, Output& output) { const auto& hfmModelIn = input; - output.edit0() = hfmModelIn->meshes.toStdVector(); + output.edit0() = hfmModelIn->meshes; output.edit1() = hfmModelIn->originalURL; output.edit2() = hfmModelIn->meshIndicesToModelNames; auto& blendshapesPerMesh = output.edit3(); @@ -107,7 +107,7 @@ namespace baker { void run(const BakeContextPointer& context, const Input& input, Output& output) { auto hfmModelOut = input.get0(); - hfmModelOut->meshes = QVector::fromStdVector(input.get1()); + hfmModelOut->meshes = input.get1(); hfmModelOut->joints = QVector::fromStdVector(input.get2()); hfmModelOut->jointRotationOffsets = input.get3(); 
hfmModelOut->jointIndices = input.get4(); diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index 74cf1ffa39..e4fefacb96 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -734,7 +734,7 @@ bool Model::replaceScriptableModelMeshPart(scriptable::ScriptableModelBasePointe for (int partID = 0; partID < numParts; partID++) { HFMMeshPart part; part.triangleIndices = buffer_helpers::bufferToVector(mesh._mesh->getIndexBuffer(), "part.triangleIndices"); - mesh.parts << part; + mesh.parts.push_back(part); } { foreach (const glm::vec3& vertex, mesh.vertices) { @@ -745,7 +745,7 @@ bool Model::replaceScriptableModelMeshPart(scriptable::ScriptableModelBasePointe mesh.meshExtents.maximum = glm::max(mesh.meshExtents.maximum, transformedVertex); } } - hfmModel.meshes << mesh; + hfmModel.meshes.push_back(mesh); } calculateTriangleSets(hfmModel); } @@ -762,9 +762,9 @@ scriptable::ScriptableModelBase Model::getScriptableModel() { } const HFMModel& hfmModel = getHFMModel(); - int numberOfMeshes = hfmModel.meshes.size(); + uint32_t numberOfMeshes = (uint32_t)hfmModel.meshes.size(); int shapeID = 0; - for (int i = 0; i < numberOfMeshes; i++) { + for (uint32_t i = 0; i < numberOfMeshes; i++) { const HFMMesh& hfmMesh = hfmModel.meshes.at(i); if (auto mesh = hfmMesh._mesh) { result.append(mesh); @@ -795,20 +795,20 @@ scriptable::ScriptableModelBase Model::getScriptableModel() { void Model::calculateTriangleSets(const HFMModel& hfmModel) { PROFILE_RANGE(render, __FUNCTION__); - int numberOfMeshes = hfmModel.meshes.size(); + uint32_t numberOfMeshes = (uint32_t)hfmModel.meshes.size(); _triangleSetsValid = true; _modelSpaceMeshTriangleSets.clear(); _modelSpaceMeshTriangleSets.resize(numberOfMeshes); - for (int i = 0; i < numberOfMeshes; i++) { + for (uint32_t i = 0; i < numberOfMeshes; i++) { const HFMMesh& mesh = hfmModel.meshes.at(i); - const int numberOfParts = mesh.parts.size(); + const uint32_t numberOfParts = mesh.parts.size(); auto& meshTriangleSets = _modelSpaceMeshTriangleSets[i]; meshTriangleSets.resize(numberOfParts); - for (int j = 0; j < numberOfParts; j++) { + for (uint32_t j = 0; j < numberOfParts; j++) { const HFMMeshPart& part = mesh.parts.at(j); auto& partTriangleSet = meshTriangleSets[j]; From b944db3e7904da8233c124ff0f3a0a1ecf55c3dd Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 11 Sep 2019 16:50:21 -0700 Subject: [PATCH 003/121] std::vector-ize joints and materials in hfm::Model --- interface/src/avatar/AvatarDoctor.cpp | 4 ++-- .../src/avatars-renderer/SkeletonModel.cpp | 2 +- libraries/baking/src/MaterialBaker.cpp | 2 +- libraries/baking/src/MaterialBaker.h | 2 +- libraries/baking/src/ModelBaker.cpp | 2 +- libraries/fbx/src/FBXSerializer.cpp | 9 ++++++--- libraries/fbx/src/GLTFSerializer.cpp | 6 +++--- libraries/fbx/src/OBJSerializer.cpp | 13 +++++++------ libraries/hfm/src/hfm/HFM.h | 4 ++-- libraries/model-baker/src/model-baker/Baker.cpp | 4 ++-- 10 files changed, 26 insertions(+), 22 deletions(-) diff --git a/interface/src/avatar/AvatarDoctor.cpp b/interface/src/avatar/AvatarDoctor.cpp index 01a40e89fd..a12b4dfcc0 100644 --- a/interface/src/avatar/AvatarDoctor.cpp +++ b/interface/src/avatar/AvatarDoctor.cpp @@ -99,12 +99,12 @@ void AvatarDoctor::startDiagnosing() { } // RIG - if (avatarModel.joints.isEmpty()) { + if (avatarModel.joints.empty()) { addError("Avatar has no rig.", "no-rig"); } else { auto jointNames = avatarModel.getJointNames(); - if (avatarModel.joints.length() > 
NETWORKED_JOINTS_LIMIT) { + if (avatarModel.joints.size() > NETWORKED_JOINTS_LIMIT) { addError(tr( "Avatar has over %n bones.", "", NETWORKED_JOINTS_LIMIT), "maximum-bone-limit"); } // Avatar does not have Hips bone mapped diff --git a/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp b/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp index 40b65c54a1..d082d515fc 100644 --- a/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp +++ b/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp @@ -312,7 +312,7 @@ void SkeletonModel::computeBoundingShape() { } const HFMModel& hfmModel = getHFMModel(); - if (hfmModel.joints.isEmpty() || _rig.indexOfJoint("Hips") == -1) { + if (hfmModel.joints.empty() || _rig.indexOfJoint("Hips") == -1) { // rootJointIndex == -1 if the avatar model has no skeleton return; } diff --git a/libraries/baking/src/MaterialBaker.cpp b/libraries/baking/src/MaterialBaker.cpp index 9a1b1b2d24..d177ddf358 100644 --- a/libraries/baking/src/MaterialBaker.cpp +++ b/libraries/baking/src/MaterialBaker.cpp @@ -258,7 +258,7 @@ void MaterialBaker::addTexture(const QString& materialName, image::TextureUsage: } }; -void MaterialBaker::setMaterials(const QHash& materials, const QString& baseURL) { +void MaterialBaker::setMaterials(const std::vector& materials, const QString& baseURL) { _materialResource = NetworkMaterialResourcePointer(new NetworkMaterialResource(), [](NetworkMaterialResource* ptr) { ptr->deleteLater(); }); for (auto& material : materials) { _materialResource->parsedMaterials.names.push_back(material.name.toStdString()); diff --git a/libraries/baking/src/MaterialBaker.h b/libraries/baking/src/MaterialBaker.h index 04782443f0..bb47941af6 100644 --- a/libraries/baking/src/MaterialBaker.h +++ b/libraries/baking/src/MaterialBaker.h @@ -32,7 +32,7 @@ public: bool isURL() const { return _isURL; } QString getBakedMaterialData() const { return _bakedMaterialData; } - void setMaterials(const QHash& materials, const QString& baseURL); + void setMaterials(const std::vector& materials, const QString& baseURL); void setMaterials(const NetworkMaterialResourcePointer& materialResource); NetworkMaterialResourcePointer getNetworkMaterialResource() const { return _materialResource; } diff --git a/libraries/baking/src/ModelBaker.cpp b/libraries/baking/src/ModelBaker.cpp index 9d6a368e1c..38ebf6dfce 100644 --- a/libraries/baking/src/ModelBaker.cpp +++ b/libraries/baking/src/ModelBaker.cpp @@ -259,7 +259,7 @@ void ModelBaker::bakeSourceCopy() { return; } - if (!_hfmModel->materials.isEmpty()) { + if (!_hfmModel->materials.empty()) { _materialBaker = QSharedPointer( new MaterialBaker(_modelURL.fileName(), true, _bakedOutputDir), &MaterialBaker::deleteLater diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index de6ab545e5..81444f8c6b 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1357,11 +1357,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } - hfmModel.joints.append(joint); + hfmModel.joints.push_back(joint); } // NOTE: shapeVertices are in joint-frame - hfmModel.shapeVertices.resize(std::max(1, hfmModel.joints.size()) ); + hfmModel.shapeVertices.resize(std::max((size_t)1, hfmModel.joints.size()) ); hfmModel.bindExtents.reset(); hfmModel.meshExtents.reset(); @@ -1400,7 +1400,10 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } #endif - hfmModel.materials = _hfmMaterials; + + 
for (auto materialIt = _hfmMaterials.cbegin(); materialIt != _hfmMaterials.cend(); ++materialIt) { + hfmModel.materials.push_back(materialIt.value()); + } // see if any materials have texture children bool materialsHaveTextures = checkMaterialsHaveTextures(_hfmMaterials, _textureFilenames, _connectionChildMap); diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index e42b516559..4c4050c935 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1002,8 +1002,8 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& for (int i = 0; i < materialIDs.size(); ++i) { QString& matid = materialIDs[i]; - hfmModel.materials[matid] = HFMMaterial(); - HFMMaterial& hfmMaterial = hfmModel.materials[matid]; + hfmModel.materials.emplace_back(); + HFMMaterial& hfmMaterial = hfmModel.materials.back(); hfmMaterial._material = std::make_shared(); hfmMaterial.name = hfmMaterial.materialID = matid; setHFMMaterial(hfmMaterial, _file.materials[i]); @@ -2036,7 +2036,7 @@ void GLTFSerializer::hfmDebugDump(const HFMModel& hfmModel) { qCDebug(modelformat) << " meshExtents.size() = " << hfmModel.meshExtents.size(); qCDebug(modelformat) << " jointIndices.size() =" << hfmModel.jointIndices.size(); - qCDebug(modelformat) << " joints.count() =" << hfmModel.joints.count(); + qCDebug(modelformat) << " joints.count() =" << hfmModel.joints.size(); qCDebug(modelformat) << "---------------- Meshes ----------------"; qCDebug(modelformat) << " meshes.count() =" << hfmModel.meshes.size(); qCDebug(modelformat) << " blendshapeChannelNames = " << hfmModel.blendshapeChannelNames; diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index 5f60fe7927..d6be066674 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -892,11 +892,12 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V continue; } - HFMMaterial& hfmMaterial = hfmModel.materials[materialID] = HFMMaterial(objMaterial.diffuseColor, - objMaterial.specularColor, - objMaterial.emissiveColor, - objMaterial.shininess, - objMaterial.opacity); + hfmModel.materials.emplace_back(objMaterial.diffuseColor, + objMaterial.specularColor, + objMaterial.emissiveColor, + objMaterial.shininess, + objMaterial.opacity); + HFMMaterial& hfmMaterial = hfmModel.materials.back(); hfmMaterial.name = materialID; hfmMaterial.materialID = materialID; @@ -1046,7 +1047,7 @@ void hfmDebugDump(const HFMModel& hfmModel) { } qCDebug(modelformat) << " jointIndices =" << hfmModel.jointIndices; - qCDebug(modelformat) << " joints.count() =" << hfmModel.joints.count(); + qCDebug(modelformat) << " joints.count() =" << hfmModel.joints.size(); foreach (HFMJoint joint, hfmModel.joints) { diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index fcbdd32150..b49e53ad4c 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -316,12 +316,12 @@ public: std::vector meshes; - QVector joints; + std::vector joints; QHash jointIndices; ///< 1-based, so as to more easily detect missing indices bool hasSkeletonJoints; QVector scripts; - QHash materials; + std::vector materials; glm::mat4 offset; // This includes offset, rotation, and scale as specified by the FST file diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index e1b2ff97cf..50221c4481 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ 
b/libraries/model-baker/src/model-baker/Baker.cpp @@ -40,7 +40,7 @@ namespace baker { for (int i = 0; i < hfmModelIn->meshes.size(); i++) { blendshapesPerMesh.push_back(hfmModelIn->meshes[i].blendshapes.toStdVector()); } - output.edit4() = hfmModelIn->joints.toStdVector(); + output.edit4() = hfmModelIn->joints; } }; @@ -108,7 +108,7 @@ namespace baker { void run(const BakeContextPointer& context, const Input& input, Output& output) { auto hfmModelOut = input.get0(); hfmModelOut->meshes = input.get1(); - hfmModelOut->joints = QVector::fromStdVector(input.get2()); + hfmModelOut->joints = input.get2(); hfmModelOut->jointRotationOffsets = input.get3(); hfmModelOut->jointIndices = input.get4(); hfmModelOut->flowData = input.get5(); From ccdfb11de2e3ae33d3b563650cc0299034e89ad6 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 11 Sep 2019 16:56:29 -0700 Subject: [PATCH 004/121] Re-order hfm::Model variables and add shapeTransforms list --- libraries/hfm/src/hfm/HFM.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index b49e53ad4c..33b02819d2 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -315,14 +315,14 @@ public: std::vector shapes; std::vector meshes; + std::vector materials; + std::vector shapeTransforms; std::vector joints; QHash jointIndices; ///< 1-based, so as to more easily detect missing indices bool hasSkeletonJoints; QVector scripts; - std::vector materials; - glm::mat4 offset; // This includes offset, rotation, and scale as specified by the FST file glm::vec3 neckPivot; From d948e434ddbbc0706cecc934a9bc0dbb1ac8bb27 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 13 Sep 2019 09:54:46 -0700 Subject: [PATCH 005/121] DyanamicTransform concept --- libraries/hfm/src/hfm/HFM.h | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 33b02819d2..959fc0dc28 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -287,10 +287,22 @@ public: bool shouldInitCollisions() const { return _collisionsConfig.size() > 0; } }; -class ShapeTransform { - std::vector clusters; - Extents meshExtents; - Transform modelTransform; +class TransformNode { + uint32_t parent { 0 }; + Transform transform; +}; + +// Formerly contained in hfm::Mesh +class Deformer { + std::vector indices; + std::vector weights; +}; + +class DynamicTransform { + std::vector deformers; + std::vector clusters; // affect the deformer of the same index + std::vector blendshapes; + // There is also the modelTransform, which for now is left in hfm::Mesh }; // The lightweight model part description. @@ -299,7 +311,8 @@ public: uint32_t mesh; uint32_t meshPart; uint32_t material; - uint32_t shapeTransform; + uint32_t transform; // The static transform node when not taking into account rigging/skinning + uint32_t dynamicTransform; }; /// The runtime model format. 
@@ -316,8 +329,9 @@ public: std::vector meshes; std::vector materials; - std::vector shapeTransforms; + std::vector deformers; + std::vector transforms; std::vector joints; QHash jointIndices; ///< 1-based, so as to more easily detect missing indices bool hasSkeletonJoints; From c765e1af39c4c8d21061e16184d33e2774895d71 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 13 Sep 2019 14:17:12 -0700 Subject: [PATCH 006/121] Add dynamicTransforms to hfm::Model --- libraries/hfm/src/hfm/HFM.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 959fc0dc28..e718a6d4ae 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -332,6 +332,8 @@ public: std::vector deformers; std::vector transforms; + std::vector dynamicTransforms; + std::vector joints; QHash jointIndices; ///< 1-based, so as to more easily detect missing indices bool hasSkeletonJoints; From b14593202ce9ba551a99425dc8e0f7b1c0ed768b Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 13 Sep 2019 14:23:15 -0700 Subject: [PATCH 007/121] Give hfm::Shape keys special undefined value --- libraries/hfm/src/hfm/HFM.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index e718a6d4ae..7a0516c04a 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -302,17 +302,19 @@ class DynamicTransform { std::vector deformers; std::vector clusters; // affect the deformer of the same index std::vector blendshapes; - // There is also the modelTransform, which for now is left in hfm::Mesh + // There are also the meshExtents and modelTransform, which for now are left in hfm::Mesh }; // The lightweight model part description. class Shape { public: - uint32_t mesh; - uint32_t meshPart; - uint32_t material; - uint32_t transform; // The static transform node when not taking into account rigging/skinning - uint32_t dynamicTransform; + const static uint32_t UNDEFINED_KEY { (uint32_t)-1 }; + + uint32_t mesh { UNDEFINED_KEY }; + uint32_t meshPart { UNDEFINED_KEY }; + uint32_t material { UNDEFINED_KEY }; + uint32_t transform { UNDEFINED_KEY }; // The static transform node when not taking into account rigging/skinning + uint32_t dynamicTransform { UNDEFINED_KEY }; }; /// The runtime model format. 
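
Taken together, patches 001 through 007 move hfm::Model from per-mesh bookkeeping toward a flat, index-based scene description in libraries/hfm/src/hfm/HFM.h: each shape refers to a mesh, mesh part, material, static transform node, and optional dynamic (skinning) transform purely by array index. The following is a consolidated sketch of those types as they stand after patch 007. It is a simplified reading of the diffs above rather than verbatim repository code; container element types and the trimmed-down Cluster are inferred from context, and glm/Transform members are reduced to comments.

    #include <cstdint>
    #include <vector>

    namespace hfm {

    static const uint32_t UNDEFINED_KEY = (uint32_t)-1;

    class TransformNode {
    public:
        uint32_t parent { 0 };      // index of the parent node in Model::transforms
        // Transform transform;     // local transform relative to the parent
    };

    class Cluster {                 // binding to a joint (simplified)
    public:
        uint32_t jointIndex { 0 };
        // glm::mat4 inverseBindMatrix;
    };

    class Deformer {                // per-mesh skinning weights, moved out of hfm::Mesh
    public:
        std::vector<uint32_t> indices;  // mesh vertex indices
        std::vector<float> weights;     // one weight per index
    };

    class DynamicTransform {        // rigging shared by the shapes of a skinned mesh
    public:
        std::vector<uint32_t> deformers;  // indices into Model::deformers
        std::vector<Cluster> clusters;    // cluster i drives deformers[i]
        std::vector<uint32_t> blendshapes;
        // meshExtents and modelTransform remain on hfm::Mesh for now
    };

    // The lightweight model part description: every field is an index into one
    // of the flat arrays on hfm::Model, or UNDEFINED_KEY when that aspect is absent.
    class Shape {
    public:
        uint32_t mesh { UNDEFINED_KEY };
        uint32_t meshPart { UNDEFINED_KEY };
        uint32_t material { UNDEFINED_KEY };
        uint32_t transform { UNDEFINED_KEY };        // static transform node
        uint32_t dynamicTransform { UNDEFINED_KEY }; // optional rigging/skinning
    };

    class Model {
    public:
        std::vector<Shape> shapes;  // one entry per drawable mesh part
        // std::vector<Mesh> meshes;        -- the real header keeps the full mesh,
        // std::vector<Material> materials;    material, and joint definitions;
        // std::vector<Joint> joints;          omitted here for brevity
        std::vector<Deformer> deformers;
        std::vector<TransformNode> transforms;
        std::vector<DynamicTransform> dynamicTransforms;
    };

    } // namespace hfm

A consumer walks model.shapes and resolves each index (for example model.meshes[shape.mesh], model.materials[shape.material]), skipping any field that still equals UNDEFINED_KEY; the later patches in the series migrate the serializers and bakers onto this layout.
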
From a166b4121673d21695afe1d499cb2d608c1a1e75 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 13 Sep 2019 15:39:29 -0700 Subject: [PATCH 008/121] Fix build warnings/errors --- interface/src/raypick/CollisionPick.cpp | 4 +-- libraries/baking/src/FBXBaker.cpp | 6 ++--- libraries/baking/src/FBXBaker.h | 2 +- libraries/baking/src/OBJBaker.cpp | 25 +++++++++++-------- libraries/baking/src/OBJBaker.h | 2 +- .../src/RenderableModelEntityItem.cpp | 4 +-- .../RenderableParticleEffectEntityItem.cpp | 8 +++--- libraries/fbx/src/FBXSerializer.cpp | 6 ++--- libraries/fbx/src/FBXSerializer_Mesh.cpp | 4 +-- libraries/fbx/src/OBJSerializer.cpp | 4 +-- libraries/render-utils/src/Model.cpp | 2 +- 11 files changed, 35 insertions(+), 32 deletions(-) diff --git a/interface/src/raypick/CollisionPick.cpp b/interface/src/raypick/CollisionPick.cpp index 2602bdb0a0..fe943d5b84 100644 --- a/interface/src/raypick/CollisionPick.cpp +++ b/interface/src/raypick/CollisionPick.cpp @@ -248,9 +248,9 @@ void CollisionPick::computeShapeInfo(const CollisionRegion& pick, ShapeInfo& sha shapeInfo.setParams(type, dimensions, resource->getURL().toString()); } else if (type >= SHAPE_TYPE_SIMPLE_HULL && type <= SHAPE_TYPE_STATIC_MESH) { const HFMModel& hfmModel = resource->getHFMModel(); - int numHFMMeshes = hfmModel.meshes.size(); + uint32_t numHFMMeshes = (uint32_t)hfmModel.meshes.size(); int totalNumVertices = 0; - for (int i = 0; i < numHFMMeshes; i++) { + for (uint32_t i = 0; i < numHFMMeshes; i++) { const HFMMesh& mesh = hfmModel.meshes.at(i); totalNumVertices += mesh.vertices.size(); } diff --git a/libraries/baking/src/FBXBaker.cpp b/libraries/baking/src/FBXBaker.cpp index eb02ac2241..7f508dfe15 100644 --- a/libraries/baking/src/FBXBaker.cpp +++ b/libraries/baking/src/FBXBaker.cpp @@ -90,11 +90,11 @@ void FBXBaker::replaceMeshNodeWithDraco(FBXNode& meshNode, const QByteArray& dra } } -void FBXBaker::rewriteAndBakeSceneModels(const QVector& meshes, const std::vector& dracoMeshes, const std::vector>& dracoMaterialLists) { +void FBXBaker::rewriteAndBakeSceneModels(const std::vector& meshes, const std::vector& dracoMeshes, const std::vector>& dracoMaterialLists) { std::vector meshIndexToRuntimeOrder; - auto meshCount = (int)meshes.size(); + auto meshCount = (uint32_t)meshes.size(); meshIndexToRuntimeOrder.resize(meshCount); - for (int i = 0; i < meshCount; i++) { + for (uint32_t i = 0; i < meshCount; i++) { meshIndexToRuntimeOrder[meshes[i].meshIndex] = i; } diff --git a/libraries/baking/src/FBXBaker.h b/libraries/baking/src/FBXBaker.h index a528de512d..6ac05e36e9 100644 --- a/libraries/baking/src/FBXBaker.h +++ b/libraries/baking/src/FBXBaker.h @@ -33,7 +33,7 @@ protected: virtual void bakeProcessedSource(const hfm::Model::Pointer& hfmModel, const std::vector& dracoMeshes, const std::vector>& dracoMaterialLists) override; private: - void rewriteAndBakeSceneModels(const QVector& meshes, const std::vector& dracoMeshes, const std::vector>& dracoMaterialLists); + void rewriteAndBakeSceneModels(const std::vector& meshes, const std::vector& dracoMeshes, const std::vector>& dracoMaterialLists); void replaceMeshNodeWithDraco(FBXNode& meshNode, const QByteArray& dracoMeshBytes, const std::vector& dracoMaterialList); }; diff --git a/libraries/baking/src/OBJBaker.cpp b/libraries/baking/src/OBJBaker.cpp index a2d0ab1094..4adaa01845 100644 --- a/libraries/baking/src/OBJBaker.cpp +++ b/libraries/baking/src/OBJBaker.cpp @@ -106,11 +106,16 @@ void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& h 
materialNode.name = MATERIAL_NODE_NAME; if (hfmModel->materials.size() == 1) { // case when no material information is provided, OBJSerializer considers it as a single default material - for (auto& materialID : hfmModel->materials.keys()) { - setMaterialNodeProperties(materialNode, materialID, hfmModel); + for (auto& material : hfmModel->materials) { + setMaterialNodeProperties(materialNode, material.name, material, hfmModel); } } else { - setMaterialNodeProperties(materialNode, meshPart.materialID, hfmModel); + for (auto& material : hfmModel->materials) { + if (material.name == meshPart.materialID) { + setMaterialNodeProperties(materialNode, meshPart.materialID, material, hfmModel); + break; + } + } } objectNode.children.append(materialNode); @@ -153,12 +158,10 @@ void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& h } // Set properties for material nodes -void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material, const hfm::Model::Pointer& hfmModel) { +void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, const QString& materialName, const hfm::Material& material, const hfm::Model::Pointer& hfmModel) { auto materialID = nextNodeID(); _materialIDs.push_back(materialID); - materialNode.properties = { materialID, material, MESH }; - - HFMMaterial currentMaterial = hfmModel->materials[material]; + materialNode.properties = { materialID, materialName, MESH }; // Setting the hierarchy: Material -> Properties70 -> P -> Properties FBXNode properties70Node; @@ -170,7 +173,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material pNodeDiffuseColor.name = P_NODE_NAME; pNodeDiffuseColor.properties.append({ "DiffuseColor", "Color", "", "A", - currentMaterial.diffuseColor[0], currentMaterial.diffuseColor[1], currentMaterial.diffuseColor[2] + material.diffuseColor[0], material.diffuseColor[1], material.diffuseColor[2] }); } properties70Node.children.append(pNodeDiffuseColor); @@ -181,7 +184,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material pNodeSpecularColor.name = P_NODE_NAME; pNodeSpecularColor.properties.append({ "SpecularColor", "Color", "", "A", - currentMaterial.specularColor[0], currentMaterial.specularColor[1], currentMaterial.specularColor[2] + material.specularColor[0], material.specularColor[1], material.specularColor[2] }); } properties70Node.children.append(pNodeSpecularColor); @@ -192,7 +195,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material pNodeShininess.name = P_NODE_NAME; pNodeShininess.properties.append({ "Shininess", "Number", "", "A", - currentMaterial.shininess + material.shininess }); } properties70Node.children.append(pNodeShininess); @@ -203,7 +206,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material pNodeOpacity.name = P_NODE_NAME; pNodeOpacity.properties.append({ "Opacity", "Number", "", "A", - currentMaterial.opacity + material.opacity }); } properties70Node.children.append(pNodeOpacity); diff --git a/libraries/baking/src/OBJBaker.h b/libraries/baking/src/OBJBaker.h index 9d0fe53e3c..044c51d0cc 100644 --- a/libraries/baking/src/OBJBaker.h +++ b/libraries/baking/src/OBJBaker.h @@ -28,7 +28,7 @@ protected: private: void createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh); - void setMaterialNodeProperties(FBXNode& materialNode, QString material, const hfm::Model::Pointer& hfmModel); + void setMaterialNodeProperties(FBXNode& 
materialNode, const QString& materialName, const hfm::Material& material, const hfm::Model::Pointer& hfmModel); NodeID nextNodeID() { return _nodeID++; } NodeID _nodeID { 0 }; diff --git a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp index 6314cc8ce4..7c9e8e5f13 100644 --- a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp @@ -473,11 +473,11 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { // compute meshPart local transforms QVector localTransforms; const HFMModel& hfmModel = model->getHFMModel(); - int numHFMMeshes = hfmModel.meshes.size(); + uint32_t numHFMMeshes = (uint32_t)hfmModel.meshes.size(); int totalNumVertices = 0; glm::vec3 dimensions = getScaledDimensions(); glm::mat4 invRegistraionOffset = glm::translate(dimensions * (getRegistrationPoint() - ENTITY_ITEM_DEFAULT_REGISTRATION_POINT)); - for (int i = 0; i < numHFMMeshes; i++) { + for (uint32_t i = 0; i < numHFMMeshes; i++) { const HFMMesh& mesh = hfmModel.meshes.at(i); if (mesh.clusters.size() > 0) { const HFMCluster& cluster = mesh.clusters.at(0); diff --git a/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp b/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp index e3528e2291..a97cc7c84c 100644 --- a/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp @@ -490,7 +490,7 @@ void ParticleEffectEntityRenderer::fetchGeometryResource() { void ParticleEffectEntityRenderer::computeTriangles(const hfm::Model& hfmModel) { PROFILE_RANGE(render, __FUNCTION__); - int numberOfMeshes = hfmModel.meshes.size(); + uint32_t numberOfMeshes = (uint32_t)hfmModel.meshes.size(); _hasComputedTriangles = true; _triangleInfo.triangles.clear(); @@ -500,11 +500,11 @@ void ParticleEffectEntityRenderer::computeTriangles(const hfm::Model& hfmModel) float minArea = FLT_MAX; AABox bounds; - for (int i = 0; i < numberOfMeshes; i++) { + for (uint32_t i = 0; i < numberOfMeshes; i++) { const HFMMesh& mesh = hfmModel.meshes.at(i); - const int numberOfParts = mesh.parts.size(); - for (int j = 0; j < numberOfParts; j++) { + const uint32_t numberOfParts = (uint32_t)mesh.parts.size(); + for (uint32_t j = 0; j < numberOfParts; j++) { const HFMMeshPart& part = mesh.parts.at(j); const int INDICES_PER_TRIANGLE = 3; diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 81444f8c6b..f3c620c929 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1288,7 +1288,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const const FBXModel& fbxModel = fbxModels[modelID]; HFMJoint joint; joint.parentIndex = fbxModel.parentIndex; - int jointIndex = hfmModel.joints.size(); + uint32_t jointIndex = (uint32_t)hfmModel.joints.size(); joint.translation = fbxModel.translation; // these are usually in centimeters joint.preTransform = fbxModel.preTransform; @@ -1613,7 +1613,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // transform cluster vertices to joint-frame and save for later glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform; ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex); - foreach (const glm::vec3& vertex, extracted.mesh.vertices) { + for (const glm::vec3& vertex : 
extracted.mesh.vertices) { const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertex); points.push_back(extractTranslation(vertexTransform)); } @@ -1628,7 +1628,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } hfmModel.meshes.push_back(extracted.mesh); - int meshIndex = hfmModel.meshes.size() - 1; + uint32_t meshIndex = (uint32_t)hfmModel.meshes.size() - 1; meshIDsToMeshIndices.insert(it.key(), meshIndex); } diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index 802db4b428..479e7acfc9 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -500,7 +500,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me data.extracted.partMaterialTextures.append(materialTexture); } - partIndexPlusOne = data.extracted.mesh.parts.size(); + partIndexPlusOne = (int)data.extracted.mesh.parts.size(); } // give the mesh part this index @@ -535,7 +535,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me if (partIndex == 0) { data.extracted.partMaterialTextures.append(materialTexture); data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1); - partIndex = data.extracted.mesh.parts.size(); + partIndex = (int)data.extracted.mesh.parts.size(); } HFMMeshPart& part = data.extracted.mesh.parts[partIndex - 1]; diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index d6be066674..e1fc85ca2a 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -752,11 +752,11 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V } // clean up old mesh parts. - int unmodifiedMeshPartCount = mesh.parts.size(); + auto unmodifiedMeshPartCount = (uint32_t)mesh.parts.size(); mesh.parts.clear(); mesh.parts = hfmMeshParts; - for (int i = 0, meshPartCount = 0; i < unmodifiedMeshPartCount; i++, meshPartCount++) { + for (uint32_t i = 0, meshPartCount = 0; i < unmodifiedMeshPartCount; i++, meshPartCount++) { FaceGroup faceGroup = faceGroups[meshPartCount]; // Now that each mesh has been created with its own unique material mappings, fill them with data (vertex data is duplicated, face data is not). 
diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index e4fefacb96..aa3708fb1e 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -804,7 +804,7 @@ void Model::calculateTriangleSets(const HFMModel& hfmModel) { for (uint32_t i = 0; i < numberOfMeshes; i++) { const HFMMesh& mesh = hfmModel.meshes.at(i); - const uint32_t numberOfParts = mesh.parts.size(); + const uint32_t numberOfParts = (uint32_t)mesh.parts.size(); auto& meshTriangleSets = _modelSpaceMeshTriangleSets[i]; meshTriangleSets.resize(numberOfParts); From 5837053f50c18109ee06e16e6fac148fe6c75848 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 13 Sep 2019 16:56:30 -0700 Subject: [PATCH 009/121] Fix miscellaneous build errors/warnings --- assignment-client/src/avatars/ScriptableAvatar.cpp | 4 ++-- libraries/fbx/src/GLTFSerializer.cpp | 2 +- tools/vhacd-util/src/VHACDUtil.cpp | 14 +++++++------- tools/vhacd-util/src/VHACDUtilApp.cpp | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/assignment-client/src/avatars/ScriptableAvatar.cpp b/assignment-client/src/avatars/ScriptableAvatar.cpp index 044ab86942..383f583327 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.cpp +++ b/assignment-client/src/avatars/ScriptableAvatar.cpp @@ -144,10 +144,10 @@ void ScriptableAvatar::update(float deltatime) { } _animationDetails.currentFrame = currentFrame; - const QVector& modelJoints = _bind->getHFMModel().joints; + const std::vector& modelJoints = _bind->getHFMModel().joints; QStringList animationJointNames = _animation->getJointNames(); - const int nJoints = modelJoints.size(); + const auto nJoints = (int)modelJoints.size(); if (_jointData.size() != nJoints) { _jointData.resize(nJoints); } diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 4c4050c935..4f1d871158 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1613,7 +1613,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& hfmModel.meshExtents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON); hfmModel.meshExtents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); - mesh.meshIndex = hfmModel.meshes.size(); + mesh.meshIndex = (int)hfmModel.meshes.size(); } ++nodecount; } diff --git a/tools/vhacd-util/src/VHACDUtil.cpp b/tools/vhacd-util/src/VHACDUtil.cpp index a5ad5bc891..3410d35e6a 100644 --- a/tools/vhacd-util/src/VHACDUtil.cpp +++ b/tools/vhacd-util/src/VHACDUtil.cpp @@ -154,7 +154,7 @@ void vhacd::VHACDUtil::fattenMesh(const HFMMesh& mesh, const glm::mat4& modelOff newMeshPart.triangleIndices << index0 << index3 << index1; newMeshPart.triangleIndices << index1 << index3 << index2; newMeshPart.triangleIndices << index2 << index3 << index0; - result.parts.append(newMeshPart); + result.parts.push_back(newMeshPart); } } @@ -259,8 +259,8 @@ void vhacd::VHACDUtil::getConvexResults(VHACD::IVHACD* convexifier, HFMMesh& res VHACD::IVHACD::ConvexHull hull; convexifier->GetConvexHull(j, hull); - resultMesh.parts.append(HFMMeshPart()); - HFMMeshPart& resultMeshPart = resultMesh.parts.last(); + resultMesh.parts.push_back(HFMMeshPart()); + HFMMeshPart& resultMeshPart = resultMesh.parts.back(); int hullIndexStart = resultMesh.vertices.size(); resultMesh.vertices.reserve(hullIndexStart + hull.m_nPoints); @@ -300,8 +300,8 @@ bool vhacd::VHACDUtil::computeVHACD(HFMModel& hfmModel, } // count the mesh-parts - int numParts = 0; - foreach (const HFMMesh& mesh, 
hfmModel.meshes) { + size_t numParts = 0; + for (const HFMMesh& mesh : hfmModel.meshes) { numParts += mesh.parts.size(); } if (_verbose) { @@ -311,8 +311,8 @@ bool vhacd::VHACDUtil::computeVHACD(HFMModel& hfmModel, VHACD::IVHACD * convexifier = VHACD::CreateVHACD(); result.meshExtents.reset(); - result.meshes.append(HFMMesh()); - HFMMesh &resultMesh = result.meshes.last(); + result.meshes.push_back(HFMMesh()); + HFMMesh &resultMesh = result.meshes.back(); const uint32_t POINT_STRIDE = 3; const uint32_t TRIANGLE_STRIDE = 3; diff --git a/tools/vhacd-util/src/VHACDUtilApp.cpp b/tools/vhacd-util/src/VHACDUtilApp.cpp index 3d675f8baf..61a6b38181 100644 --- a/tools/vhacd-util/src/VHACDUtilApp.cpp +++ b/tools/vhacd-util/src/VHACDUtilApp.cpp @@ -387,7 +387,7 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) : } if (verbose) { - int totalHulls = result.meshes[0].parts.size(); + auto totalHulls = result.meshes[0].parts.size(); qDebug() << "output file =" << outputFilename; qDebug() << "vertices =" << totalVertices; qDebug() << "triangles =" << totalTriangles; @@ -402,7 +402,7 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) : HFMMesh result; // count the mesh-parts - unsigned int meshCount = 0; + size_t meshCount = 0; foreach (const HFMMesh& mesh, fbx.meshes) { meshCount += mesh.parts.size(); } @@ -412,7 +412,7 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) : vUtil.fattenMesh(mesh, fbx.offset, result); } - newFbx.meshes.append(result); + newFbx.meshes.push_back(result); writeOBJ(outputFilename, newFbx, outputCentimeters); } } From 46616b4efddf07bdbe0c7e6e43376f410a58a2d7 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 16 Sep 2019 09:55:25 -0700 Subject: [PATCH 010/121] Fix more integer conversion warnings --- interface/src/ModelPropertiesDialog.cpp | 2 +- libraries/fbx/src/FST.cpp | 2 +- libraries/hfm/src/hfm/HFM.cpp | 4 ++-- libraries/model-baker/src/model-baker/Baker.cpp | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/interface/src/ModelPropertiesDialog.cpp b/interface/src/ModelPropertiesDialog.cpp index d67341990d..bf7fd26b08 100644 --- a/interface/src/ModelPropertiesDialog.cpp +++ b/interface/src/ModelPropertiesDialog.cpp @@ -80,7 +80,7 @@ QVariantHash ModelPropertiesDialog::getMapping() const { // update the joint indices QVariantHash jointIndices; - for (int i = 0; i < _hfmModel.joints.size(); i++) { + for (size_t i = 0; i < _hfmModel.joints.size(); i++) { jointIndices.insert(_hfmModel.joints.at(i).name, QString::number(i)); } mapping.insert(JOINT_INDEX_FIELD, jointIndices); diff --git a/libraries/fbx/src/FST.cpp b/libraries/fbx/src/FST.cpp index b6f109c217..5f5b7cf637 100644 --- a/libraries/fbx/src/FST.cpp +++ b/libraries/fbx/src/FST.cpp @@ -77,7 +77,7 @@ FST* FST::createFSTFromModel(const QString& fstPath, const QString& modelFilePat mapping.insert(JOINT_FIELD, joints); QVariantHash jointIndices; - for (int i = 0; i < hfmModel.joints.size(); i++) { + for (size_t i = 0; i < (size_t)hfmModel.joints.size(); i++) { jointIndices.insert(hfmModel.joints.at(i).name, QString::number(i)); } mapping.insert(JOINT_INDEX_FIELD, jointIndices); diff --git a/libraries/hfm/src/hfm/HFM.cpp b/libraries/hfm/src/hfm/HFM.cpp index b297db4bcb..ae68c15045 100644 --- a/libraries/hfm/src/hfm/HFM.cpp +++ b/libraries/hfm/src/hfm/HFM.cpp @@ -166,11 +166,11 @@ void HFMModel::computeKdops() { glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3), glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3) }; - if (joints.size() != (int)shapeVertices.size()) { + if (joints.size() != 
shapeVertices.size()) { return; } // now that all joints have been scanned compute a k-Dop bounding volume of mesh - for (int i = 0; i < joints.size(); ++i) { + for (size_t i = 0; i < joints.size(); ++i) { HFMJoint& joint = joints[i]; // NOTE: points are in joint-frame diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index 50221c4481..0c6aac6f22 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -37,7 +37,7 @@ namespace baker { output.edit2() = hfmModelIn->meshIndicesToModelNames; auto& blendshapesPerMesh = output.edit3(); blendshapesPerMesh.reserve(hfmModelIn->meshes.size()); - for (int i = 0; i < hfmModelIn->meshes.size(); i++) { + for (size_t i = 0; i < hfmModelIn->meshes.size(); i++) { blendshapesPerMesh.push_back(hfmModelIn->meshes[i].blendshapes.toStdVector()); } output.edit4() = hfmModelIn->joints; From 517bef63a9ae577983b0e9b5c8f0196b37d95704 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 16 Sep 2019 11:09:34 -0700 Subject: [PATCH 011/121] Make fields for TransformNode, Deformer, DynamicTransform public --- libraries/hfm/src/hfm/HFM.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index a015330431..31d8eda877 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -288,17 +288,20 @@ public: }; class TransformNode { +public: uint32_t parent { 0 }; Transform transform; }; // Formerly contained in hfm::Mesh class Deformer { +public: std::vector indices; std::vector weights; }; class DynamicTransform { +public: std::vector deformers; std::vector clusters; // affect the deformer of the same index std::vector blendshapes; From ac06234c3de8e3943f44b1e8358a332ca4a67d7f Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 16 Sep 2019 16:06:43 -0700 Subject: [PATCH 012/121] Make hfm::Deformer more closely resemble original model data. Leave weight-based blendshape packing to preparations step. --- libraries/hfm/src/hfm/HFM.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 31d8eda877..8e0944db43 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -296,8 +296,8 @@ public: // Formerly contained in hfm::Mesh class Deformer { public: - std::vector indices; - std::vector weights; + std::vector indices; + std::vector weights; }; class DynamicTransform { From cf8e584ced88c1410c5afc4695b0da120180e388 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 17 Sep 2019 14:23:11 -0700 Subject: [PATCH 013/121] Make the cluster jointIndex unsigned --- libraries/hfm/src/hfm/HFM.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 8e0944db43..8d6c0e79dc 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -122,8 +122,7 @@ public: /// A single binding to a joint. 
class Cluster { public: - - int jointIndex; + uint32_t jointIndex; glm::mat4 inverseBindMatrix; Transform inverseBindTransform; }; From de8223fee808c2ae1efec5c93b1da945b10d14ed Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 16 Sep 2019 17:44:43 -0700 Subject: [PATCH 014/121] Change BuildGraphicsMeshTask to use new deformers --- .../model-baker/src/model-baker/Baker.cpp | 10 +- .../src/model-baker/BuildGraphicsMeshTask.cpp | 134 ++++++++++++++++-- .../src/model-baker/BuildGraphicsMeshTask.h | 2 +- 3 files changed, 129 insertions(+), 17 deletions(-) diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index 3dab7f7241..1a68d3508d 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -27,7 +27,7 @@ namespace baker { class GetModelPartsTask { public: using Input = hfm::Model::Pointer; - using Output = VaryingSet5, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector>; + using Output = VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector, std::vector, std::vector, std::vector>; using JobModel = Job::ModelIO; void run(const BakeContextPointer& context, const Input& input, Output& output) { @@ -41,6 +41,9 @@ namespace baker { blendshapesPerMesh.push_back(hfmModelIn->meshes[i].blendshapes.toStdVector()); } output.edit4() = hfmModelIn->joints; + output.edit5() = hfmModelIn->shapes; + output.edit6() = hfmModelIn->dynamicTransforms; + output.edit7() = hfmModelIn->deformers; } }; @@ -134,6 +137,9 @@ namespace baker { const auto meshIndicesToModelNames = modelPartsIn.getN(2); const auto blendshapesPerMeshIn = modelPartsIn.getN(3); const auto jointsIn = modelPartsIn.getN(4); + const auto shapesIn = modelPartsIn.getN(5); + const auto dynamicTransformsIn = modelPartsIn.getN(6); + const auto deformersIn = modelPartsIn.getN(7); // Calculate normals and tangents for meshes and blendshapes if they do not exist // Note: Normals are never calculated here for OBJ models. OBJ files optionally define normals on a per-face basis, so for consistency normals are calculated beforehand in OBJSerializer. 
@@ -146,7 +152,7 @@ namespace baker { const auto tangentsPerBlendshapePerMesh = model.addJob("CalculateBlendshapeTangents", calculateBlendshapeTangentsInputs); // Build the graphics::MeshPointer for each hfm::Mesh - const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh).asVarying(); + const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, dynamicTransformsIn, deformersIn).asVarying(); const auto graphicsMeshes = model.addJob("BuildGraphicsMesh", buildGraphicsMeshInputs); // Prepare joint information diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp index 2467da7656..54e0e0ee2e 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp @@ -27,7 +27,84 @@ glm::vec3 normalizeDirForPacking(const glm::vec3& dir) { return dir; } -void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, const baker::MeshNormals& meshNormals, const baker::MeshTangents& meshTangentsIn) { +class ReweightedDeformers { +public: + std::vector indices; + std::vector weights; + bool trimmedToMatch { false }; +}; + +ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const hfm::DynamicTransform* dynamicTransform, const std::vector deformers, const uint16_t weightsPerVertex) { + size_t numClusterIndices = numMeshVertices * weightsPerVertex; + ReweightedDeformers reweightedDeformers; + // TODO: Consider having a rootCluster property in the DynamicTransform rather than appending the root to the end of the cluster list. 
+ reweightedDeformers.indices.resize(numClusterIndices, deformers.size() - 1); + reweightedDeformers.weights.resize(numClusterIndices, 0); + + std::vector weightAccumulators; + weightAccumulators.resize(numClusterIndices, 0.0f); + for (size_t i = 0; i < deformers.size(); ++i) { + const hfm::Deformer& deformer = *deformers[i]; + const hfm::Cluster& cluster = dynamicTransform->clusters[i]; + + if (deformer.indices.size() != deformer.weights.size()) { + reweightedDeformers.trimmedToMatch = true; + } + size_t numIndicesOrWeights = std::min(deformer.indices.size(), deformer.weights.size()); + for (size_t j = 0; j < numIndicesOrWeights; ++j) { + uint32_t index = deformer.indices[j]; + float weight = deformer.weights[j]; + + // look for an unused slot in the weights vector + uint32_t weightIndex = index * weightsPerVertex; + uint32_t lowestIndex = -1; + float lowestWeight = FLT_MAX; + uint16_t k = 0; + for (; k < weightsPerVertex; k++) { + if (weightAccumulators[weightIndex + k] == 0.0f) { + reweightedDeformers.indices[weightIndex + k] = i; + weightAccumulators[weightIndex + k] = weight; + break; + } + if (weightAccumulators[weightIndex + k] < lowestWeight) { + lowestIndex = k; + lowestWeight = weightAccumulators[weightIndex + k]; + } + } + if (k == weightsPerVertex && weight > lowestWeight) { + // no space for an additional weight; we must replace the lowest + weightAccumulators[weightIndex + lowestIndex] = weight; + reweightedDeformers.indices[weightIndex + lowestIndex] = i; + } + } + } + + // now that we've accumulated the most relevant weights for each vertex + // normalize and compress to 16-bits + for (size_t i = 0; i < numMeshVertices; ++i) { + size_t j = i * weightsPerVertex; + + // normalize weights into uint16_t + float totalWeight = 0.0f; + for (size_t k = j; k < j + weightsPerVertex; ++k) { + totalWeight += weightAccumulators[k]; + } + + const float ALMOST_HALF = 0.499f; + if (totalWeight > 0.0f) { + float weightScalingFactor = (float)(UINT16_MAX) / totalWeight; + for (size_t k = j; k < j + weightsPerVertex; ++k) { + reweightedDeformers.weights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF); + } + } else { + reweightedDeformers.weights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); + } + } + + return reweightedDeformers; +} + +void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, const baker::MeshNormals& meshNormals, const baker::MeshTangents& meshTangentsIn, const hfm::DynamicTransform* dynamicTransform, const std::vector meshDeformers) { auto graphicsMesh = std::make_shared(); // Fill tangents with a dummy value to force tangents to be present if there are normals @@ -86,13 +163,19 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics // Support for 4 skinning clusters: // 4 Indices are uint8 ideally, uint16 if more than 256. - const auto clusterIndiceElement = (hfmMesh.clusters.size() < UINT8_MAX ? gpu::Element(gpu::VEC4, gpu::UINT8, gpu::XYZW) : gpu::Element(gpu::VEC4, gpu::UINT16, gpu::XYZW)); + const auto clusterIndiceElement = ((meshDeformers.size() < (size_t)UINT8_MAX) ? 
gpu::Element(gpu::VEC4, gpu::UINT8, gpu::XYZW) : gpu::Element(gpu::VEC4, gpu::UINT16, gpu::XYZW)); // 4 Weights are normalized 16bits const auto clusterWeightElement = gpu::Element(gpu::VEC4, gpu::NUINT16, gpu::XYZW); + // Calculate a more condensed view of all the deformer weights + const uint16_t NUM_CLUSTERS_PER_VERT = 4; + ReweightedDeformers reweightedDeformers = getReweightedDeformers(hfmMesh.vertices.size(), dynamicTransform, meshDeformers, NUM_CLUSTERS_PER_VERT); // Cluster indices and weights must be the same sizes - const int NUM_CLUSTERS_PER_VERT = 4; - const int numVertClusters = (hfmMesh.clusterIndices.size() == hfmMesh.clusterWeights.size() ? hfmMesh.clusterIndices.size() / NUM_CLUSTERS_PER_VERT : 0); + if (reweightedDeformers.trimmedToMatch) { + HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask -- The number of indices and weights for a blendshape had different sizes and have been trimmed to match"); + } + // Record cluster sizes + const int numVertClusters = reweightedDeformers.indices.size() / NUM_CLUSTERS_PER_VERT; const int clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize(); const int clusterWeightsSize = numVertClusters * clusterWeightElement.getSize(); @@ -181,22 +264,22 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics // Clusters data if (clusterIndicesSize > 0) { - if (hfmMesh.clusters.size() < UINT8_MAX) { + if (meshDeformers.size() < UINT8_MAX) { // yay! we can fit the clusterIndices within 8-bits - int32_t numIndices = hfmMesh.clusterIndices.size(); - QVector clusterIndices; - clusterIndices.resize(numIndices); + int32_t numIndices = reweightedDeformers.indices.size(); + std::vector packedDeformerIndices; + packedDeformerIndices.resize(numIndices); for (int32_t i = 0; i < numIndices; ++i) { assert(hfmMesh.clusterIndices[i] <= UINT8_MAX); - clusterIndices[i] = (uint8_t)(hfmMesh.clusterIndices[i]); + packedDeformerIndices[i] = (uint8_t)(reweightedDeformers.indices[i]); } - vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) clusterIndices.constData()); + vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) packedDeformerIndices.data()); } else { - vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) hfmMesh.clusterIndices.constData()); + vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) reweightedDeformers.indices.data()); } } if (clusterWeightsSize > 0) { - vertBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (const gpu::Byte*) hfmMesh.clusterWeights.constData()); + vertBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (const gpu::Byte*) reweightedDeformers.weights.data()); } @@ -377,6 +460,18 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const const auto& meshIndicesToModelNames = input.get2(); const auto& normalsPerMesh = input.get3(); const auto& tangentsPerMesh = input.get4(); + const auto& shapes = input.get5(); + const auto& dynamicTransforms = input.get6(); + const auto& deformers = input.get7(); + + // Currently, there is only (at most) one dynamicTransform per mesh + // An undefined shape.dynamicTransform has the value hfm::Shape::UNDEFINED_KEY + std::vector dynamicTransformPerMesh; + dynamicTransformPerMesh.resize(meshes.size(), hfm::Shape::UNDEFINED_KEY); + for (const auto& shape : shapes) { + uint32_t dynamicTransformIndex = shape.dynamicTransform; + dynamicTransformPerMesh[shape.mesh] = 
dynamicTransformIndex; + } auto& graphicsMeshes = output; @@ -384,9 +479,20 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const for (int i = 0; i < n; i++) { graphicsMeshes.emplace_back(); auto& graphicsMesh = graphicsMeshes[i]; - + + auto dynamicTransformIndex = dynamicTransformPerMesh[i]; + const hfm::DynamicTransform* dynamicTransform = nullptr; + std::vector meshDeformers; + if (dynamicTransformIndex != hfm::Shape::UNDEFINED_KEY) { + dynamicTransform = &dynamicTransforms[dynamicTransformIndex]; + for (const auto& deformerIndex : dynamicTransform->deformers) { + const auto& deformer = deformers[deformerIndex]; + meshDeformers.push_back(&deformer); + } + } + // Try to create the graphics::Mesh - buildGraphicsMesh(meshes[i], graphicsMesh, baker::safeGet(normalsPerMesh, i), baker::safeGet(tangentsPerMesh, i)); + buildGraphicsMesh(meshes[i], graphicsMesh, baker::safeGet(normalsPerMesh, i), baker::safeGet(tangentsPerMesh, i), dynamicTransform, meshDeformers); // Choose a name for the mesh if (graphicsMesh) { diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h index bb4136c086..be1e4350be 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h @@ -20,7 +20,7 @@ class BuildGraphicsMeshTask { public: - using Input = baker::VaryingSet5, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh>; + using Input = baker::VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh, std::vector, std::vector, std::vector>; using Output = std::vector; using JobModel = baker::Job::ModelIO; From 56896e08669de4f065723593d20788853d80c0f6 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 19 Sep 2019 09:26:28 -0700 Subject: [PATCH 015/121] Re-name error message to mention deformers, not blendshapes --- libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp index 54e0e0ee2e..543c741588 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp @@ -172,7 +172,7 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics ReweightedDeformers reweightedDeformers = getReweightedDeformers(hfmMesh.vertices.size(), dynamicTransform, meshDeformers, NUM_CLUSTERS_PER_VERT); // Cluster indices and weights must be the same sizes if (reweightedDeformers.trimmedToMatch) { - HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask -- The number of indices and weights for a blendshape had different sizes and have been trimmed to match"); + HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask -- The number of indices and weights for a deformer had different sizes and have been trimmed to match"); } // Record cluster sizes const int numVertClusters = reweightedDeformers.indices.size() / NUM_CLUSTERS_PER_VERT; From 99386565b070c9b75e5fb3ba9fdef34bc4edd4e4 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 19 Sep 2019 11:15:38 -0700 Subject: [PATCH 016/121] Fix build warnings/errors --- libraries/hfm/src/hfm/HFM.cpp | 2 +- libraries/hfm/src/hfm/HFM.h | 6 ++-- 
.../src/model-baker/BuildGraphicsMeshTask.cpp | 36 +++++++++---------- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.cpp b/libraries/hfm/src/hfm/HFM.cpp index ae68c15045..f68af2b1ce 100644 --- a/libraries/hfm/src/hfm/HFM.cpp +++ b/libraries/hfm/src/hfm/HFM.cpp @@ -175,7 +175,7 @@ void HFMModel::computeKdops() { // NOTE: points are in joint-frame ShapeVertices& points = shapeVertices.at(i); - glm::quat rotOffset = jointRotationOffsets.contains(i) ? glm::inverse(jointRotationOffsets[i]) : quat(); + glm::quat rotOffset = jointRotationOffsets.contains((int)i) ? glm::inverse(jointRotationOffsets[(int)i]) : quat(); if (points.size() > 0) { // compute average point glm::vec3 avgPoint = glm::vec3(0.0f); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 8d6c0e79dc..d13cf3e2d0 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -66,6 +66,8 @@ static const int DRACO_ATTRIBUTE_ORIGINAL_INDEX = DRACO_BEGIN_CUSTOM_HIFI_ATTRIB // High Fidelity Model namespace namespace hfm { +static const uint32_t UNDEFINED_KEY = (uint32_t)-1; + /// A single blendshape. class Blendshape { public: @@ -301,7 +303,7 @@ public: class DynamicTransform { public: - std::vector deformers; + std::vector deformers; std::vector clusters; // affect the deformer of the same index std::vector blendshapes; // There are also the meshExtents and modelTransform, which for now are left in hfm::Mesh @@ -310,8 +312,6 @@ public: // The lightweight model part description. class Shape { public: - const static uint32_t UNDEFINED_KEY { (uint32_t)-1 }; - uint32_t mesh { UNDEFINED_KEY }; uint32_t meshPart { UNDEFINED_KEY }; uint32_t material { UNDEFINED_KEY }; diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp index 543c741588..a9a544c34a 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp @@ -38,12 +38,12 @@ ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const hfm::Dy size_t numClusterIndices = numMeshVertices * weightsPerVertex; ReweightedDeformers reweightedDeformers; // TODO: Consider having a rootCluster property in the DynamicTransform rather than appending the root to the end of the cluster list. 
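As a side note on the reweighting pass being adjusted in this hunk: once the per-vertex influence slots have been filled, the surviving float weights are renormalized into 16-bit fixed point. A minimal standalone sketch of that normalization step, assuming plain std::vector inputs rather than the hfm types (normalizeWeightsTo16Bit is an illustrative name, not part of the patch):

    #include <cstdint>
    #include <vector>

    // Normalize each group of `weightsPerVertex` accumulated float weights so they
    // sum to UINT16_MAX, rounding with the same 0.499 bias used in the patch.
    std::vector<uint16_t> normalizeWeightsTo16Bit(const std::vector<float>& accumulators,
                                                  size_t weightsPerVertex) {
        const float ALMOST_HALF = 0.499f;
        std::vector<uint16_t> packed(accumulators.size(), 0);
        for (size_t base = 0; base < accumulators.size(); base += weightsPerVertex) {
            float total = 0.0f;
            for (size_t k = 0; k < weightsPerVertex; ++k) {
                total += accumulators[base + k];
            }
            if (total > 0.0f) {
                float scale = (float)UINT16_MAX / total;
                for (size_t k = 0; k < weightsPerVertex; ++k) {
                    packed[base + k] = (uint16_t)(scale * accumulators[base + k] + ALMOST_HALF);
                }
            } else {
                // No influences recorded for this vertex: give full weight to the first slot.
                packed[base] = UINT16_MAX;
            }
        }
        return packed;
    }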
- reweightedDeformers.indices.resize(numClusterIndices, deformers.size() - 1); + reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(deformers.size() - 1)); reweightedDeformers.weights.resize(numClusterIndices, 0); std::vector weightAccumulators; weightAccumulators.resize(numClusterIndices, 0.0f); - for (size_t i = 0; i < deformers.size(); ++i) { + for (uint16_t i = 0; i < (uint16_t)deformers.size(); ++i) { const hfm::Deformer& deformer = *deformers[i]; const hfm::Cluster& cluster = dynamicTransform->clusters[i]; @@ -175,19 +175,19 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask -- The number of indices and weights for a deformer had different sizes and have been trimmed to match"); } // Record cluster sizes - const int numVertClusters = reweightedDeformers.indices.size() / NUM_CLUSTERS_PER_VERT; - const int clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize(); - const int clusterWeightsSize = numVertClusters * clusterWeightElement.getSize(); + const size_t numVertClusters = reweightedDeformers.indices.size() / NUM_CLUSTERS_PER_VERT; + const size_t clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize(); + const size_t clusterWeightsSize = numVertClusters * clusterWeightElement.getSize(); // Decide on where to put what seequencially in a big buffer: - const int positionsOffset = 0; - const int normalsAndTangentsOffset = positionsOffset + positionsSize; - const int colorsOffset = normalsAndTangentsOffset + normalsAndTangentsSize; - const int texCoordsOffset = colorsOffset + colorsSize; - const int texCoords1Offset = texCoordsOffset + texCoordsSize; - const int clusterIndicesOffset = texCoords1Offset + texCoords1Size; - const int clusterWeightsOffset = clusterIndicesOffset + clusterIndicesSize; - const int totalVertsSize = clusterWeightsOffset + clusterWeightsSize; + const size_t positionsOffset = 0; + const size_t normalsAndTangentsOffset = positionsOffset + positionsSize; + const size_t colorsOffset = normalsAndTangentsOffset + normalsAndTangentsSize; + const size_t texCoordsOffset = colorsOffset + colorsSize; + const size_t texCoords1Offset = texCoordsOffset + texCoordsSize; + const size_t clusterIndicesOffset = texCoords1Offset + texCoords1Size; + const size_t clusterWeightsOffset = clusterIndicesOffset + clusterIndicesSize; + const size_t totalVertsSize = clusterWeightsOffset + clusterWeightsSize; // Copy all vertex data in a single buffer auto vertBuffer = std::make_shared(); @@ -266,7 +266,7 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics if (clusterIndicesSize > 0) { if (meshDeformers.size() < UINT8_MAX) { // yay! 
we can fit the clusterIndices within 8-bits - int32_t numIndices = reweightedDeformers.indices.size(); + int32_t numIndices = (int32_t)reweightedDeformers.indices.size(); std::vector packedDeformerIndices; packedDeformerIndices.resize(numIndices); for (int32_t i = 0; i < numIndices; ++i) { @@ -289,7 +289,7 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics auto vertexBufferStream = std::make_shared(); gpu::BufferPointer attribBuffer; - int totalAttribBufferSize = totalVertsSize; + size_t totalAttribBufferSize = totalVertsSize; gpu::uint8 posChannel = 0; gpu::uint8 tangentChannel = posChannel; gpu::uint8 attribChannel = posChannel; @@ -465,9 +465,9 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const const auto& deformers = input.get7(); // Currently, there is only (at most) one dynamicTransform per mesh - // An undefined shape.dynamicTransform has the value hfm::Shape::UNDEFINED_KEY + // An undefined shape.dynamicTransform has the value hfm::UNDEFINED_KEY std::vector dynamicTransformPerMesh; - dynamicTransformPerMesh.resize(meshes.size(), hfm::Shape::UNDEFINED_KEY); + dynamicTransformPerMesh.resize(meshes.size(), hfm::UNDEFINED_KEY); for (const auto& shape : shapes) { uint32_t dynamicTransformIndex = shape.dynamicTransform; dynamicTransformPerMesh[shape.mesh] = dynamicTransformIndex; @@ -483,7 +483,7 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const auto dynamicTransformIndex = dynamicTransformPerMesh[i]; const hfm::DynamicTransform* dynamicTransform = nullptr; std::vector meshDeformers; - if (dynamicTransformIndex != hfm::Shape::UNDEFINED_KEY) { + if (dynamicTransformIndex != hfm::UNDEFINED_KEY) { dynamicTransform = &dynamicTransforms[dynamicTransformIndex]; for (const auto& deformerIndex : dynamicTransform->deformers) { const auto& deformer = deformers[deformerIndex]; From 5edb312346d67877736ffc762eef004045ff9508 Mon Sep 17 00:00:00 2001 From: Brad Davis Date: Mon, 23 Sep 2019 10:22:13 -0700 Subject: [PATCH 017/121] gltf wip --- libraries/fbx/src/GLTFSerializer.cpp | 1950 ++++++++++++++------------ libraries/fbx/src/GLTFSerializer.h | 77 +- libraries/hfm/src/hfm/HFM.h | 6 +- tests-manual/fbx/CMakeLists.txt | 11 + tests-manual/fbx/src/main.cpp | 77 + 5 files changed, 1199 insertions(+), 922 deletions(-) create mode 100644 tests-manual/fbx/CMakeLists.txt create mode 100644 tests-manual/fbx/src/main.cpp diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 4f1d871158..fe63159543 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -25,9 +25,13 @@ #include #include +#include +#include #include #include +#include + #include #include #include @@ -37,30 +41,57 @@ #include "FBXSerializer.h" -#define GLTF_GET_INDICIES(accCount) int index1 = (indices[n + 0] * accCount); int index2 = (indices[n + 1] * accCount); int index3 = (indices[n + 2] * accCount); +#define GLTF_GET_INDICIES(accCount) \ + int index1 = (indices[n + 0] * accCount); \ + int index2 = (indices[n + 1] * accCount); \ + int index3 = (indices[n + 2] * accCount); -#define GLTF_APPEND_ARRAY_1(newArray, oldArray) GLTF_GET_INDICIES(1) \ -newArray.append(oldArray[index1]); \ -newArray.append(oldArray[index2]); \ -newArray.append(oldArray[index3]); +#define GLTF_APPEND_ARRAY_1(newArray, oldArray) \ + GLTF_GET_INDICIES(1) \ + newArray.append(oldArray[index1]); \ + newArray.append(oldArray[index2]); \ + newArray.append(oldArray[index3]); -#define 
GLTF_APPEND_ARRAY_2(newArray, oldArray) GLTF_GET_INDICIES(2) \ -newArray.append(oldArray[index1]); newArray.append(oldArray[index1 + 1]); \ -newArray.append(oldArray[index2]); newArray.append(oldArray[index2 + 1]); \ -newArray.append(oldArray[index3]); newArray.append(oldArray[index3 + 1]); +#define GLTF_APPEND_ARRAY_2(newArray, oldArray) \ + GLTF_GET_INDICIES(2) \ + newArray.append(oldArray[index1]); \ + newArray.append(oldArray[index1 + 1]); \ + newArray.append(oldArray[index2]); \ + newArray.append(oldArray[index2 + 1]); \ + newArray.append(oldArray[index3]); \ + newArray.append(oldArray[index3 + 1]); -#define GLTF_APPEND_ARRAY_3(newArray, oldArray) GLTF_GET_INDICIES(3) \ -newArray.append(oldArray[index1]); newArray.append(oldArray[index1 + 1]); newArray.append(oldArray[index1 + 2]); \ -newArray.append(oldArray[index2]); newArray.append(oldArray[index2 + 1]); newArray.append(oldArray[index2 + 2]); \ -newArray.append(oldArray[index3]); newArray.append(oldArray[index3 + 1]); newArray.append(oldArray[index3 + 2]); +#define GLTF_APPEND_ARRAY_3(newArray, oldArray) \ + GLTF_GET_INDICIES(3) \ + newArray.append(oldArray[index1]); \ + newArray.append(oldArray[index1 + 1]); \ + newArray.append(oldArray[index1 + 2]); \ + newArray.append(oldArray[index2]); \ + newArray.append(oldArray[index2 + 1]); \ + newArray.append(oldArray[index2 + 2]); \ + newArray.append(oldArray[index3]); \ + newArray.append(oldArray[index3 + 1]); \ + newArray.append(oldArray[index3 + 2]); -#define GLTF_APPEND_ARRAY_4(newArray, oldArray) GLTF_GET_INDICIES(4) \ -newArray.append(oldArray[index1]); newArray.append(oldArray[index1 + 1]); newArray.append(oldArray[index1 + 2]); newArray.append(oldArray[index1 + 3]); \ -newArray.append(oldArray[index2]); newArray.append(oldArray[index2 + 1]); newArray.append(oldArray[index2 + 2]); newArray.append(oldArray[index2 + 3]); \ -newArray.append(oldArray[index3]); newArray.append(oldArray[index3 + 1]); newArray.append(oldArray[index3 + 2]); newArray.append(oldArray[index3 + 3]); +#define GLTF_APPEND_ARRAY_4(newArray, oldArray) \ + GLTF_GET_INDICIES(4) \ + newArray.append(oldArray[index1]); \ + newArray.append(oldArray[index1 + 1]); \ + newArray.append(oldArray[index1 + 2]); \ + newArray.append(oldArray[index1 + 3]); \ + newArray.append(oldArray[index2]); \ + newArray.append(oldArray[index2 + 1]); \ + newArray.append(oldArray[index2 + 2]); \ + newArray.append(oldArray[index2 + 3]); \ + newArray.append(oldArray[index3]); \ + newArray.append(oldArray[index3 + 1]); \ + newArray.append(oldArray[index3 + 2]); \ + newArray.append(oldArray[index3 + 3]); -bool GLTFSerializer::getStringVal(const QJsonObject& object, const QString& fieldname, - QString& value, QMap& defined) { +bool GLTFSerializer::getStringVal(const QJsonObject& object, + const QString& fieldname, + QString& value, + QMap& defined) { bool _defined = (object.contains(fieldname) && object[fieldname].isString()); if (_defined) { value = object[fieldname].toString(); @@ -69,8 +100,10 @@ bool GLTFSerializer::getStringVal(const QJsonObject& object, const QString& fiel return _defined; } -bool GLTFSerializer::getBoolVal(const QJsonObject& object, const QString& fieldname, - bool& value, QMap& defined) { +bool GLTFSerializer::getBoolVal(const QJsonObject& object, + const QString& fieldname, + bool& value, + QMap& defined) { bool _defined = (object.contains(fieldname) && object[fieldname].isBool()); if (_defined) { value = object[fieldname].toBool(); @@ -79,8 +112,7 @@ bool GLTFSerializer::getBoolVal(const QJsonObject& object, const QString& 
fieldn return _defined; } -bool GLTFSerializer::getIntVal(const QJsonObject& object, const QString& fieldname, - int& value, QMap& defined) { +bool GLTFSerializer::getIntVal(const QJsonObject& object, const QString& fieldname, int& value, QMap& defined) { bool _defined = (object.contains(fieldname) && !object[fieldname].isNull()); if (_defined) { value = object[fieldname].toInt(); @@ -89,8 +121,10 @@ bool GLTFSerializer::getIntVal(const QJsonObject& object, const QString& fieldna return _defined; } -bool GLTFSerializer::getDoubleVal(const QJsonObject& object, const QString& fieldname, - double& value, QMap& defined) { +bool GLTFSerializer::getDoubleVal(const QJsonObject& object, + const QString& fieldname, + double& value, + QMap& defined) { bool _defined = (object.contains(fieldname) && object[fieldname].isDouble()); if (_defined) { value = object[fieldname].toDouble(); @@ -98,8 +132,10 @@ bool GLTFSerializer::getDoubleVal(const QJsonObject& object, const QString& fiel defined.insert(fieldname, _defined); return _defined; } -bool GLTFSerializer::getObjectVal(const QJsonObject& object, const QString& fieldname, - QJsonObject& value, QMap& defined) { +bool GLTFSerializer::getObjectVal(const QJsonObject& object, + const QString& fieldname, + QJsonObject& value, + QMap& defined) { bool _defined = (object.contains(fieldname) && object[fieldname].isObject()); if (_defined) { value = object[fieldname].toObject(); @@ -108,12 +144,14 @@ bool GLTFSerializer::getObjectVal(const QJsonObject& object, const QString& fiel return _defined; } -bool GLTFSerializer::getIntArrayVal(const QJsonObject& object, const QString& fieldname, - QVector& values, QMap& defined) { +bool GLTFSerializer::getIntArrayVal(const QJsonObject& object, + const QString& fieldname, + QVector& values, + QMap& defined) { bool _defined = (object.contains(fieldname) && object[fieldname].isArray()); if (_defined) { QJsonArray arr = object[fieldname].toArray(); - foreach(const QJsonValue & v, arr) { + for (const QJsonValue& v : arr) { if (!v.isNull()) { values.push_back(v.toInt()); } @@ -123,12 +161,14 @@ bool GLTFSerializer::getIntArrayVal(const QJsonObject& object, const QString& fi return _defined; } -bool GLTFSerializer::getDoubleArrayVal(const QJsonObject& object, const QString& fieldname, - QVector& values, QMap& defined) { +bool GLTFSerializer::getDoubleArrayVal(const QJsonObject& object, + const QString& fieldname, + QVector& values, + QMap& defined) { bool _defined = (object.contains(fieldname) && object[fieldname].isArray()); if (_defined) { QJsonArray arr = object[fieldname].toArray(); - foreach(const QJsonValue & v, arr) { + for (const QJsonValue& v : arr) { if (v.isDouble()) { values.push_back(v.toDouble()); } @@ -138,8 +178,10 @@ bool GLTFSerializer::getDoubleArrayVal(const QJsonObject& object, const QString& return _defined; } -bool GLTFSerializer::getObjectArrayVal(const QJsonObject& object, const QString& fieldname, - QJsonArray& objects, QMap& defined) { +bool GLTFSerializer::getObjectArrayVal(const QJsonObject& object, + const QString& fieldname, + QJsonArray& objects, + QMap& defined) { bool _defined = (object.contains(fieldname) && object[fieldname].isArray()); if (_defined) { objects = object[fieldname].toArray(); @@ -149,7 +191,7 @@ bool GLTFSerializer::getObjectArrayVal(const QJsonObject& object, const QString& } hifi::ByteArray GLTFSerializer::setGLBChunks(const hifi::ByteArray& data) { - int byte = 4; + int byte = 4; int jsonStart = data.indexOf("JSON", Qt::CaseSensitive); int binStart = data.indexOf("BIN", 
Qt::CaseSensitive); int jsonLength, binLength; @@ -173,8 +215,7 @@ hifi::ByteArray GLTFSerializer::setGLBChunks(const hifi::ByteArray& data) { return jsonChunk; } -int GLTFSerializer::getMeshPrimitiveRenderingMode(const QString& type) -{ +int GLTFSerializer::getMeshPrimitiveRenderingMode(const QString& type) { if (type == "POINTS") { return GLTFMeshPrimitivesRenderingMode::POINTS; } @@ -199,8 +240,7 @@ int GLTFSerializer::getMeshPrimitiveRenderingMode(const QString& type) return GLTFMeshPrimitivesRenderingMode::TRIANGLES; } -int GLTFSerializer::getAccessorType(const QString& type) -{ +int GLTFSerializer::getAccessorType(const QString& type) { if (type == "SCALAR") { return GLTFAccessorType::SCALAR; } @@ -225,8 +265,7 @@ int GLTFSerializer::getAccessorType(const QString& type) return GLTFAccessorType::SCALAR; } -int GLTFSerializer::getMaterialAlphaMode(const QString& type) -{ +int GLTFSerializer::getMaterialAlphaMode(const QString& type) { if (type == "OPAQUE") { return GLTFMaterialAlphaMode::OPAQUE; } @@ -239,8 +278,7 @@ int GLTFSerializer::getMaterialAlphaMode(const QString& type) return GLTFMaterialAlphaMode::OPAQUE; } -int GLTFSerializer::getCameraType(const QString& type) -{ +int GLTFSerializer::getCameraType(const QString& type) { if (type == "orthographic") { return GLTFCameraTypes::ORTHOGRAPHIC; } @@ -250,8 +288,7 @@ int GLTFSerializer::getCameraType(const QString& type) return GLTFCameraTypes::PERSPECTIVE; } -int GLTFSerializer::getImageMimeType(const QString& mime) -{ +int GLTFSerializer::getImageMimeType(const QString& mime) { if (mime == "image/jpeg") { return GLTFImageMimetype::JPEG; } @@ -261,8 +298,7 @@ int GLTFSerializer::getImageMimeType(const QString& mime) return GLTFImageMimetype::JPEG; } -int GLTFSerializer::getAnimationSamplerInterpolation(const QString& interpolation) -{ +int GLTFSerializer::getAnimationSamplerInterpolation(const QString& interpolation) { if (interpolation == "LINEAR") { return GLTFAnimationSamplerInterpolation::LINEAR; } @@ -273,8 +309,7 @@ bool GLTFSerializer::setAsset(const QJsonObject& object) { QJsonObject jsAsset; bool isAssetDefined = getObjectVal(object, "asset", jsAsset, _file.defined); if (isAssetDefined) { - if (!getStringVal(jsAsset, "version", _file.asset.version, - _file.asset.defined) || _file.asset.version != "2.0") { + if (!getStringVal(jsAsset, "version", _file.asset.version, _file.asset.defined) || _file.asset.version != "2.0") { return false; } getStringVal(jsAsset, "generator", _file.asset.generator, _file.asset.defined); @@ -283,7 +318,8 @@ bool GLTFSerializer::setAsset(const QJsonObject& object) { return isAssetDefined; } -GLTFAccessor::GLTFAccessorSparse::GLTFAccessorSparseIndices GLTFSerializer::createAccessorSparseIndices(const QJsonObject& object) { +GLTFAccessor::GLTFAccessorSparse::GLTFAccessorSparseIndices GLTFSerializer::createAccessorSparseIndices( + const QJsonObject& object) { GLTFAccessor::GLTFAccessorSparse::GLTFAccessorSparseIndices accessorSparseIndices; getIntVal(object, "bufferView", accessorSparseIndices.bufferView, accessorSparseIndices.defined); @@ -293,7 +329,8 @@ GLTFAccessor::GLTFAccessorSparse::GLTFAccessorSparseIndices GLTFSerializer::crea return accessorSparseIndices; } -GLTFAccessor::GLTFAccessorSparse::GLTFAccessorSparseValues GLTFSerializer::createAccessorSparseValues(const QJsonObject& object) { +GLTFAccessor::GLTFAccessorSparse::GLTFAccessorSparseValues GLTFSerializer::createAccessorSparseValues( + const QJsonObject& object) { GLTFAccessor::GLTFAccessorSparse::GLTFAccessorSparseValues 
accessorSparseValues; getIntVal(object, "bufferView", accessorSparseValues.bufferView, accessorSparseValues.defined); @@ -320,7 +357,7 @@ GLTFAccessor::GLTFAccessorSparse GLTFSerializer::createAccessorSparse(const QJso bool GLTFSerializer::addAccessor(const QJsonObject& object) { GLTFAccessor accessor; - + getIntVal(object, "bufferView", accessor.bufferView, accessor.defined); getIntVal(object, "byteOffset", accessor.byteOffset, accessor.defined); getIntVal(object, "componentType", accessor.componentType, accessor.defined); @@ -346,10 +383,10 @@ bool GLTFSerializer::addAccessor(const QJsonObject& object) { bool GLTFSerializer::addAnimation(const QJsonObject& object) { GLTFAnimation animation; - + QJsonArray channels; if (getObjectArrayVal(object, "channels", channels, animation.defined)) { - foreach(const QJsonValue & v, channels) { + for (const QJsonValue& v : channels) { if (v.isObject()) { GLTFChannel channel; getIntVal(v.toObject(), "sampler", channel.sampler, channel.defined); @@ -357,14 +394,14 @@ bool GLTFSerializer::addAnimation(const QJsonObject& object) { if (getObjectVal(v.toObject(), "target", jsChannel, channel.defined)) { getIntVal(jsChannel, "node", channel.target.node, channel.target.defined); getIntVal(jsChannel, "path", channel.target.path, channel.target.defined); - } + } } } } QJsonArray samplers; if (getObjectArrayVal(object, "samplers", samplers, animation.defined)) { - foreach(const QJsonValue & v, samplers) { + for (const QJsonValue& v : samplers) { if (v.isObject()) { GLTFAnimationSampler sampler; getIntVal(v.toObject(), "input", sampler.input, sampler.defined); @@ -376,7 +413,7 @@ bool GLTFSerializer::addAnimation(const QJsonObject& object) { } } } - + _file.animations.push_back(animation); return true; @@ -384,20 +421,20 @@ bool GLTFSerializer::addAnimation(const QJsonObject& object) { bool GLTFSerializer::addBufferView(const QJsonObject& object) { GLTFBufferView bufferview; - + getIntVal(object, "buffer", bufferview.buffer, bufferview.defined); getIntVal(object, "byteLength", bufferview.byteLength, bufferview.defined); getIntVal(object, "byteOffset", bufferview.byteOffset, bufferview.defined); getIntVal(object, "target", bufferview.target, bufferview.defined); - + _file.bufferviews.push_back(bufferview); - + return true; } bool GLTFSerializer::addBuffer(const QJsonObject& object) { GLTFBuffer buffer; - + getIntVal(object, "byteLength", buffer.byteLength, buffer.defined); if (_url.toString().endsWith("glb")) { @@ -413,13 +450,13 @@ bool GLTFSerializer::addBuffer(const QJsonObject& object) { } } _file.buffers.push_back(buffer); - + return true; } bool GLTFSerializer::addCamera(const QJsonObject& object) { GLTFCamera camera; - + QJsonObject jsPerspective; QJsonObject jsOrthographic; QString type; @@ -439,15 +476,15 @@ bool GLTFSerializer::addCamera(const QJsonObject& object) { } else if (getStringVal(object, "type", type, camera.defined)) { camera.type = getCameraType(type); } - + _file.cameras.push_back(camera); - + return true; } bool GLTFSerializer::addImage(const QJsonObject& object) { GLTFImage image; - + QString mime; getStringVal(object, "uri", image.uri, image.defined); if (image.uri.contains("data:image/png;base64,")) { @@ -457,16 +494,18 @@ bool GLTFSerializer::addImage(const QJsonObject& object) { } if (getStringVal(object, "mimeType", mime, image.defined)) { image.mimeType = getImageMimeType(mime); - } + } getIntVal(object, "bufferView", image.bufferView, image.defined); - + _file.images.push_back(image); return true; } -bool 
GLTFSerializer::getIndexFromObject(const QJsonObject& object, const QString& field, - int& outidx, QMap& defined) { +bool GLTFSerializer::getIndexFromObject(const QJsonObject& object, + const QString& field, + int& outidx, + QMap& defined) { QJsonObject subobject; if (getObjectVal(object, field, subobject, defined)) { QMap tmpdefined = QMap(); @@ -491,23 +530,18 @@ bool GLTFSerializer::addMaterial(const QJsonObject& object) { getDoubleVal(object, "alphaCutoff", material.alphaCutoff, material.defined); QJsonObject jsMetallicRoughness; if (getObjectVal(object, "pbrMetallicRoughness", jsMetallicRoughness, material.defined)) { - getDoubleArrayVal(jsMetallicRoughness, "baseColorFactor", - material.pbrMetallicRoughness.baseColorFactor, + getDoubleArrayVal(jsMetallicRoughness, "baseColorFactor", material.pbrMetallicRoughness.baseColorFactor, material.pbrMetallicRoughness.defined); - getIndexFromObject(jsMetallicRoughness, "baseColorTexture", - material.pbrMetallicRoughness.baseColorTexture, + getIndexFromObject(jsMetallicRoughness, "baseColorTexture", material.pbrMetallicRoughness.baseColorTexture, material.pbrMetallicRoughness.defined); - getDoubleVal(jsMetallicRoughness, "metallicFactor", - material.pbrMetallicRoughness.metallicFactor, + getDoubleVal(jsMetallicRoughness, "metallicFactor", material.pbrMetallicRoughness.metallicFactor, material.pbrMetallicRoughness.defined); - getDoubleVal(jsMetallicRoughness, "roughnessFactor", - material.pbrMetallicRoughness.roughnessFactor, + getDoubleVal(jsMetallicRoughness, "roughnessFactor", material.pbrMetallicRoughness.roughnessFactor, material.pbrMetallicRoughness.defined); - getIndexFromObject(jsMetallicRoughness, "metallicRoughnessTexture", - material.pbrMetallicRoughness.metallicRoughnessTexture, - material.pbrMetallicRoughness.defined); + getIndexFromObject(jsMetallicRoughness, "metallicRoughnessTexture", + material.pbrMetallicRoughness.metallicRoughnessTexture, material.pbrMetallicRoughness.defined); } - _file.materials.push_back(material); + _file.materials.push_back(material); return true; } @@ -519,18 +553,18 @@ bool GLTFSerializer::addMesh(const QJsonObject& object) { QJsonArray jsPrimitives; object.keys(); if (getObjectArrayVal(object, "primitives", jsPrimitives, mesh.defined)) { - foreach(const QJsonValue & prim, jsPrimitives) { + for (const QJsonValue& prim : jsPrimitives) { if (prim.isObject()) { GLTFMeshPrimitive primitive; QJsonObject jsPrimitive = prim.toObject(); getIntVal(jsPrimitive, "mode", primitive.mode, primitive.defined); getIntVal(jsPrimitive, "indices", primitive.indices, primitive.defined); getIntVal(jsPrimitive, "material", primitive.material, primitive.defined); - + QJsonObject jsAttributes; if (getObjectVal(jsPrimitive, "attributes", jsAttributes, primitive.defined)) { QStringList attrKeys = jsAttributes.keys(); - foreach(const QString & attrKey, attrKeys) { + for (const QString& attrKey : attrKeys) { int attrVal; getIntVal(jsAttributes, attrKey, attrVal, primitive.attributes.defined); primitive.attributes.values.insert(attrKey, attrVal); @@ -538,14 +572,13 @@ bool GLTFSerializer::addMesh(const QJsonObject& object) { } QJsonArray jsTargets; - if (getObjectArrayVal(jsPrimitive, "targets", jsTargets, primitive.defined)) - { - foreach(const QJsonValue & tar, jsTargets) { + if (getObjectArrayVal(jsPrimitive, "targets", jsTargets, primitive.defined)) { + for (const QJsonValue& tar : jsTargets) { if (tar.isObject()) { QJsonObject jsTarget = tar.toObject(); QStringList tarKeys = jsTarget.keys(); GLTFMeshPrimitiveAttr target; - 
foreach(const QString & tarKey, tarKeys) { + for (const QString& tarKey : tarKeys) { int tarVal; getIntVal(jsTarget, tarKey, tarVal, target.defined); target.values.insert(tarKey, tarVal); @@ -553,7 +586,7 @@ bool GLTFSerializer::addMesh(const QJsonObject& object) { primitive.targets.push_back(target); } } - } + } mesh.primitives.push_back(primitive); } } @@ -564,9 +597,7 @@ bool GLTFSerializer::addMesh(const QJsonObject& object) { if (getObjectVal(object, "extras", jsExtras, mesh.defined)) { QJsonArray jsTargetNames; if (getObjectArrayVal(jsExtras, "targetNames", jsTargetNames, extras.defined)) { - foreach (const QJsonValue& tarName, jsTargetNames) { - extras.targetNames.push_back(tarName.toString()); - } + foreach (const QJsonValue& tarName, jsTargetNames) { extras.targetNames.push_back(tarName.toString()); } } mesh.extras = extras; } @@ -578,7 +609,7 @@ bool GLTFSerializer::addMesh(const QJsonObject& object) { bool GLTFSerializer::addNode(const QJsonObject& object) { GLTFNode node; - + getStringVal(object, "name", node.name, node.defined); getIntVal(object, "camera", node.camera, node.defined); getIntVal(object, "mesh", node.mesh, node.defined); @@ -607,7 +638,6 @@ bool GLTFSerializer::addSampler(const QJsonObject& object) { _file.samplers.push_back(sampler); return true; - } bool GLTFSerializer::addScene(const QJsonObject& object) { @@ -633,10 +663,10 @@ bool GLTFSerializer::addSkin(const QJsonObject& object) { } bool GLTFSerializer::addTexture(const QJsonObject& object) { - GLTFTexture texture; + GLTFTexture texture; getIntVal(object, "sampler", texture.sampler, texture.defined); getIntVal(object, "source", texture.source, texture.defined); - + _file.textures.push_back(texture); return true; @@ -649,8 +679,8 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { if (_url.toString().endsWith("glb") && data.indexOf("glTF") == 0 && data.contains("JSON")) { jsonChunk = setGLBChunks(data); - } - + } + QJsonDocument d = QJsonDocument::fromJson(jsonChunk); QJsonObject jsFile = d.object(); @@ -658,7 +688,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { if (success) { QJsonArray accessors; if (getObjectArrayVal(jsFile, "accessors", accessors, _file.defined)) { - foreach(const QJsonValue & accVal, accessors) { + for (const QJsonValue& accVal : accessors) { if (accVal.isObject()) { success = success && addAccessor(accVal.toObject()); } @@ -667,7 +697,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray animations; if (getObjectArrayVal(jsFile, "animations", animations, _file.defined)) { - foreach(const QJsonValue & animVal, accessors) { + for (const QJsonValue& animVal : accessors) { if (animVal.isObject()) { success = success && addAnimation(animVal.toObject()); } @@ -676,7 +706,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray bufferViews; if (getObjectArrayVal(jsFile, "bufferViews", bufferViews, _file.defined)) { - foreach(const QJsonValue & bufviewVal, bufferViews) { + for (const QJsonValue& bufviewVal : bufferViews) { if (bufviewVal.isObject()) { success = success && addBufferView(bufviewVal.toObject()); } @@ -685,7 +715,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray buffers; if (getObjectArrayVal(jsFile, "buffers", buffers, _file.defined)) { - foreach(const QJsonValue & bufVal, buffers) { + for (const QJsonValue& bufVal : buffers) { if (bufVal.isObject()) { success = success && addBuffer(bufVal.toObject()); } @@ -694,7 +724,7 @@ bool GLTFSerializer::parseGLTF(const 
hifi::ByteArray& data) { QJsonArray cameras; if (getObjectArrayVal(jsFile, "cameras", cameras, _file.defined)) { - foreach(const QJsonValue & camVal, cameras) { + for (const QJsonValue& camVal : cameras) { if (camVal.isObject()) { success = success && addCamera(camVal.toObject()); } @@ -703,7 +733,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray images; if (getObjectArrayVal(jsFile, "images", images, _file.defined)) { - foreach(const QJsonValue & imgVal, images) { + for (const QJsonValue& imgVal : images) { if (imgVal.isObject()) { success = success && addImage(imgVal.toObject()); } @@ -712,7 +742,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray materials; if (getObjectArrayVal(jsFile, "materials", materials, _file.defined)) { - foreach(const QJsonValue & matVal, materials) { + for (const QJsonValue& matVal : materials) { if (matVal.isObject()) { success = success && addMaterial(matVal.toObject()); } @@ -721,7 +751,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray meshes; if (getObjectArrayVal(jsFile, "meshes", meshes, _file.defined)) { - foreach(const QJsonValue & meshVal, meshes) { + for (const QJsonValue& meshVal : meshes) { if (meshVal.isObject()) { success = success && addMesh(meshVal.toObject()); } @@ -730,7 +760,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray nodes; if (getObjectArrayVal(jsFile, "nodes", nodes, _file.defined)) { - foreach(const QJsonValue & nodeVal, nodes) { + for (const QJsonValue& nodeVal : nodes) { if (nodeVal.isObject()) { success = success && addNode(nodeVal.toObject()); } @@ -739,7 +769,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray samplers; if (getObjectArrayVal(jsFile, "samplers", samplers, _file.defined)) { - foreach(const QJsonValue & samVal, samplers) { + for (const QJsonValue& samVal : samplers) { if (samVal.isObject()) { success = success && addSampler(samVal.toObject()); } @@ -748,7 +778,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray scenes; if (getObjectArrayVal(jsFile, "scenes", scenes, _file.defined)) { - foreach(const QJsonValue & sceneVal, scenes) { + for (const QJsonValue& sceneVal : scenes) { if (sceneVal.isObject()) { success = success && addScene(sceneVal.toObject()); } @@ -757,7 +787,7 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray skins; if (getObjectArrayVal(jsFile, "skins", skins, _file.defined)) { - foreach(const QJsonValue & skinVal, skins) { + for (const QJsonValue& skinVal : skins) { if (skinVal.isObject()) { success = success && addSkin(skinVal.toObject()); } @@ -766,51 +796,22 @@ bool GLTFSerializer::parseGLTF(const hifi::ByteArray& data) { QJsonArray textures; if (getObjectArrayVal(jsFile, "textures", textures, _file.defined)) { - foreach(const QJsonValue & texVal, textures) { + for (const QJsonValue& texVal : textures) { if (texVal.isObject()) { success = success && addTexture(texVal.toObject()); } } } - } + } return success; } -glm::mat4 GLTFSerializer::getModelTransform(const GLTFNode& node) { - glm::mat4 tmat = glm::mat4(1.0); - - if (node.defined["matrix"] && node.matrix.size() == 16) { - tmat = glm::mat4(node.matrix[0], node.matrix[1], node.matrix[2], node.matrix[3], - node.matrix[4], node.matrix[5], node.matrix[6], node.matrix[7], - node.matrix[8], node.matrix[9], node.matrix[10], node.matrix[11], - node.matrix[12], node.matrix[13], node.matrix[14], node.matrix[15]); - } else { - - if (node.defined["scale"] && 
node.scale.size() == 3) { - glm::vec3 scale = glm::vec3(node.scale[0], node.scale[1], node.scale[2]); - glm::mat4 s = glm::mat4(1.0); - s = glm::scale(s, scale); - tmat = s * tmat; - } - - if (node.defined["rotation"] && node.rotation.size() == 4) { - //quat(x,y,z,w) to quat(w,x,y,z) - glm::quat rotquat = glm::quat(node.rotation[3], node.rotation[0], node.rotation[1], node.rotation[2]); - tmat = glm::mat4_cast(rotquat) * tmat; - } - - if (node.defined["translation"] && node.translation.size() == 3) { - glm::vec3 trans = glm::vec3(node.translation[0], node.translation[1], node.translation[2]); - glm::mat4 t = glm::mat4(1.0); - t = glm::translate(t, trans); - tmat = t * tmat; - } - } - return tmat; +const glm::mat4& GLTFSerializer::getModelTransform(const GLTFNode& node) { + return node.transform; } void GLTFSerializer::getSkinInverseBindMatrices(std::vector>& inverseBindMatrixValues) { - for (auto &skin : _file.skins) { + for (auto& skin : _file.skins) { GLTFAccessor& indicesAccessor = _file.accessors[skin.inverseBindMatrices]; QVector matrices; addArrayFromAccessor(indicesAccessor, matrices); @@ -827,78 +828,193 @@ void GLTFSerializer::generateTargetData(int index, float weight, QVector; +ParentIndexMap findParentIndices(const QVector& nodes) { + ParentIndexMap parentIndices; + int numNodes = nodes.size(); + for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { + auto& gltfNode = nodes[nodeIndex]; + for (const auto& childIndex : gltfNode.children) { + parentIndices[childIndex] = nodeIndex; + } + } + return parentIndices; +} + +bool requiresNodeReordering(const ParentIndexMap& map) { + for (const auto& entry : map) { + if (entry.first < entry.second) { + return true; + } + } + return false; +} + +int findEdgeCount(const ParentIndexMap& parentIndices, int nodeIndex) { + auto parentsEnd = parentIndices.end(); + ParentIndexMap::const_iterator itr; + int result = 0; + while (parentsEnd != (itr = parentIndices.find(nodeIndex))) { + nodeIndex = itr->second; + ++result; + } + return result; +} + +using IndexBag = std::unordered_set; +using EdgeCountMap = std::map; +EdgeCountMap findEdgeCounts(int numNodes, const ParentIndexMap& map) { + EdgeCountMap edgeCounts; + // For each item, determine how many tranversals to a root node + for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { + // How many steps between this node and a root node? 
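The gltf helpers introduced in this patch order nodes by their distance from a root so that every parent precedes its children. A minimal standalone sketch of the same depth-grouping idea, assuming a simple child-to-parent map (depthOf and orderByDepth are illustrative names, not part of the patch):

    #include <map>
    #include <set>
    #include <unordered_map>
    #include <vector>

    // Number of edges between a node and its root (0 for a root node).
    static int depthOf(const std::unordered_map<int, int>& parents, int node) {
        int depth = 0;
        for (auto itr = parents.find(node); itr != parents.end(); itr = parents.find(node)) {
            node = itr->second;
            ++depth;
        }
        return depth;
    }

    // Map old node indices to new indices so shallower nodes come first,
    // which guarantees parents are visited before their children.
    static std::unordered_map<int, int> orderByDepth(int numNodes,
                                                     const std::unordered_map<int, int>& parents) {
        std::map<int, std::set<int>> byDepth;  // depth -> node indices at that depth
        for (int node = 0; node < numNodes; ++node) {
            byDepth[depthOf(parents, node)].insert(node);
        }
        std::unordered_map<int, int> oldToNew;
        int next = 0;
        for (const auto& entry : byDepth) {
            for (int oldIndex : entry.second) {
                oldToNew[oldIndex] = next++;
            }
        }
        return oldToNew;
    }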
+ int edgeCount = findEdgeCount(map, nodeIndex); + // Populate the result map + edgeCounts[edgeCount].insert(nodeIndex); + } + return edgeCounts; +} + +using ReorderMap = std::unordered_map; +ReorderMap buildReorderMap(const EdgeCountMap& map) { + ReorderMap result; + int newIndex = 0; + for (const auto& entry : map) { + const IndexBag& oldIndices = entry.second; + for (const auto& oldIndex : oldIndices) { + result.insert({ oldIndex, newIndex }); + ++newIndex; + } + } + return result; +} + +void reorderNodeIndices(QVector& indices, const ReorderMap& oldToNewIndexMap) { + for (auto& index : indices) { + index = oldToNewIndexMap.at(index); + } +} + +} // namespace gltf + +void GLTFFile::populateMaterialNames() { + // Build material names + QSet usedNames; + for (const auto& material : materials) { + if (!material.name.isEmpty()) { + usedNames.insert(material.name); + } + } + + int ukcount = 0; + const QString unknown{ "Default_%1" }; + for (auto& material : materials) { + QString generatedName = unknown.arg(ukcount++); + while (usedNames.contains(generatedName)) { + generatedName = unknown.arg(ukcount++); + } + material.name = generatedName; + material.defined.insert("name", true); + usedNames.insert(generatedName); + } +} + +void GLTFFile::reorderNodes(const std::unordered_map& oldToNewIndexMap) { + int numNodes = nodes.size(); + assert(numNodes == oldToNewIndexMap.size()); + QVector newNodes; + newNodes.resize(numNodes); + for (int oldIndex = 0; oldIndex < numNodes; ++oldIndex) { + const auto& oldNode = nodes[oldIndex]; + int newIndex = oldToNewIndexMap.at(oldIndex); + auto& newNode = newNodes[newIndex]; + // Write the new node + newNode = oldNode; + // Fixup the child indices + gltf::reorderNodeIndices(newNode.children, oldToNewIndexMap); + } + newNodes.swap(nodes); + + for (auto& subScene : scenes) { + gltf::reorderNodeIndices(subScene.nodes, oldToNewIndexMap); + } +} + +// Ensure that the GLTF nodes are ordered so +void GLTFFile::sortNodes() { + // Find all the parents + auto parentIndices = gltf::findParentIndices(nodes); + // If the nodes are already in a good order, we're done + if (!gltf::requiresNodeReordering(parentIndices)) { + return; + } + + auto edgeCounts = gltf::findEdgeCounts(nodes.size(), parentIndices); + auto oldToNewIndexMap = gltf::buildReorderMap(edgeCounts); + reorderNodes(oldToNewIndexMap); + assert(!gltf::requiresNodeReordering(gltf::findParentIndices(nodes))); +} + +void GLTFNode::normalizeTransform() { + if (defined["matrix"] && matrix.size() == 16) { + transform = glm::make_mat4(matrix.constData()); + } else { + transform = glm::mat4(1.0); + if (defined["scale"] && scale.size() == 3) { + glm::vec3 scaleVec = glm::make_vec3(scale.data()); + transform = glm::scale(transform, scaleVec); + } + + if (defined["rotation"] && rotation.size() == 4) { + glm::quat rotQ = glm::make_quat(rotation.data()); + transform = glm::mat4_cast(rotQ) * transform; + } + + if (defined["translation"] && translation.size() == 3) { + glm::vec3 transV = glm::make_vec3(translation.data()); + transform = glm::translate(glm::mat4(1.0), transV) * transform; + } + } +} + +void GLTFFile::normalizeNodeTransforms() { + for (auto& node : nodes) { + node.normalizeTransform(); + } +} + bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& mapping, const hifi::URL& url) { int numNodes = _file.nodes.size(); - - //Build dependencies - QVector parents; - QVector sortedNodes; - parents.fill(-1, numNodes); - sortedNodes.reserve(numNodes); - int nodecount = 0; - foreach(auto &node, 
_file.nodes) { - foreach(int child, node.children) { - parents[child] = nodecount; + hfmModel.transforms.resize(numNodes); + + auto parentIndices = gltf::findParentIndices(_file.nodes); + const auto parentsEnd = parentIndices.end(); + for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { + auto& gltfNode = _file.nodes[nodeIndex]; + auto& hmfTransform = hfmModel.transforms[nodeIndex]; + auto parentItr = parentIndices.find(nodeIndex); + if (parentItr != parentsEnd ) { + hmfTransform.parent = parentItr->second; } - sortedNodes.push_back(nodecount); - ++nodecount; + hmfTransform.transform = getModelTransform(gltfNode); } // Build transforms - nodecount = 0; - foreach(auto &node, _file.nodes) { - // collect node transform - _file.nodes[nodecount].transforms.push_back(getModelTransform(node)); - int parentIndex = parents[nodecount]; - while (parentIndex != -1) { - const auto& parentNode = _file.nodes[parentIndex]; - // collect transforms for a node's parents, grandparents, etc. - _file.nodes[nodecount].transforms.push_back(getModelTransform(parentNode)); - parentIndex = parents[parentIndex]; - } - ++nodecount; - } - - - // since parent indices must exist in the sorted list before any of their children, sortedNodes might not be initialized in the correct order - // therefore we need to re-initialize the order in which nodes will be parsed - QVector hasBeenSorted; - hasBeenSorted.fill(false, numNodes); - int i = 0; // initial index - while (i < numNodes) { - int currentNode = sortedNodes[i]; - int parentIndex = parents[currentNode]; - if (parentIndex == -1 || hasBeenSorted[parentIndex]) { - hasBeenSorted[currentNode] = true; - ++i; - } else { - int j = i + 1; // index of node to be sorted - while (j < numNodes) { - int nextNode = sortedNodes[j]; - parentIndex = parents[nextNode]; - if (parentIndex == -1 || hasBeenSorted[parentIndex]) { - // swap with currentNode - hasBeenSorted[nextNode] = true; - sortedNodes[i] = nextNode; - sortedNodes[j] = currentNode; - ++i; - currentNode = sortedNodes[i]; - } - ++j; - } + for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { + auto& gltfNode = _file.nodes[nodeIndex]; + //gltfNode.transforms.push_back(getModelTransform(gltfNode)); + gltf::ParentIndexMap::const_iterator parentItr; + int curNode = nodeIndex; + while (parentsEnd != (parentItr = parentIndices.find(curNode))) { + curNode = parentItr->second; + auto& ancestorNode = _file.nodes[curNode]; + //gltfNode.transforms.push_back(getModelTransform(ancestorNode)); } } - - // Build map from original to new indices - QVector originalToNewNodeIndexMap; - originalToNewNodeIndexMap.fill(-1, numNodes); - for (int i = 0; i < numNodes; ++i) { - originalToNewNodeIndexMap[sortedNodes[i]] = i; - } - - // Build joints HFMJoint joint; joint.distanceToParent = 0; @@ -906,24 +1022,24 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& QVector globalTransforms; globalTransforms.resize(numNodes); - for (int nodeIndex : sortedNodes) { + for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { auto& node = _file.nodes[nodeIndex]; - - joint.parentIndex = parents[nodeIndex]; - if (joint.parentIndex != -1) { - joint.parentIndex = originalToNewNodeIndexMap[joint.parentIndex]; + auto parentItr = parentIndices.find(nodeIndex); + if (parentsEnd == parentItr) { + joint.parentIndex = -1; + } else { + joint.parentIndex = parentItr->second; } - joint.transform = node.transforms.first(); + + joint.transform = getModelTransform(node); joint.translation = extractTranslation(joint.transform); 
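Because the nodes are now guaranteed to be ordered parent-before-child, a single forward pass can accumulate world transforms, which is what the joint loop in this hunk relies on. A small sketch of that accumulation, assuming the local transforms and parent indices are already extracted (accumulateGlobalTransforms is an illustrative name, not part of the patch):

    #include <glm/glm.hpp>
    #include <vector>

    // locals[i] is node i's local transform; parents[i] is its parent index, or -1 for roots.
    // Requires every parent index to be smaller than its child's index.
    std::vector<glm::mat4> accumulateGlobalTransforms(const std::vector<glm::mat4>& locals,
                                                      const std::vector<int>& parents) {
        std::vector<glm::mat4> globals(locals.size());
        for (size_t i = 0; i < locals.size(); ++i) {
            int parent = parents[i];
            globals[i] = (parent == -1) ? locals[i] : globals[parent] * locals[i];
        }
        return globals;
    }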
joint.rotation = glmExtractRotation(joint.transform); glm::vec3 scale = extractScale(joint.transform); joint.postTransform = glm::scale(glm::mat4(), scale); - - joint.parentIndex = parents[nodeIndex]; globalTransforms[nodeIndex] = joint.transform; + if (joint.parentIndex != -1) { globalTransforms[nodeIndex] = globalTransforms[joint.parentIndex] * globalTransforms[nodeIndex]; - joint.parentIndex = originalToNewNodeIndexMap[joint.parentIndex]; } joint.name = node.name; @@ -932,7 +1048,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } hfmModel.shapeVertices.resize(hfmModel.joints.size()); - // get offset transform from mapping float unitScaleFactor = 1.0f; float offsetScale = mapping.value("scale", 1.0f).toFloat() * unitScaleFactor; @@ -953,7 +1068,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& getSkinInverseBindMatrices(inverseBindValues); for (int jointIndex = 0; jointIndex < numNodes; ++jointIndex) { - int nodeIndex = sortedNodes[jointIndex]; + int nodeIndex = jointIndex; auto joint = hfmModel.joints[jointIndex]; for (int s = 0; s < _file.skins.size(); ++s) { @@ -984,638 +1099,559 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } } - - // Build materials - QVector materialIDs; - QString unknown = "Default"; - int ukcount = 0; - foreach(auto material, _file.materials) { - if (!material.defined["name"]) { - QString name = unknown + QString::number(++ukcount); - material.name = name; - material.defined.insert("name", true); - } - - QString mid = material.name; - materialIDs.push_back(mid); - } - - for (int i = 0; i < materialIDs.size(); ++i) { - QString& matid = materialIDs[i]; + for (const auto& material : _file.materials) { + const QString& matid = material.name; hfmModel.materials.emplace_back(); HFMMaterial& hfmMaterial = hfmModel.materials.back(); hfmMaterial._material = std::make_shared(); - hfmMaterial.name = hfmMaterial.materialID = matid; - setHFMMaterial(hfmMaterial, _file.materials[i]); + hfmMaterial.materialID = hfmMaterial.name; + setHFMMaterial(hfmMaterial, material); } - - // Build meshes - nodecount = 0; + int meshCount = _file.meshes.size(); + hfmModel.meshes.resize(meshCount); hfmModel.meshExtents.reset(); - for (int nodeIndex : sortedNodes) { - auto& node = _file.nodes[nodeIndex]; - if (node.defined["mesh"]) { - hfmModel.meshes.push_back(HFMMesh()); - HFMMesh& mesh = hfmModel.meshes[hfmModel.meshes.size() - 1]; - if (!hfmModel.hasSkeletonJoints) { + hfmModel.meshes.resize(meshCount); + for (int meshIndex = 0; meshIndex < meshCount; ++meshIndex) { + const auto& gltfMesh = _file.meshes[meshIndex]; + auto& mesh = hfmModel.meshes[meshIndex]; + mesh.meshIndex = meshIndex; +#if 0 + if (!hfmModel.hasSkeletonJoints) { + HFMCluster cluster; + cluster.jointIndex = nodeIndex; + cluster.inverseBindMatrix = glm::mat4(); + cluster.inverseBindTransform = Transform(cluster.inverseBindMatrix); + mesh.clusters.append(cluster); + } else { // skinned model + for (int j = 0; j < numNodes; ++j) { HFMCluster cluster; - cluster.jointIndex = nodecount; - cluster.inverseBindMatrix = glm::mat4(); + cluster.jointIndex = j; + cluster.inverseBindMatrix = jointInverseBindTransforms[j]; cluster.inverseBindTransform = Transform(cluster.inverseBindMatrix); mesh.clusters.append(cluster); - } else { // skinned model - for (int j = 0; j < numNodes; ++j) { - HFMCluster cluster; - cluster.jointIndex = j; - cluster.inverseBindMatrix = jointInverseBindTransforms[j]; - cluster.inverseBindTransform = 
Transform(cluster.inverseBindMatrix); - mesh.clusters.append(cluster); - } } - HFMCluster root; - root.jointIndex = 0; - root.inverseBindMatrix = jointInverseBindTransforms[root.jointIndex]; - root.inverseBindTransform = Transform(root.inverseBindMatrix); - mesh.clusters.append(root); + } + HFMCluster root; + root.jointIndex = 0; + root.inverseBindMatrix = jointInverseBindTransforms[root.jointIndex]; + root.inverseBindTransform = Transform(root.inverseBindMatrix); + mesh.clusters.append(root); +#endif - QList meshAttributes; - foreach(auto &primitive, _file.meshes[node.mesh].primitives) { - QList keys = primitive.attributes.values.keys(); - foreach (auto &key, keys) { - if (!meshAttributes.contains(key)) { - meshAttributes.push_back(key); - } - } + QSet meshAttributes; + for(const auto &primitive : gltfMesh.primitives) { + for (const auto& attribute : primitive.attributes.values.keys()) { + meshAttributes.insert(attribute); + } + } + + for(auto &primitive : gltfMesh.primitives) { + HFMMeshPart part = HFMMeshPart(); + + int indicesAccessorIdx = primitive.indices; + + GLTFAccessor& indicesAccessor = _file.accessors[indicesAccessorIdx]; + + // Buffers + constexpr int VERTEX_STRIDE = 3; + constexpr int NORMAL_STRIDE = 3; + constexpr int TEX_COORD_STRIDE = 2; + + QVector indices; + QVector vertices; + QVector normals; + QVector tangents; + QVector texcoords; + QVector texcoords2; + QVector colors; + QVector joints; + QVector weights; + + static int tangentStride = 4; + static int colorStride = 3; + static int jointStride = 4; + static int weightStride = 4; + + bool success = addArrayFromAccessor(indicesAccessor, indices); + + if (!success) { + qWarning(modelformat) << "There was a problem reading glTF INDICES data for model " << _url; + continue; } - foreach(auto &primitive, _file.meshes[node.mesh].primitives) { - HFMMeshPart part = HFMMeshPart(); + // Increment the triangle indices by the current mesh vertex count so each mesh part can all reference the same buffers within the mesh + int prevMeshVerticesCount = mesh.vertices.count(); - int indicesAccessorIdx = primitive.indices; + QList keys = primitive.attributes.values.keys(); + QVector clusterJoints; + QVector clusterWeights; - GLTFAccessor& indicesAccessor = _file.accessors[indicesAccessorIdx]; + for(auto &key : keys) { + int accessorIdx = primitive.attributes.values[key]; + GLTFAccessor& accessor = _file.accessors[accessorIdx]; + const auto vertexAttribute = GLTFVertexAttribute::fromString(key); + switch (vertexAttribute) { + case GLTFVertexAttribute::POSITION: + success = addArrayFromAttribute(vertexAttribute, accessor, vertices); + break; - // Buffers - QVector indices; - QVector vertices; - int verticesStride = 3; - QVector normals; - int normalStride = 3; - QVector tangents; - int tangentStride = 4; - QVector texcoords; - int texCoordStride = 2; - QVector texcoords2; - int texCoord2Stride = 2; - QVector colors; - int colorStride = 3; - QVector joints; - int jointStride = 4; - QVector weights; - int weightStride = 4; + case GLTFVertexAttribute::NORMAL: + success = addArrayFromAttribute(vertexAttribute, accessor, normals); + break; - bool success = addArrayFromAccessor(indicesAccessor, indices); + case GLTFVertexAttribute::TANGENT: + success = addArrayFromAttribute(vertexAttribute, accessor, tangents); + tangentStride = GLTFAccessorType::count((GLTFAccessorType::Value)accessor.type); + break; + case GLTFVertexAttribute::TEXCOORD_0: + success = addArrayFromAttribute(vertexAttribute, accessor, texcoords); + break; + + case 
GLTFVertexAttribute::TEXCOORD_1: + success = addArrayFromAttribute(vertexAttribute, accessor, texcoords2); + break; + + case GLTFVertexAttribute::COLOR_0: + success = addArrayFromAttribute(vertexAttribute, accessor, colors); + colorStride = GLTFAccessorType::count((GLTFAccessorType::Value)accessor.type); + break; + + case GLTFVertexAttribute::JOINTS_0: + success = addArrayFromAttribute(vertexAttribute, accessor, colors); + jointStride = GLTFAccessorType::count((GLTFAccessorType::Value)accessor.type); + break; + + case GLTFVertexAttribute::WEIGHTS_0: + success = addArrayFromAttribute(vertexAttribute, accessor, colors); + weightStride = GLTFAccessorType::count((GLTFAccessorType::Value)accessor.type); + break; + } if (!success) { - qWarning(modelformat) << "There was a problem reading glTF INDICES data for model " << _url; continue; } + } - // Increment the triangle indices by the current mesh vertex count so each mesh part can all reference the same buffers within the mesh - int prevMeshVerticesCount = mesh.vertices.count(); + // Validation stage + if (indices.count() == 0) { + qWarning(modelformat) << "Missing indices for model " << _url; + continue; + } + if (vertices.count() == 0) { + qWarning(modelformat) << "Missing vertices for model " << _url; + continue; + } - QList keys = primitive.attributes.values.keys(); - QVector clusterJoints; - QVector clusterWeights; + int partVerticesCount = vertices.size() / 3; - foreach(auto &key, keys) { - int accessorIdx = primitive.attributes.values[key]; + // generate the normals if they don't exist + // FIXME move to GLTF post-load processing + if (normals.size() == 0) { + QVector newIndices; + QVector newVertices; + QVector newNormals; + QVector newTexcoords; + QVector newTexcoords2; + QVector newColors; + QVector newJoints; + QVector newWeights; - GLTFAccessor& accessor = _file.accessors[accessorIdx]; + for (int n = 0; n < indices.size(); n = n + 3) { + int v1_index = (indices[n + 0] * 3); + int v2_index = (indices[n + 1] * 3); + int v3_index = (indices[n + 2] * 3); - if (key == "POSITION") { - if (accessor.type != GLTFAccessorType::VEC3) { - qWarning(modelformat) << "Invalid accessor type on glTF POSITION data for model " << _url; - continue; - } + glm::vec3 v1 = glm::vec3(vertices[v1_index], vertices[v1_index + 1], vertices[v1_index + 2]); + glm::vec3 v2 = glm::vec3(vertices[v2_index], vertices[v2_index + 1], vertices[v2_index + 2]); + glm::vec3 v3 = glm::vec3(vertices[v3_index], vertices[v3_index + 1], vertices[v3_index + 2]); - success = addArrayFromAccessor(accessor, vertices); - if (!success) { - qWarning(modelformat) << "There was a problem reading glTF POSITION data for model " << _url; - continue; - } - } else if (key == "NORMAL") { - if (accessor.type != GLTFAccessorType::VEC3) { - qWarning(modelformat) << "Invalid accessor type on glTF NORMAL data for model " << _url; - continue; - } + newVertices.append(v1.x); + newVertices.append(v1.y); + newVertices.append(v1.z); + newVertices.append(v2.x); + newVertices.append(v2.y); + newVertices.append(v2.z); + newVertices.append(v3.x); + newVertices.append(v3.y); + newVertices.append(v3.z); - success = addArrayFromAccessor(accessor, normals); - if (!success) { - qWarning(modelformat) << "There was a problem reading glTF NORMAL data for model " << _url; - continue; - } - } else if (key == "TANGENT") { - if (accessor.type == GLTFAccessorType::VEC4) { - tangentStride = 4; - } else if (accessor.type == GLTFAccessorType::VEC3) { - tangentStride = 3; - } else { - qWarning(modelformat) << "Invalid 
accessor type on glTF TANGENT data for model " << _url; - continue; - } + glm::vec3 norm = glm::normalize(glm::cross(v2 - v1, v3 - v1)); - success = addArrayFromAccessor(accessor, tangents); - if (!success) { - qWarning(modelformat) << "There was a problem reading glTF TANGENT data for model " << _url; - tangentStride = 0; - continue; - } - } else if (key == "TEXCOORD_0") { - success = addArrayFromAccessor(accessor, texcoords); - if (!success) { - qWarning(modelformat) << "There was a problem reading glTF TEXCOORD_0 data for model " << _url; - continue; - } + newNormals.append(norm.x); + newNormals.append(norm.y); + newNormals.append(norm.z); + newNormals.append(norm.x); + newNormals.append(norm.y); + newNormals.append(norm.z); + newNormals.append(norm.x); + newNormals.append(norm.y); + newNormals.append(norm.z); - if (accessor.type != GLTFAccessorType::VEC2) { - qWarning(modelformat) << "Invalid accessor type on glTF TEXCOORD_0 data for model " << _url; - continue; - } - } else if (key == "TEXCOORD_1") { - success = addArrayFromAccessor(accessor, texcoords2); - if (!success) { - qWarning(modelformat) << "There was a problem reading glTF TEXCOORD_1 data for model " << _url; - continue; - } - - if (accessor.type != GLTFAccessorType::VEC2) { - qWarning(modelformat) << "Invalid accessor type on glTF TEXCOORD_1 data for model " << _url; - continue; - } - } else if (key == "COLOR_0") { - if (accessor.type == GLTFAccessorType::VEC4) { - colorStride = 4; - } else if (accessor.type == GLTFAccessorType::VEC3) { - colorStride = 3; - } else { - qWarning(modelformat) << "Invalid accessor type on glTF COLOR_0 data for model " << _url; - continue; - } - - success = addArrayFromAccessor(accessor, colors); - if (!success) { - qWarning(modelformat) << "There was a problem reading glTF COLOR_0 data for model " << _url; - continue; - } - } else if (key == "JOINTS_0") { - if (accessor.type == GLTFAccessorType::VEC4) { - jointStride = 4; - } else if (accessor.type == GLTFAccessorType::VEC3) { - jointStride = 3; - } else if (accessor.type == GLTFAccessorType::VEC2) { - jointStride = 2; - } else if (accessor.type == GLTFAccessorType::SCALAR) { - jointStride = 1; - } else { - qWarning(modelformat) << "Invalid accessor type on glTF JOINTS_0 data for model " << _url; - continue; - } - - success = addArrayFromAccessor(accessor, joints); - if (!success) { - qWarning(modelformat) << "There was a problem reading glTF JOINTS_0 data for model " << _url; - continue; - } - } else if (key == "WEIGHTS_0") { - if (accessor.type == GLTFAccessorType::VEC4) { - weightStride = 4; - } else if (accessor.type == GLTFAccessorType::VEC3) { - weightStride = 3; - } else if (accessor.type == GLTFAccessorType::VEC2) { - weightStride = 2; - } else if (accessor.type == GLTFAccessorType::SCALAR) { - weightStride = 1; - } else { - qWarning(modelformat) << "Invalid accessor type on glTF WEIGHTS_0 data for model " << _url; - continue; - } - - success = addArrayFromAccessor(accessor, weights); - if (!success) { - qWarning(modelformat) << "There was a problem reading glTF WEIGHTS_0 data for model " << _url; - continue; - } - } - } - - // Validation stage - if (indices.count() == 0) { - qWarning(modelformat) << "Missing indices for model " << _url; - continue; - } - if (vertices.count() == 0) { - qWarning(modelformat) << "Missing vertices for model " << _url; - continue; - } - - int partVerticesCount = vertices.size() / 3; - - // generate the normals if they don't exist - if (normals.size() == 0) { - QVector newIndices; - QVector newVertices; - 
QVector newNormals; - QVector newTexcoords; - QVector newTexcoords2; - QVector newColors; - QVector newJoints; - QVector newWeights; - - for (int n = 0; n < indices.size(); n = n + 3) { - int v1_index = (indices[n + 0] * 3); - int v2_index = (indices[n + 1] * 3); - int v3_index = (indices[n + 2] * 3); - - glm::vec3 v1 = glm::vec3(vertices[v1_index], vertices[v1_index + 1], vertices[v1_index + 2]); - glm::vec3 v2 = glm::vec3(vertices[v2_index], vertices[v2_index + 1], vertices[v2_index + 2]); - glm::vec3 v3 = glm::vec3(vertices[v3_index], vertices[v3_index + 1], vertices[v3_index + 2]); - - newVertices.append(v1.x); - newVertices.append(v1.y); - newVertices.append(v1.z); - newVertices.append(v2.x); - newVertices.append(v2.y); - newVertices.append(v2.z); - newVertices.append(v3.x); - newVertices.append(v3.y); - newVertices.append(v3.z); - - glm::vec3 norm = glm::normalize(glm::cross(v2 - v1, v3 - v1)); - - newNormals.append(norm.x); - newNormals.append(norm.y); - newNormals.append(norm.z); - newNormals.append(norm.x); - newNormals.append(norm.y); - newNormals.append(norm.z); - newNormals.append(norm.x); - newNormals.append(norm.y); - newNormals.append(norm.z); - - if (texcoords.size() == partVerticesCount * texCoordStride) { - GLTF_APPEND_ARRAY_2(newTexcoords, texcoords) - } - - if (texcoords2.size() == partVerticesCount * texCoord2Stride) { - GLTF_APPEND_ARRAY_2(newTexcoords2, texcoords2) - } - - if (colors.size() == partVerticesCount * colorStride) { - if (colorStride == 4) { - GLTF_APPEND_ARRAY_4(newColors, colors) - } else { - GLTF_APPEND_ARRAY_3(newColors, colors) - } - } - - if (joints.size() == partVerticesCount * jointStride) { - if (jointStride == 4) { - GLTF_APPEND_ARRAY_4(newJoints, joints) - } else if (jointStride == 3) { - GLTF_APPEND_ARRAY_3(newJoints, joints) - } else if (jointStride == 2) { - GLTF_APPEND_ARRAY_2(newJoints, joints) - } else { - GLTF_APPEND_ARRAY_1(newJoints, joints) - } - } - - if (weights.size() == partVerticesCount * weightStride) { - if (weightStride == 4) { - GLTF_APPEND_ARRAY_4(newWeights, weights) - } else if (weightStride == 3) { - GLTF_APPEND_ARRAY_3(newWeights, weights) - } else if (weightStride == 2) { - GLTF_APPEND_ARRAY_2(newWeights, weights) - } else { - GLTF_APPEND_ARRAY_1(newWeights, weights) - } - } - newIndices.append(n); - newIndices.append(n + 1); - newIndices.append(n + 2); + if (texcoords.size() == partVerticesCount * TEX_COORD_STRIDE) { + GLTF_APPEND_ARRAY_2(newTexcoords, texcoords) } - vertices = newVertices; - normals = newNormals; - tangents = QVector(); - texcoords = newTexcoords; - texcoords2 = newTexcoords2; - colors = newColors; - joints = newJoints; - weights = newWeights; - indices = newIndices; + if (texcoords2.size() == partVerticesCount * TEX_COORD_STRIDE) { + GLTF_APPEND_ARRAY_2(newTexcoords2, texcoords2) + } - partVerticesCount = vertices.size() / 3; + if (colors.size() == partVerticesCount * colorStride) { + if (colorStride == 4) { + GLTF_APPEND_ARRAY_4(newColors, colors) + } else { + GLTF_APPEND_ARRAY_3(newColors, colors) + } + } + + if (joints.size() == partVerticesCount * jointStride) { + if (jointStride == 4) { + GLTF_APPEND_ARRAY_4(newJoints, joints) + } else if (jointStride == 3) { + GLTF_APPEND_ARRAY_3(newJoints, joints) + } else if (jointStride == 2) { + GLTF_APPEND_ARRAY_2(newJoints, joints) + } else { + GLTF_APPEND_ARRAY_1(newJoints, joints) + } + } + + if (weights.size() == partVerticesCount * weightStride) { + if (weightStride == 4) { + GLTF_APPEND_ARRAY_4(newWeights, weights) + } else if (weightStride == 3) { + 
GLTF_APPEND_ARRAY_3(newWeights, weights) + } else if (weightStride == 2) { + GLTF_APPEND_ARRAY_2(newWeights, weights) + } else { + GLTF_APPEND_ARRAY_1(newWeights, weights) + } + } + newIndices.append(n); + newIndices.append(n + 1); + newIndices.append(n + 2); } - QVector validatedIndices; - for (int n = 0; n < indices.count(); ++n) { - if (indices[n] < partVerticesCount) { - validatedIndices.push_back(indices[n] + prevMeshVerticesCount); + vertices = newVertices; + normals = newNormals; + tangents = QVector(); + texcoords = newTexcoords; + texcoords2 = newTexcoords2; + colors = newColors; + joints = newJoints; + weights = newWeights; + indices = newIndices; + + partVerticesCount = vertices.size() / 3; + } + + QVector validatedIndices; + for (int n = 0; n < indices.count(); ++n) { + if (indices[n] < partVerticesCount) { + validatedIndices.push_back(indices[n] + prevMeshVerticesCount); + } else { + validatedIndices = QVector(); + break; + } + } + + if (validatedIndices.size() == 0) { + qWarning(modelformat) << "Indices out of range for model " << _url; + continue; + } + + part.triangleIndices.append(validatedIndices); + + mesh.vertices.reserve(partVerticesCount); + for (int n = 0; n < vertices.size(); n = n + VERTEX_STRIDE) { + mesh.vertices.push_back(glm::vec3(vertices[n], vertices[n + 1], vertices[n + 2])); + } + + mesh.normals.reserve(partVerticesCount); + for (int n = 0; n < normals.size(); n = n + NORMAL_STRIDE) { + mesh.normals.push_back(glm::vec3(normals[n], normals[n + 1], normals[n + 2])); + } + + // TODO: add correct tangent generation + if (tangents.size() == partVerticesCount * tangentStride) { + mesh.tangents.reserve(partVerticesCount); + for (int n = 0; n < tangents.size(); n += tangentStride) { + float tanW = tangentStride == 4 ? tangents[n + 3] : 1; + mesh.tangents.push_back(glm::vec3(tanW * tangents[n], tangents[n + 1], tanW * tangents[n + 2])); + } + } else if (meshAttributes.contains("TANGENT")) { + mesh.tangents.resize(partVerticesCount); + } + + if (texcoords.size() == partVerticesCount * TEX_COORD_STRIDE) { + mesh.texCoords.reserve(partVerticesCount); + for (int n = 0; n < texcoords.size(); n = n + 2) { + mesh.texCoords.push_back(glm::vec2(texcoords[n], texcoords[n + 1])); + } + } else if (meshAttributes.contains("TEXCOORD_0")) { + mesh.texCoords.resize(partVerticesCount); + } + + if (texcoords2.size() == partVerticesCount * TEX_COORD_STRIDE) { + mesh.texCoords1.reserve(partVerticesCount); + for (int n = 0; n < texcoords2.size(); n = n + 2) { + mesh.texCoords1.push_back(glm::vec2(texcoords2[n], texcoords2[n + 1])); + } + } else if (meshAttributes.contains("TEXCOORD_1")) { + mesh.texCoords1.resize(partVerticesCount); + } + + if (colors.size() == partVerticesCount * colorStride) { + mesh.colors.reserve(partVerticesCount); + for (int n = 0; n < colors.size(); n += colorStride) { + mesh.colors.push_back(glm::vec3(colors[n], colors[n + 1], colors[n + 2])); + } + } else if (meshAttributes.contains("COLOR_0")) { + mesh.colors.reserve(partVerticesCount); + for (int i = 0; i < partVerticesCount; ++i) { + mesh.colors.push_back(glm::vec3(1.0f, 1.0f, 1.0f)); + } + } + + if (joints.size() == partVerticesCount * jointStride) { + for (int n = 0; n < joints.size(); n += jointStride) { + clusterJoints.push_back(joints[n]); + if (jointStride > 1) { + clusterJoints.push_back(joints[n + 1]); + if (jointStride > 2) { + clusterJoints.push_back(joints[n + 2]); + if (jointStride > 3) { + clusterJoints.push_back(joints[n + 3]); + } else { + clusterJoints.push_back(0); + } + } else { + 
clusterJoints.push_back(0); + clusterJoints.push_back(0); + } } else { - validatedIndices = QVector(); + clusterJoints.push_back(0); + clusterJoints.push_back(0); + clusterJoints.push_back(0); + } + } + } else if (meshAttributes.contains("JOINTS_0")) { + for (int i = 0; i < partVerticesCount; ++i) { + for (int j = 0; j < 4; ++j) { + clusterJoints.push_back(0); + } + } + } + + if (weights.size() == partVerticesCount * weightStride) { + for (int n = 0; n < weights.size(); n += weightStride) { + clusterWeights.push_back(weights[n]); + if (weightStride > 1) { + clusterWeights.push_back(weights[n + 1]); + if (weightStride > 2) { + clusterWeights.push_back(weights[n + 2]); + if (weightStride > 3) { + clusterWeights.push_back(weights[n + 3]); + } else { + clusterWeights.push_back(0.0f); + } + } else { + clusterWeights.push_back(0.0f); + clusterWeights.push_back(0.0f); + } + } else { + clusterWeights.push_back(0.0f); + clusterWeights.push_back(0.0f); + clusterWeights.push_back(0.0f); + } + } + } else if (meshAttributes.contains("WEIGHTS_0")) { + for (int i = 0; i < partVerticesCount; ++i) { + clusterWeights.push_back(1.0f); + for (int j = 1; j < 4; ++j) { + clusterWeights.push_back(0.0f); + } + } + } + +#if 0 + // Build weights (adapted from FBXSerializer.cpp) + if (hfmModel.hasSkeletonJoints) { + int prevMeshClusterIndexCount = mesh.clusterIndices.count(); + int prevMeshClusterWeightCount = mesh.clusterWeights.count(); + const int WEIGHTS_PER_VERTEX = 4; + const float ALMOST_HALF = 0.499f; + int numVertices = mesh.vertices.size() - prevMeshVerticesCount; + + // Append new cluster indices and weights for this mesh part + for (int i = 0; i < numVertices * WEIGHTS_PER_VERTEX; ++i) { + mesh.clusterIndices.push_back(mesh.clusters.size() - 1); + mesh.clusterWeights.push_back(0); + } + + for (int c = 0; c < clusterJoints.size(); ++c) { + mesh.clusterIndices[prevMeshClusterIndexCount + c] = + originalToNewNodeIndexMap[_file.skins[node.skin].joints[clusterJoints[c]]]; + } + + // normalize and compress to 16-bits + for (int i = 0; i < numVertices; ++i) { + int j = i * WEIGHTS_PER_VERTEX; + + float totalWeight = 0.0f; + for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) { + totalWeight += clusterWeights[k]; + } + if (totalWeight > 0.0f) { + float weightScalingFactor = (float)(UINT16_MAX) / totalWeight; + for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) { + mesh.clusterWeights[prevMeshClusterWeightCount + k] = (uint16_t)(weightScalingFactor * clusterWeights[k] + ALMOST_HALF); + } + } else { + mesh.clusterWeights[prevMeshClusterWeightCount + j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); + } + for (int clusterIndex = 0; clusterIndex < mesh.clusters.size() - 1; ++clusterIndex) { + ShapeVertices& points = hfmModel.shapeVertices.at(clusterIndex); + glm::vec3 globalMeshScale = extractScale(globalTransforms[nodeIndex]); + const glm::mat4 meshToJoint = glm::scale(glm::mat4(), globalMeshScale) * jointInverseBindTransforms[clusterIndex]; + + const float EXPANSION_WEIGHT_THRESHOLD = 0.25f; + if (mesh.clusterWeights[j] >= EXPANSION_WEIGHT_THRESHOLD) { + // TODO: fix transformed vertices being pushed back + auto& vertex = mesh.vertices[i]; + const glm::mat4 vertexTransform = meshToJoint * (glm::translate(glm::mat4(), vertex)); + glm::vec3 transformedVertex = hfmModel.joints[clusterIndex].translation * (extractTranslation(vertexTransform)); + points.push_back(transformedVertex); + } + } + } + } +#endif + +#if 0 + if (primitive.defined["material"]) { + part.materialID = materialIDs[primitive.material]; + } +#endif + + 
mesh.parts.push_back(part); + + // populate the texture coordinates if they don't exist + if (mesh.texCoords.size() == 0 && !hfmModel.hasSkeletonJoints) { + for (int i = 0; i < part.triangleIndices.size(); ++i) { mesh.texCoords.push_back(glm::vec2(0.0, 1.0)); } + } + + // Build morph targets (blend shapes) + if (!primitive.targets.isEmpty()) { + + // Build list of blendshapes from FST + typedef QPair WeightedIndex; + hifi::VariantHash blendshapeMappings = mapping.value("bs").toHash(); + QMultiHash blendshapeIndices; + + for (int i = 0;; ++i) { + hifi::ByteArray blendshapeName = FACESHIFT_BLENDSHAPES[i]; + if (blendshapeName.isEmpty()) { break; } - } - - if (validatedIndices.size() == 0) { - qWarning(modelformat) << "Indices out of range for model " << _url; - continue; - } - - part.triangleIndices.append(validatedIndices); - - for (int n = 0; n < vertices.size(); n = n + verticesStride) { - mesh.vertices.push_back(glm::vec3(vertices[n], vertices[n + 1], vertices[n + 2])); - } - - for (int n = 0; n < normals.size(); n = n + normalStride) { - mesh.normals.push_back(glm::vec3(normals[n], normals[n + 1], normals[n + 2])); - } - - // TODO: add correct tangent generation - if (tangents.size() == partVerticesCount * tangentStride) { - for (int n = 0; n < tangents.size(); n += tangentStride) { - float tanW = tangentStride == 4 ? tangents[n + 3] : 1; - mesh.tangents.push_back(glm::vec3(tanW * tangents[n], tangents[n + 1], tanW * tangents[n + 2])); + QList mappings = blendshapeMappings.values(blendshapeName); + foreach (const QVariant& mapping, mappings) { + QVariantList blendshapeMapping = mapping.toList(); + blendshapeIndices.insert(blendshapeMapping.at(0).toByteArray(), WeightedIndex(i, blendshapeMapping.at(1).toFloat())); } - } else { - if (meshAttributes.contains("TANGENT")) { - for (int i = 0; i < partVerticesCount; ++i) { - mesh.tangents.push_back(glm::vec3(0.0f, 0.0f, 0.0f)); + } + + // glTF morph targets may or may not have names. if they are labeled, add them based on + // the corresponding names from the FST. 
otherwise, just add them in the order they are given + mesh.blendshapes.resize(blendshapeMappings.size()); + auto values = blendshapeIndices.values(); + auto keys = blendshapeIndices.keys(); + auto names = gltfMesh.extras.targetNames; + QVector weights = gltfMesh.weights; + + for (int weightedIndex = 0; weightedIndex < values.size(); ++weightedIndex) { + float weight = 0.1f; + int indexFromMapping = weightedIndex; + int targetIndex = weightedIndex; + hfmModel.blendshapeChannelNames.push_back("target_" + QString::number(weightedIndex)); + + if (!names.isEmpty()) { + targetIndex = names.indexOf(keys[weightedIndex]); + indexFromMapping = values[weightedIndex].first; + weight = weight * values[weightedIndex].second; + hfmModel.blendshapeChannelNames[weightedIndex] = keys[weightedIndex]; + } + HFMBlendshape& blendshape = mesh.blendshapes[indexFromMapping]; + blendshape.indices = part.triangleIndices; + auto target = primitive.targets[targetIndex]; + + QVector normals; + QVector vertices; + + if (weights.size() == primitive.targets.size()) { + int targetWeight = weights[targetIndex]; + if (targetWeight != 0) { + weight = weight * targetWeight; } } - } - if (texcoords.size() == partVerticesCount * texCoordStride) { - for (int n = 0; n < texcoords.size(); n = n + 2) { - mesh.texCoords.push_back(glm::vec2(texcoords[n], texcoords[n + 1])); + if (target.values.contains((QString) "NORMAL")) { + generateTargetData(target.values.value((QString) "NORMAL"), weight, normals); } - } else { - if (meshAttributes.contains("TEXCOORD_0")) { - for (int i = 0; i < partVerticesCount; ++i) { - mesh.texCoords.push_back(glm::vec2(0.0f, 0.0f)); - } + if (target.values.contains((QString) "POSITION")) { + generateTargetData(target.values.value((QString) "POSITION"), weight, vertices); } - } - - if (texcoords2.size() == partVerticesCount * texCoord2Stride) { - for (int n = 0; n < texcoords2.size(); n = n + 2) { - mesh.texCoords1.push_back(glm::vec2(texcoords2[n], texcoords2[n + 1])); - } - } else { - if (meshAttributes.contains("TEXCOORD_1")) { - for (int i = 0; i < partVerticesCount; ++i) { - mesh.texCoords1.push_back(glm::vec2(0.0f, 0.0f)); - } - } - } - - if (colors.size() == partVerticesCount * colorStride) { - for (int n = 0; n < colors.size(); n += colorStride) { - mesh.colors.push_back(glm::vec3(colors[n], colors[n + 1], colors[n + 2])); - } - } else { - if (meshAttributes.contains("COLOR_0")) { - for (int i = 0; i < partVerticesCount; ++i) { - mesh.colors.push_back(glm::vec3(1.0f, 1.0f, 1.0f)); - } - } - } - - if (joints.size() == partVerticesCount * jointStride) { - for (int n = 0; n < joints.size(); n += jointStride) { - clusterJoints.push_back(joints[n]); - if (jointStride > 1) { - clusterJoints.push_back(joints[n + 1]); - if (jointStride > 2) { - clusterJoints.push_back(joints[n + 2]); - if (jointStride > 3) { - clusterJoints.push_back(joints[n + 3]); - } else { - clusterJoints.push_back(0); - } - } else { - clusterJoints.push_back(0); - clusterJoints.push_back(0); - } + bool isNewBlendshape = blendshape.vertices.size() < vertices.size(); + int count = 0; + for (int i : blendshape.indices) { + if (isNewBlendshape) { + blendshape.vertices.push_back(vertices[i]); + blendshape.normals.push_back(normals[i]); } else { - clusterJoints.push_back(0); - clusterJoints.push_back(0); - clusterJoints.push_back(0); - } - } - } else { - if (meshAttributes.contains("JOINTS_0")) { - for (int i = 0; i < partVerticesCount; ++i) { - for (int j = 0; j < 4; ++j) { - clusterJoints.push_back(0); - } + blendshape.vertices[count] = 
blendshape.vertices[count] + vertices[i]; + blendshape.normals[count] = blendshape.normals[count] + normals[i]; + ++count; } } } - - if (weights.size() == partVerticesCount * weightStride) { - for (int n = 0; n < weights.size(); n += weightStride) { - clusterWeights.push_back(weights[n]); - if (weightStride > 1) { - clusterWeights.push_back(weights[n + 1]); - if (weightStride > 2) { - clusterWeights.push_back(weights[n + 2]); - if (weightStride > 3) { - clusterWeights.push_back(weights[n + 3]); - } else { - clusterWeights.push_back(0.0f); - } - } else { - clusterWeights.push_back(0.0f); - clusterWeights.push_back(0.0f); - } - } else { - clusterWeights.push_back(0.0f); - clusterWeights.push_back(0.0f); - clusterWeights.push_back(0.0f); - } - } - } else { - if (meshAttributes.contains("WEIGHTS_0")) { - for (int i = 0; i < partVerticesCount; ++i) { - clusterWeights.push_back(1.0f); - for (int j = 1; j < 4; ++j) { - clusterWeights.push_back(0.0f); - } - } - } - } - - // Build weights (adapted from FBXSerializer.cpp) - if (hfmModel.hasSkeletonJoints) { - int prevMeshClusterIndexCount = mesh.clusterIndices.count(); - int prevMeshClusterWeightCount = mesh.clusterWeights.count(); - const int WEIGHTS_PER_VERTEX = 4; - const float ALMOST_HALF = 0.499f; - int numVertices = mesh.vertices.size() - prevMeshVerticesCount; - - // Append new cluster indices and weights for this mesh part - for (int i = 0; i < numVertices * WEIGHTS_PER_VERTEX; ++i) { - mesh.clusterIndices.push_back(mesh.clusters.size() - 1); - mesh.clusterWeights.push_back(0); - } - - for (int c = 0; c < clusterJoints.size(); ++c) { - mesh.clusterIndices[prevMeshClusterIndexCount + c] = - originalToNewNodeIndexMap[_file.skins[node.skin].joints[clusterJoints[c]]]; - } - - // normalize and compress to 16-bits - for (int i = 0; i < numVertices; ++i) { - int j = i * WEIGHTS_PER_VERTEX; - - float totalWeight = 0.0f; - for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) { - totalWeight += clusterWeights[k]; - } - if (totalWeight > 0.0f) { - float weightScalingFactor = (float)(UINT16_MAX) / totalWeight; - for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) { - mesh.clusterWeights[prevMeshClusterWeightCount + k] = (uint16_t)(weightScalingFactor * clusterWeights[k] + ALMOST_HALF); - } - } else { - mesh.clusterWeights[prevMeshClusterWeightCount + j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); - } - for (int clusterIndex = 0; clusterIndex < mesh.clusters.size() - 1; ++clusterIndex) { - ShapeVertices& points = hfmModel.shapeVertices.at(clusterIndex); - glm::vec3 globalMeshScale = extractScale(globalTransforms[nodeIndex]); - const glm::mat4 meshToJoint = glm::scale(glm::mat4(), globalMeshScale) * jointInverseBindTransforms[clusterIndex]; - - const float EXPANSION_WEIGHT_THRESHOLD = 0.25f; - if (mesh.clusterWeights[j] >= EXPANSION_WEIGHT_THRESHOLD) { - // TODO: fix transformed vertices being pushed back - auto& vertex = mesh.vertices[i]; - const glm::mat4 vertexTransform = meshToJoint * (glm::translate(glm::mat4(), vertex)); - glm::vec3 transformedVertex = hfmModel.joints[clusterIndex].translation * (extractTranslation(vertexTransform)); - points.push_back(transformedVertex); - } - } - } - } - - if (primitive.defined["material"]) { - part.materialID = materialIDs[primitive.material]; - } - mesh.parts.push_back(part); - - // populate the texture coordinates if they don't exist - if (mesh.texCoords.size() == 0 && !hfmModel.hasSkeletonJoints) { - for (int i = 0; i < part.triangleIndices.size(); ++i) { mesh.texCoords.push_back(glm::vec2(0.0, 1.0)); } - } - - 
// Build morph targets (blend shapes) - if (!primitive.targets.isEmpty()) { - - // Build list of blendshapes from FST - typedef QPair WeightedIndex; - hifi::VariantHash blendshapeMappings = mapping.value("bs").toHash(); - QMultiHash blendshapeIndices; - - for (int i = 0;; ++i) { - hifi::ByteArray blendshapeName = FACESHIFT_BLENDSHAPES[i]; - if (blendshapeName.isEmpty()) { - break; - } - QList mappings = blendshapeMappings.values(blendshapeName); - foreach (const QVariant& mapping, mappings) { - QVariantList blendshapeMapping = mapping.toList(); - blendshapeIndices.insert(blendshapeMapping.at(0).toByteArray(), WeightedIndex(i, blendshapeMapping.at(1).toFloat())); - } - } - - // glTF morph targets may or may not have names. if they are labeled, add them based on - // the corresponding names from the FST. otherwise, just add them in the order they are given - mesh.blendshapes.resize(blendshapeMappings.size()); - auto values = blendshapeIndices.values(); - auto keys = blendshapeIndices.keys(); - auto names = _file.meshes[node.mesh].extras.targetNames; - QVector weights = _file.meshes[node.mesh].weights; - - for (int weightedIndex = 0; weightedIndex < values.size(); ++weightedIndex) { - float weight = 0.1f; - int indexFromMapping = weightedIndex; - int targetIndex = weightedIndex; - hfmModel.blendshapeChannelNames.push_back("target_" + QString::number(weightedIndex)); - - if (!names.isEmpty()) { - targetIndex = names.indexOf(keys[weightedIndex]); - indexFromMapping = values[weightedIndex].first; - weight = weight * values[weightedIndex].second; - hfmModel.blendshapeChannelNames[weightedIndex] = keys[weightedIndex]; - } - HFMBlendshape& blendshape = mesh.blendshapes[indexFromMapping]; - blendshape.indices = part.triangleIndices; - auto target = primitive.targets[targetIndex]; - - QVector normals; - QVector vertices; - - if (weights.size() == primitive.targets.size()) { - int targetWeight = weights[targetIndex]; - if (targetWeight != 0) { - weight = weight * targetWeight; - } - } - - if (target.values.contains((QString) "NORMAL")) { - generateTargetData(target.values.value((QString) "NORMAL"), weight, normals); - } - if (target.values.contains((QString) "POSITION")) { - generateTargetData(target.values.value((QString) "POSITION"), weight, vertices); - } - bool isNewBlendshape = blendshape.vertices.size() < vertices.size(); - int count = 0; - for (int i : blendshape.indices) { - if (isNewBlendshape) { - blendshape.vertices.push_back(vertices[i]); - blendshape.normals.push_back(normals[i]); - } else { - blendshape.vertices[count] = blendshape.vertices[count] + vertices[i]; - blendshape.normals[count] = blendshape.normals[count] + normals[i]; - ++count; - } - } - } - } - - foreach(const glm::vec3& vertex, mesh.vertices) { - glm::vec3 transformedVertex = glm::vec3(globalTransforms[nodeIndex] * glm::vec4(vertex, 1.0f)); - mesh.meshExtents.addPoint(transformedVertex); - hfmModel.meshExtents.addPoint(transformedVertex); - } } - // Add epsilon to mesh extents to compensate for planar meshes - mesh.meshExtents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON); - mesh.meshExtents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); - hfmModel.meshExtents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON); - hfmModel.meshExtents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); - - mesh.meshIndex = (int)hfmModel.meshes.size(); +#if 0 + for(const glm::vec3& vertex : mesh.vertices) { + glm::vec3 transformedVertex = glm::vec3(globalTransforms[nodeIndex] * glm::vec4(vertex, 1.0f)); + 
mesh.meshExtents.addPoint(transformedVertex); + hfmModel.meshExtents.addPoint(transformedVertex); + } +#endif + } + + // Add epsilon to mesh extents to compensate for planar meshes + mesh.meshExtents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON); + mesh.meshExtents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); + hfmModel.meshExtents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON); + hfmModel.meshExtents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); + + } + + for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { + const auto& node = _file.nodes[nodeIndex]; + if (-1 == node.mesh) { + continue; + } + + const auto& mesh = _file.meshes[node.mesh]; + int primCount = (int)mesh.primitives.size(); + for (int primIndex = 0; primIndex < primCount; ++primIndex) { + const auto& primitive = mesh.primitives[primIndex]; + hfmModel.shapes.push_back({}); + auto& hfmShape = hfmModel.shapes.back(); + hfmShape.transform = nodeIndex; + hfmShape.mesh = node.mesh; + hfmShape.meshPart = primIndex; + hfmShape.material = primitive.material; } - ++nodecount; } return true; @@ -1637,9 +1673,8 @@ std::unique_ptr GLTFSerializer::getFactory() const { } HFMModel::Pointer GLTFSerializer::read(const hifi::ByteArray& data, const hifi::VariantHash& mapping, const hifi::URL& url) { - _url = url; - + // Normalize url for local files hifi::URL normalizeUrl = DependencyManager::get()->normalizeURL(_url); if (normalizeUrl.scheme().isEmpty() || (normalizeUrl.scheme() == "file")) { @@ -1649,6 +1684,9 @@ HFMModel::Pointer GLTFSerializer::read(const hifi::ByteArray& data, const hifi:: if (parseGLTF(data)) { //_file.dump(); + _file.sortNodes(); + _file.populateMaterialNames(); + _file.normalizeNodeTransforms(); auto hfmModelPtr = std::make_shared(); HFMModel& hfmModel = *hfmModelPtr; buildGeometry(hfmModel, mapping, _url); @@ -1672,7 +1710,7 @@ bool GLTFSerializer::readBinary(const QString& url, hifi::ByteArray& outdata) { hifi::URL binaryUrl = _url.resolved(url); std::tie(success, outdata) = requestData(binaryUrl); } - + return success; } @@ -1685,8 +1723,8 @@ bool GLTFSerializer::doesResourceExist(const QString& url) { } std::tuple GLTFSerializer::requestData(hifi::URL& url) { - auto request = DependencyManager::get()->createResourceRequest( - nullptr, url, true, -1, "GLTFSerializer::requestData"); + auto request = + DependencyManager::get()->createResourceRequest(nullptr, url, true, -1, "GLTFSerializer::requestData"); if (!request) { return std::make_tuple(false, hifi::ByteArray()); @@ -1705,19 +1743,16 @@ std::tuple GLTFSerializer::requestData(hifi::URL& url) { } hifi::ByteArray GLTFSerializer::requestEmbeddedData(const QString& url) { - QString binaryUrl = url.split(",")[1]; + QString binaryUrl = url.split(",")[1]; return binaryUrl.isEmpty() ? 
hifi::ByteArray() : QByteArray::fromBase64(binaryUrl.toUtf8()); } - QNetworkReply* GLTFSerializer::request(hifi::URL& url, bool isTest) { if (!qApp) { return nullptr; } bool aboutToQuit{ false }; - auto connection = QObject::connect(qApp, &QCoreApplication::aboutToQuit, [&] { - aboutToQuit = true; - }); + auto connection = QObject::connect(qApp, &QCoreApplication::aboutToQuit, [&] { aboutToQuit = true; }); QNetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance(); QNetworkRequest netRequest(url); netRequest.setAttribute(QNetworkRequest::FollowRedirectsAttribute, true); @@ -1726,18 +1761,18 @@ QNetworkReply* GLTFSerializer::request(hifi::URL& url, bool isTest) { netReply->deleteLater(); return nullptr; } - QEventLoop loop; // Create an event loop that will quit when we get the finished signal + QEventLoop loop; // Create an event loop that will quit when we get the finished signal QObject::connect(netReply, SIGNAL(finished()), &loop, SLOT(quit())); - loop.exec(); // Nothing is going to happen on this whole run thread until we get this + loop.exec(); // Nothing is going to happen on this whole run thread until we get this QObject::disconnect(connection); - return netReply; // trying to sync later on. + return netReply; // trying to sync later on. } HFMTexture GLTFSerializer::getHFMTexture(const GLTFTexture& texture) { HFMTexture fbxtex = HFMTexture(); fbxtex.texcoordSet = 0; - + if (texture.defined["source"]) { QString url = _file.images[texture.source].uri; @@ -1745,10 +1780,10 @@ HFMTexture GLTFSerializer::getHFMTexture(const GLTFTexture& texture) { hifi::URL textureUrl = _url.resolved(url); fbxtex.name = fname; fbxtex.filename = textureUrl.toEncoded(); - + if (_url.toString().endsWith("glb") && !_glbBinary.isEmpty()) { int bufferView = _file.images[texture.source].bufferView; - + GLTFBufferView& imagesBufferview = _file.bufferviews[bufferView]; int offset = imagesBufferview.byteOffset; int length = imagesBufferview.byteLength; @@ -1758,19 +1793,15 @@ HFMTexture GLTFSerializer::getHFMTexture(const GLTFTexture& texture) { } if (url.contains("data:image/jpeg;base64,") || url.contains("data:image/png;base64,")) { - fbxtex.content = requestEmbeddedData(url); + fbxtex.content = requestEmbeddedData(url); } } return fbxtex; } void GLTFSerializer::setHFMMaterial(HFMMaterial& fbxmat, const GLTFMaterial& material) { - - if (material.defined["emissiveFactor"] && material.emissiveFactor.size() == 3) { - glm::vec3 emissive = glm::vec3(material.emissiveFactor[0], - material.emissiveFactor[1], - material.emissiveFactor[2]); + glm::vec3 emissive = glm::vec3(material.emissiveFactor[0], material.emissiveFactor[1], material.emissiveFactor[2]); fbxmat._material->setEmissive(emissive); } @@ -1778,12 +1809,12 @@ void GLTFSerializer::setHFMMaterial(HFMMaterial& fbxmat, const GLTFMaterial& mat fbxmat.emissiveTexture = getHFMTexture(_file.textures[material.emissiveTexture]); fbxmat.useEmissiveMap = true; } - + if (material.defined["normalTexture"]) { fbxmat.normalTexture = getHFMTexture(_file.textures[material.normalTexture]); fbxmat.useNormalMap = true; } - + if (material.defined["occlusionTexture"]) { fbxmat.occlusionTexture = getHFMTexture(_file.textures[material.occlusionTexture]); fbxmat.useOcclusionMap = true; @@ -1791,7 +1822,7 @@ void GLTFSerializer::setHFMMaterial(HFMMaterial& fbxmat, const GLTFMaterial& mat if (material.defined["pbrMetallicRoughness"]) { fbxmat.isPBSMaterial = true; - + if (material.pbrMetallicRoughness.defined["metallicFactor"]) { fbxmat.metallic = 
material.pbrMetallicRoughness.metallicFactor; } @@ -1811,23 +1842,20 @@ void GLTFSerializer::setHFMMaterial(HFMMaterial& fbxmat, const GLTFMaterial& mat if (material.pbrMetallicRoughness.defined["roughnessFactor"]) { fbxmat._material->setRoughness(material.pbrMetallicRoughness.roughnessFactor); } - if (material.pbrMetallicRoughness.defined["baseColorFactor"] && + if (material.pbrMetallicRoughness.defined["baseColorFactor"] && material.pbrMetallicRoughness.baseColorFactor.size() == 4) { - glm::vec3 dcolor = glm::vec3(material.pbrMetallicRoughness.baseColorFactor[0], - material.pbrMetallicRoughness.baseColorFactor[1], - material.pbrMetallicRoughness.baseColorFactor[2]); + glm::vec3 dcolor = + glm::vec3(material.pbrMetallicRoughness.baseColorFactor[0], material.pbrMetallicRoughness.baseColorFactor[1], + material.pbrMetallicRoughness.baseColorFactor[2]); fbxmat.diffuseColor = dcolor; fbxmat._material->setAlbedo(dcolor); fbxmat._material->setOpacity(material.pbrMetallicRoughness.baseColorFactor[3]); - } + } } - } -template -bool GLTFSerializer::readArray(const hifi::ByteArray& bin, int byteOffset, int count, - QVector& outarray, int accessorType) { - +template +bool GLTFSerializer::readArray(const hifi::ByteArray& bin, int byteOffset, int count, QVector& outarray, int accessorType) { QDataStream blobstream(bin); blobstream.setByteOrder(QDataStream::LittleEndian); blobstream.setVersion(QDataStream::Qt_5_9); @@ -1836,31 +1864,31 @@ bool GLTFSerializer::readArray(const hifi::ByteArray& bin, int byteOffset, int c int bufferCount = 0; switch (accessorType) { - case GLTFAccessorType::SCALAR: - bufferCount = 1; - break; - case GLTFAccessorType::VEC2: - bufferCount = 2; - break; - case GLTFAccessorType::VEC3: - bufferCount = 3; - break; - case GLTFAccessorType::VEC4: - bufferCount = 4; - break; - case GLTFAccessorType::MAT2: - bufferCount = 4; - break; - case GLTFAccessorType::MAT3: - bufferCount = 9; - break; - case GLTFAccessorType::MAT4: - bufferCount = 16; - break; - default: - qWarning(modelformat) << "Unknown accessorType: " << accessorType; - blobstream.unsetDevice(); - return false; + case GLTFAccessorType::SCALAR: + bufferCount = 1; + break; + case GLTFAccessorType::VEC2: + bufferCount = 2; + break; + case GLTFAccessorType::VEC3: + bufferCount = 3; + break; + case GLTFAccessorType::VEC4: + bufferCount = 4; + break; + case GLTFAccessorType::MAT2: + bufferCount = 4; + break; + case GLTFAccessorType::MAT3: + bufferCount = 9; + break; + case GLTFAccessorType::MAT4: + bufferCount = 16; + break; + default: + qWarning(modelformat) << "Unknown accessorType: " << accessorType; + blobstream.unsetDevice(); + return false; } for (int i = 0; i < count; ++i) { for (int j = 0; j < bufferCount; ++j) { @@ -1878,31 +1906,137 @@ bool GLTFSerializer::readArray(const hifi::ByteArray& bin, int byteOffset, int c blobstream.unsetDevice(); return true; } -template -bool GLTFSerializer::addArrayOfType(const hifi::ByteArray& bin, int byteOffset, int count, - QVector& outarray, int accessorType, int componentType) { - +template +bool GLTFSerializer::addArrayOfType(const hifi::ByteArray& bin, + int byteOffset, + int count, + QVector& outarray, + int accessorType, + int componentType) { switch (componentType) { - case GLTFAccessorComponentType::BYTE: {} - case GLTFAccessorComponentType::UNSIGNED_BYTE: { - return readArray(bin, byteOffset, count, outarray, accessorType); - } - case GLTFAccessorComponentType::SHORT: { - return readArray(bin, byteOffset, count, outarray, accessorType); - } - case 
GLTFAccessorComponentType::UNSIGNED_INT: { - return readArray(bin, byteOffset, count, outarray, accessorType); - } - case GLTFAccessorComponentType::UNSIGNED_SHORT: { - return readArray(bin, byteOffset, count, outarray, accessorType); - } - case GLTFAccessorComponentType::FLOAT: { - return readArray(bin, byteOffset, count, outarray, accessorType); - } + case GLTFAccessorComponentType::BYTE: { + } + case GLTFAccessorComponentType::UNSIGNED_BYTE: { + return readArray(bin, byteOffset, count, outarray, accessorType); + } + case GLTFAccessorComponentType::SHORT: { + return readArray(bin, byteOffset, count, outarray, accessorType); + } + case GLTFAccessorComponentType::UNSIGNED_INT: { + return readArray(bin, byteOffset, count, outarray, accessorType); + } + case GLTFAccessorComponentType::UNSIGNED_SHORT: { + return readArray(bin, byteOffset, count, outarray, accessorType); + } + case GLTFAccessorComponentType::FLOAT: { + return readArray(bin, byteOffset, count, outarray, accessorType); + } } return false; } + +template +bool GLTFSerializer::addArrayFromAttribute(GLTFVertexAttribute::Value vertexAttribute, GLTFAccessor& accessor, QVector& outarray) { + switch (vertexAttribute) { + case GLTFVertexAttribute::POSITION: + if (accessor.type != GLTFAccessorType::VEC3) { + qWarning(modelformat) << "Invalid accessor type on glTF POSITION data for model " << _url; + return false; + } + + if (!addArrayFromAccessor(accessor, outarray)) { + qWarning(modelformat) << "There was a problem reading glTF POSITION data for model " << _url; + return false; + } + break; + + case GLTFVertexAttribute::NORMAL: + if (accessor.type != GLTFAccessorType::VEC3) { + qWarning(modelformat) << "Invalid accessor type on glTF NORMAL data for model " << _url; + return false; + } + + if (!addArrayFromAccessor(accessor, outarray)) { + qWarning(modelformat) << "There was a problem reading glTF NORMAL data for model " << _url; + return false; + } + break; + + case GLTFVertexAttribute::TANGENT: + if (accessor.type != GLTFAccessorType::VEC4 && accessor.type != GLTFAccessorType::VEC3) { + qWarning(modelformat) << "Invalid accessor type on glTF TANGENT data for model " << _url; + return false; + } + break; + + if (!addArrayFromAccessor(accessor, outarray)) { + qWarning(modelformat) << "There was a problem reading glTF TANGENT data for model " << _url; + return false; + } + break; + + case GLTFVertexAttribute::TEXCOORD_0: + if (accessor.type != GLTFAccessorType::VEC2) { + qWarning(modelformat) << "Invalid accessor type on glTF TEXCOORD_0 data for model " << _url; + return false; + } + + if (!addArrayFromAccessor(accessor, outarray)) { + qWarning(modelformat) << "There was a problem reading glTF TEXCOORD_0 data for model " << _url; + return false; + } + break; + + case GLTFVertexAttribute::TEXCOORD_1: + if (accessor.type != GLTFAccessorType::VEC2) { + qWarning(modelformat) << "Invalid accessor type on glTF TEXCOORD_1 data for model " << _url; + return false; + } + if (!addArrayFromAccessor(accessor, outarray)) { + qWarning(modelformat) << "There was a problem reading glTF TEXCOORD_1 data for model " << _url; + return false; + } + break; + + case GLTFVertexAttribute::COLOR_0: + if (accessor.type != GLTFAccessorType::VEC4 && accessor.type != GLTFAccessorType::VEC3) { + qWarning(modelformat) << "Invalid accessor type on glTF COLOR_0 data for model " << _url; + return false; + } + + if (!addArrayFromAccessor(accessor, outarray)) { + qWarning(modelformat) << "There was a problem reading glTF COLOR_0 data for model " << _url; + return false; + } + 
break; + + case GLTFVertexAttribute::JOINTS_0: + if (accessor.type < GLTFAccessorType::SCALAR || accessor.type > GLTFAccessorType::VEC4) { + qWarning(modelformat) << "Invalid accessor type on glTF JOINTS_0 data for model " << _url; + return false; + } + + if (!addArrayFromAccessor(accessor, outarray)) { + qWarning(modelformat) << "There was a problem reading glTF JOINTS_0 data for model " << _url; + return false; + } + break; + + case GLTFVertexAttribute::WEIGHTS_0: + if (accessor.type < GLTFAccessorType::SCALAR || accessor.type > GLTFAccessorType::VEC4) { + qWarning(modelformat) << "Invalid accessor type on glTF WEIGHTS_0 data for model " << _url; + return false; + } + + if (!addArrayFromAccessor(accessor, outarray)) { + qWarning(modelformat) << "There was a problem reading glTF WEIGHTS_0 data for model " << _url; + } + } + + return true; +} + template bool GLTFSerializer::addArrayFromAccessor(GLTFAccessor& accessor, QVector& outarray) { bool success = true; @@ -1948,7 +2082,7 @@ bool GLTFSerializer::addArrayFromAccessor(GLTFAccessor& accessor, QVector& ou if (success) { for (int i = 0; i < accessor.sparse.count; ++i) { - if ((i * 3) + 2 < out_sparse_values_array.size()) { + if ((i * 3) + 2 < out_sparse_values_array.size()) { if ((out_sparse_indices_array[i] * 3) + 2 < outarray.length()) { for (int j = 0; j < 3; ++j) { outarray[(out_sparse_indices_array[i] * 3) + j] = out_sparse_values_array[(i * 3) + j]; @@ -1970,14 +2104,16 @@ bool GLTFSerializer::addArrayFromAccessor(GLTFAccessor& accessor, QVector& ou return success; } -void GLTFSerializer::retriangulate(const QVector& inIndices, const QVector& in_vertices, - const QVector& in_normals, QVector& outIndices, - QVector& out_vertices, QVector& out_normals) { +void GLTFSerializer::retriangulate(const QVector& inIndices, + const QVector& in_vertices, + const QVector& in_normals, + QVector& outIndices, + QVector& out_vertices, + QVector& out_normals) { for (int i = 0; i < inIndices.size(); i = i + 3) { - int idx1 = inIndices[i]; - int idx2 = inIndices[i+1]; - int idx3 = inIndices[i+2]; + int idx2 = inIndices[i + 1]; + int idx3 = inIndices[i + 2]; out_vertices.push_back(in_vertices[idx1]); out_vertices.push_back(in_vertices[idx2]); @@ -1988,8 +2124,8 @@ void GLTFSerializer::retriangulate(const QVector& inIndices, const QVector< out_normals.push_back(in_normals[idx3]); outIndices.push_back(i); - outIndices.push_back(i+1); - outIndices.push_back(i+2); + outIndices.push_back(i + 1); + outIndices.push_back(i + 2); } } @@ -1998,7 +2134,7 @@ void GLTFSerializer::glTFDebugDump() { for (GLTFNode node : _file.nodes) { if (node.defined["mesh"]) { qCDebug(modelformat) << "\n"; - qCDebug(modelformat) << " node_transforms" << node.transforms; + qCDebug(modelformat) << " node_transform" << node.transform; qCDebug(modelformat) << "\n"; } } @@ -2051,12 +2187,12 @@ void GLTFSerializer::hfmDebugDump(const HFMModel& hfmModel) { qCDebug(modelformat) << " colors.count() =" << mesh.colors.count(); qCDebug(modelformat) << " texCoords.count() =" << mesh.texCoords.count(); qCDebug(modelformat) << " texCoords1.count() =" << mesh.texCoords1.count(); - qCDebug(modelformat) << " clusterIndices.count() =" << mesh.clusterIndices.count(); - qCDebug(modelformat) << " clusterWeights.count() =" << mesh.clusterWeights.count(); - qCDebug(modelformat) << " modelTransform =" << mesh.modelTransform; + //qCDebug(modelformat) << " clusterIndices.count() =" << mesh.clusterIndices.count(); + //qCDebug(modelformat) << " clusterWeights.count() =" << mesh.clusterWeights.count(); + 
//qCDebug(modelformat) << " modelTransform =" << mesh.modelTransform; qCDebug(modelformat) << " parts.count() =" << mesh.parts.size(); qCDebug(modelformat) << "---------------- Meshes (blendshapes)--------"; - foreach(HFMBlendshape bshape, mesh.blendshapes) { + for (HFMBlendshape bshape : mesh.blendshapes) { qCDebug(modelformat) << "\n"; qCDebug(modelformat) << " bshape.indices.count() =" << bshape.indices.count(); qCDebug(modelformat) << " bshape.vertices.count() =" << bshape.vertices.count(); @@ -2064,37 +2200,37 @@ void GLTFSerializer::hfmDebugDump(const HFMModel& hfmModel) { qCDebug(modelformat) << "\n"; } qCDebug(modelformat) << "---------------- Meshes (meshparts)--------"; - foreach(HFMMeshPart meshPart, mesh.parts) { + for (HFMMeshPart meshPart : mesh.parts) { qCDebug(modelformat) << "\n"; qCDebug(modelformat) << " quadIndices.count() =" << meshPart.quadIndices.count(); qCDebug(modelformat) << " triangleIndices.count() =" << meshPart.triangleIndices.count(); - qCDebug(modelformat) << " materialID =" << meshPart.materialID; + //qCDebug(modelformat) << " materialID =" << meshPart.materialID; qCDebug(modelformat) << "\n"; - } qCDebug(modelformat) << "---------------- Meshes (clusters)--------"; - qCDebug(modelformat) << " clusters.count() =" << mesh.clusters.count(); - foreach(HFMCluster cluster, mesh.clusters) { - qCDebug(modelformat) << "\n"; - qCDebug(modelformat) << " jointIndex =" << cluster.jointIndex; - qCDebug(modelformat) << " inverseBindMatrix =" << cluster.inverseBindMatrix; - qCDebug(modelformat) << "\n"; - } - qCDebug(modelformat) << "\n"; + //qCDebug(modelformat) << " clusters.count() =" << mesh.clusters.count(); + //for(HFMCluster cluster : mesh.clusters) { + // qCDebug(modelformat) << "\n"; + // qCDebug(modelformat) << " jointIndex =" << cluster.jointIndex; + // qCDebug(modelformat) << " inverseBindMatrix =" << cluster.inverseBindMatrix; + // qCDebug(modelformat) << "\n"; + //} + //qCDebug(modelformat) << "\n"; } qCDebug(modelformat) << "---------------- AnimationFrames ----------------"; - foreach(HFMAnimationFrame anim, hfmModel.animationFrames) { + for (HFMAnimationFrame anim : hfmModel.animationFrames) { qCDebug(modelformat) << " anim.translations = " << anim.translations; qCDebug(modelformat) << " anim.rotations = " << anim.rotations; } QList mitomona_keys = hfmModel.meshIndicesToModelNames.keys(); - foreach(int key, mitomona_keys) { - qCDebug(modelformat) << " meshIndicesToModelNames key =" << key << " val =" << hfmModel.meshIndicesToModelNames[key]; + for (int key : mitomona_keys) { + qCDebug(modelformat) << " meshIndicesToModelNames key =" << key + << " val =" << hfmModel.meshIndicesToModelNames[key]; } qCDebug(modelformat) << "---------------- Materials ----------------"; - foreach(HFMMaterial mat, hfmModel.materials) { + for (HFMMaterial mat : hfmModel.materials) { qCDebug(modelformat) << "\n"; qCDebug(modelformat) << " mat.materialID =" << mat.materialID; qCDebug(modelformat) << " diffuseColor =" << mat.diffuseColor; diff --git a/libraries/fbx/src/GLTFSerializer.h b/libraries/fbx/src/GLTFSerializer.h index 4d72805863..78dc9b9a37 100755 --- a/libraries/fbx/src/GLTFSerializer.h +++ b/libraries/fbx/src/GLTFSerializer.h @@ -38,14 +38,14 @@ struct GLTFAsset { struct GLTFNode { QString name; - int camera; - int mesh; + int camera{ -1 }; + int mesh{ -1 }; QVector children; QVector translation; QVector rotation; QVector scale; QVector matrix; - QVector transforms; + glm::mat4 transform; int skin; QVector skeletons; QString jointName; @@ -85,6 +85,8 @@ struct 
GLTFNode { qCDebug(modelformat) << "skeletons: " << skeletons; } } + + void normalizeTransform(); }; // Meshes @@ -460,15 +462,56 @@ struct GLTFMaterial { // Accesors namespace GLTFAccessorType { - enum Values { - SCALAR = 0, - VEC2, - VEC3, - VEC4, - MAT2, - MAT3, - MAT4 + enum Value { + SCALAR = 1, + VEC2 = 2, + VEC3 = 3, + VEC4 = 4, + MAT2 = 5, + MAT3 = 9, + MAT4 = 16 }; + + inline int count(Value value) { + if (value == MAT2) { + return 4; + } + return (int)value; + } +} + +namespace GLTFVertexAttribute { + enum Value { + UNKNOWN = -1, + POSITION = 0, + NORMAL, + TANGENT, + TEXCOORD_0, + TEXCOORD_1, + COLOR_0, + JOINTS_0, + WEIGHTS_0, + }; + inline Value fromString(const QString& key) { + if (key == "POSITION") { + return POSITION; + } else if (key == "NORMAL") { + return NORMAL; + } else if (key == "TANGENT") { + return TANGENT; + } else if (key == "TEXCOORD_0") { + return TEXCOORD_0; + } else if (key == "TEXCOORD_1") { + return TEXCOORD_1; + } else if (key == "COLOR_0") { + return COLOR_0; + } else if (key == "JOINTS_0") { + return JOINTS_0; + } else if (key == "WEIGHTS_0") { + return WEIGHTS_0; + } + return UNKNOWN; + } } namespace GLTFAccessorComponentType { enum Values { @@ -760,6 +803,13 @@ struct GLTFFile { foreach(auto tex, textures) tex.dump(); } } + + + void populateMaterialNames(); + void sortNodes(); + void normalizeNodeTransforms(); +private: + void reorderNodes(const std::unordered_map& reorderMap); }; class GLTFSerializer : public QObject, public HFMSerializer { @@ -774,7 +824,7 @@ private: hifi::URL _url; hifi::ByteArray _glbBinary; - glm::mat4 getModelTransform(const GLTFNode& node); + const glm::mat4& getModelTransform(const GLTFNode& node); void getSkinInverseBindMatrices(std::vector>& inverseBindMatrixValues); void generateTargetData(int index, float weight, QVector& returnVector); @@ -843,6 +893,9 @@ private: template bool addArrayFromAccessor(GLTFAccessor& accessor, QVector& outarray); + template + bool addArrayFromAttribute(GLTFVertexAttribute::Value vertexAttribute, GLTFAccessor& accessor, QVector& outarray); + void retriangulate(const QVector& in_indices, const QVector& in_vertices, const QVector& in_normals, QVector& out_indices, QVector& out_vertices, QVector& out_normals); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 8e0944db43..497bb60568 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -122,8 +122,7 @@ public: /// A single binding to a joint. 
class Cluster { public: - - int jointIndex; + uint32_t jointIndex; glm::mat4 inverseBindMatrix; Transform inverseBindTransform; }; @@ -289,7 +288,8 @@ public: class TransformNode { public: - uint32_t parent { 0 }; + static const uint32_t INVALID_PARENT_INDEX{ (uint32_t)-1 }; + uint32_t parent { INVALID_PARENT_INDEX }; Transform transform; }; diff --git a/tests-manual/fbx/CMakeLists.txt b/tests-manual/fbx/CMakeLists.txt new file mode 100644 index 0000000000..7221f081fe --- /dev/null +++ b/tests-manual/fbx/CMakeLists.txt @@ -0,0 +1,11 @@ +set(TARGET_NAME fbx-test) +# This is not a testcase -- just set it up as a regular hifi project +setup_hifi_project(Quick Gui) +setup_memory_debugger() +set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/") + +file(GLOB_RECURSE GLB_TEST_FILES "c:/Users/bdavi/git/glTF-Sample-Models/2.0/*.glb") +list(JOIN GLB_TEST_FILES "|" GLB_TEST_FILES) +target_compile_definitions(${TARGET_NAME} PRIVATE -DGLB_TEST_FILES="${GLB_TEST_FILES}") +link_hifi_libraries(shared graphics networking image gpu hfm fbx) +package_libraries_for_deployment() diff --git a/tests-manual/fbx/src/main.cpp b/tests-manual/fbx/src/main.cpp new file mode 100644 index 0000000000..66c3a4f30e --- /dev/null +++ b/tests-manual/fbx/src/main.cpp @@ -0,0 +1,77 @@ +// +// Created by Bradley Austin Davis on 2018/01/11 +// Copyright 2014 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include +#include + +#include + +#include +#include +#include +#include + +#include + +// Currently only used by testing code +inline std::list splitString(const std::string& source, const char delimiter = ' ') { + std::list result; + size_t start = 0, next; + + while (std::string::npos != (next = source.find(delimiter, start))) { + std::string sub = source.substr(start, next - start); + if (!sub.empty()) { + result.push_back(sub); + } + start = next + 1; + } + if (source.size() > start) { + result.push_back(source.substr(start)); + } + return result; +} + +std::list getGlbTestFiles() { + return splitString(GLB_TEST_FILES, '|'); +} + +QtMessageHandler originalHandler; + +void messageHandler(QtMsgType type, const QMessageLogContext& context, const QString& message) { +#if defined(Q_OS_WIN) + OutputDebugStringA(message.toStdString().c_str()); + OutputDebugStringA("\n"); +#endif + originalHandler(type, context, message); +} + +QByteArray readFileBytes(const std::string& filename) { + QFile file(filename.c_str()); + file.open(QFile::ReadOnly); + QByteArray result = file.readAll(); + file.close(); + return result; +} + +void processFile(const std::string& filename) { + qDebug() << filename.c_str(); + GLTFSerializer().read(readFileBytes(filename), {}, QUrl::fromLocalFile(filename.c_str())); +} + +int main(int argc, char** argv) { + QCoreApplication app{ argc, argv }; + originalHandler = qInstallMessageHandler(messageHandler); + + DependencyManager::set(false); + + //processFile("c:/Users/bdavi/git/glTF-Sample-Models/2.0/Box/glTF-Binary/Box.glb"); + + for (const auto& testFile : getGlbTestFiles()) { + processFile(testFile); + } +} From 85b22be68b0a7151e02e8cd63d7dd6383e31d296 Mon Sep 17 00:00:00 2001 From: Brad Davis Date: Mon, 23 Sep 2019 16:42:34 -0700 Subject: [PATCH 018/121] Fixing warnings --- libraries/fbx/src/FBXSerializer.cpp | 4 ++-- libraries/fbx/src/GLTFSerializer.cpp | 26 +++++++++++--------------- libraries/hfm/src/hfm/HFM.h | 3 ++- 3 files changed, 15 insertions(+), 18 
deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index f3c620c929..4ce12d30e4 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1480,7 +1480,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // of skinning information in FBX QString jointID = _connectionChildMap.value(clusterID); hfmCluster.jointIndex = modelIDs.indexOf(jointID); - if (hfmCluster.jointIndex == -1) { + if (hfmCluster.jointIndex == HFMCluster::INVALID_JOINT_INDEX) { qCDebug(modelformat) << "Joint not in model list: " << jointID; hfmCluster.jointIndex = 0; } @@ -1514,7 +1514,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const { HFMCluster cluster; cluster.jointIndex = modelIDs.indexOf(modelID); - if (cluster.jointIndex == -1) { + if (cluster.jointIndex == HFMCluster::INVALID_JOINT_INDEX) { qCDebug(modelformat) << "Model not in model list: " << modelID; cluster.jointIndex = 0; } diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index fe63159543..29b65e98c0 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1002,22 +1002,8 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } - // Build transforms - for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { - auto& gltfNode = _file.nodes[nodeIndex]; - //gltfNode.transforms.push_back(getModelTransform(gltfNode)); - gltf::ParentIndexMap::const_iterator parentItr; - int curNode = nodeIndex; - while (parentsEnd != (parentItr = parentIndices.find(curNode))) { - curNode = parentItr->second; - auto& ancestorNode = _file.nodes[curNode]; - //gltfNode.transforms.push_back(getModelTransform(ancestorNode)); - } - } - // Build joints HFMJoint joint; - joint.distanceToParent = 0; hfmModel.jointIndices["x"] = numNodes; QVector globalTransforms; globalTransforms.resize(numNodes); @@ -1104,7 +1090,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& hfmModel.materials.emplace_back(); HFMMaterial& hfmMaterial = hfmModel.materials.back(); hfmMaterial._material = std::make_shared(); - hfmMaterial.materialID = hfmMaterial.name; + hfmMaterial.materialID = matid; setHFMMaterial(hfmMaterial, material); } @@ -1229,6 +1215,10 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& success = addArrayFromAttribute(vertexAttribute, accessor, colors); weightStride = GLTFAccessorType::count((GLTFAccessorType::Value)accessor.type); break; + + default: + success = false; + break; } if (!success) { continue; @@ -2031,9 +2021,15 @@ bool GLTFSerializer::addArrayFromAttribute(GLTFVertexAttribute::Value vertexAttr if (!addArrayFromAccessor(accessor, outarray)) { qWarning(modelformat) << "There was a problem reading glTF WEIGHTS_0 data for model " << _url; + return false; } + + default: + qWarning(modelformat) << "Unexpected attribute type" << _url; + return false; } + return true; } diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 497bb60568..39c123e9df 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -122,7 +122,8 @@ public: /// A single binding to a joint. 
class Cluster { public: - uint32_t jointIndex; + static const uint32_t INVALID_JOINT_INDEX{ (uint32_t)-1 }; + uint32_t jointIndex{ INVALID_JOINT_INDEX }; glm::mat4 inverseBindMatrix; Transform inverseBindTransform; }; From 725d4ee6432b883e95c444bea30153f2e800e8b6 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 24 Sep 2019 10:57:16 -0700 Subject: [PATCH 019/121] Fix build warnings --- libraries/fbx/src/FBXSerializer.cpp | 4 ++-- libraries/hfm/src/hfm/HFM.h | 3 ++- .../model-baker/src/model-baker/BuildGraphicsMeshTask.cpp | 1 - 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index f3c620c929..e6b4a62b51 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1480,7 +1480,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // of skinning information in FBX QString jointID = _connectionChildMap.value(clusterID); hfmCluster.jointIndex = modelIDs.indexOf(jointID); - if (hfmCluster.jointIndex == -1) { + if (hfmCluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { qCDebug(modelformat) << "Joint not in model list: " << jointID; hfmCluster.jointIndex = 0; } @@ -1514,7 +1514,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const { HFMCluster cluster; cluster.jointIndex = modelIDs.indexOf(modelID); - if (cluster.jointIndex == -1) { + if (cluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { qCDebug(modelformat) << "Model not in model list: " << modelID; cluster.jointIndex = 0; } diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index d13cf3e2d0..51c5f929d8 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -124,7 +124,8 @@ public: /// A single binding to a joint. 
class Cluster { public: - uint32_t jointIndex; + static const uint32_t INVALID_JOINT_INDEX { (uint32_t)-1 }; + uint32_t jointIndex { INVALID_JOINT_INDEX }; glm::mat4 inverseBindMatrix; Transform inverseBindTransform; }; diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp index a9a544c34a..ea05b81d1f 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp @@ -45,7 +45,6 @@ ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const hfm::Dy weightAccumulators.resize(numClusterIndices, 0.0f); for (uint16_t i = 0; i < (uint16_t)deformers.size(); ++i) { const hfm::Deformer& deformer = *deformers[i]; - const hfm::Cluster& cluster = dynamicTransform->clusters[i]; if (deformer.indices.size() != deformer.weights.size()) { reweightedDeformers.trimmedToMatch = true; From c4a1fe9a006ef3a17ea5f3a8842991a53cfa56a8 Mon Sep 17 00:00:00 2001 From: Brad Davis Date: Tue, 24 Sep 2019 11:05:33 -0700 Subject: [PATCH 020/121] Warnings --- libraries/fbx/src/GLTFSerializer.cpp | 18 +++++++++++------- libraries/hfm/src/hfm/HFM.cpp | 2 +- libraries/hfm/src/hfm/HFM.h | 2 +- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 29b65e98c0..774c63e8dd 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1094,20 +1094,25 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& setHFMMaterial(hfmMaterial, material); } + + for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { + } + + int meshCount = _file.meshes.size(); hfmModel.meshes.resize(meshCount); hfmModel.meshExtents.reset(); - - hfmModel.meshes.resize(meshCount); for (int meshIndex = 0; meshIndex < meshCount; ++meshIndex) { const auto& gltfMesh = _file.meshes[meshIndex]; auto& mesh = hfmModel.meshes[meshIndex]; mesh.meshIndex = meshIndex; -#if 0 + if (!hfmModel.hasSkeletonJoints) { HFMCluster cluster; +#if 0 cluster.jointIndex = nodeIndex; +#endif cluster.inverseBindMatrix = glm::mat4(); cluster.inverseBindTransform = Transform(cluster.inverseBindMatrix); mesh.clusters.append(cluster); @@ -1125,7 +1130,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& root.inverseBindMatrix = jointInverseBindTransforms[root.jointIndex]; root.inverseBindTransform = Transform(root.inverseBindMatrix); mesh.clusters.append(root); -#endif QSet meshAttributes; for(const auto &primitive : gltfMesh.primitives) { @@ -1466,7 +1470,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } } -#if 0 // Build weights (adapted from FBXSerializer.cpp) if (hfmModel.hasSkeletonJoints) { int prevMeshClusterIndexCount = mesh.clusterIndices.count(); @@ -1481,10 +1484,12 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& mesh.clusterWeights.push_back(0); } +#if 0 for (int c = 0; c < clusterJoints.size(); ++c) { mesh.clusterIndices[prevMeshClusterIndexCount + c] = originalToNewNodeIndexMap[_file.skins[node.skin].joints[clusterJoints[c]]]; } +#endif // normalize and compress to 16-bits for (int i = 0; i < numVertices; ++i) { @@ -1518,7 +1523,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } } } -#endif #if 0 if (primitive.defined["material"]) { @@ -1622,9 +1626,9 @@ bool GLTFSerializer::buildGeometry(HFMModel& 
hfmModel, const hifi::VariantHash& mesh.meshExtents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); hfmModel.meshExtents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON); hfmModel.meshExtents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); - } + for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { const auto& node = _file.nodes[nodeIndex]; if (-1 == node.mesh) { diff --git a/libraries/hfm/src/hfm/HFM.cpp b/libraries/hfm/src/hfm/HFM.cpp index ae68c15045..3a61ebb6b9 100644 --- a/libraries/hfm/src/hfm/HFM.cpp +++ b/libraries/hfm/src/hfm/HFM.cpp @@ -175,7 +175,7 @@ void HFMModel::computeKdops() { // NOTE: points are in joint-frame ShapeVertices& points = shapeVertices.at(i); - glm::quat rotOffset = jointRotationOffsets.contains(i) ? glm::inverse(jointRotationOffsets[i]) : quat(); + glm::quat rotOffset = jointRotationOffsets.contains((int)i) ? glm::inverse(jointRotationOffsets[i]) : quat(); if (points.size() > 0) { // compute average point glm::vec3 avgPoint = glm::vec3(0.0f); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 39c123e9df..1a3a5ecdb2 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -124,7 +124,7 @@ class Cluster { public: static const uint32_t INVALID_JOINT_INDEX{ (uint32_t)-1 }; uint32_t jointIndex{ INVALID_JOINT_INDEX }; - glm::mat4 inverseBindMatrix; + glm::mat4 inverseBindMatrix{ glm::mat4{ 1.0 } }; Transform inverseBindTransform; }; From 911fd27fc53a7c171887a6f3268930eb8fa35d2d Mon Sep 17 00:00:00 2001 From: Brad Davis Date: Tue, 24 Sep 2019 15:34:51 -0700 Subject: [PATCH 021/121] wip --- libraries/fbx/src/GLTFSerializer.cpp | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 774c63e8dd..3d78202092 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1472,8 +1472,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& // Build weights (adapted from FBXSerializer.cpp) if (hfmModel.hasSkeletonJoints) { - int prevMeshClusterIndexCount = mesh.clusterIndices.count(); - int prevMeshClusterWeightCount = mesh.clusterWeights.count(); const int WEIGHTS_PER_VERTEX = 4; const float ALMOST_HALF = 0.499f; int numVertices = mesh.vertices.size() - prevMeshVerticesCount; @@ -1485,6 +1483,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } #if 0 + int prevMeshClusterIndexCount = mesh.clusterIndices.count(); for (int c = 0; c < clusterJoints.size(); ++c) { mesh.clusterIndices[prevMeshClusterIndexCount + c] = originalToNewNodeIndexMap[_file.skins[node.skin].joints[clusterJoints[c]]]; @@ -1492,6 +1491,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& #endif // normalize and compress to 16-bits + int prevMeshClusterWeightCount = mesh.clusterWeights.count(); for (int i = 0; i < numVertices; ++i) { int j = i * WEIGHTS_PER_VERTEX; @@ -1507,20 +1507,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } else { mesh.clusterWeights[prevMeshClusterWeightCount + j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); } - for (int clusterIndex = 0; clusterIndex < mesh.clusters.size() - 1; ++clusterIndex) { - ShapeVertices& points = hfmModel.shapeVertices.at(clusterIndex); - glm::vec3 globalMeshScale = extractScale(globalTransforms[nodeIndex]); - const glm::mat4 meshToJoint = glm::scale(glm::mat4(), globalMeshScale) * jointInverseBindTransforms[clusterIndex]; - 
- const float EXPANSION_WEIGHT_THRESHOLD = 0.25f; - if (mesh.clusterWeights[j] >= EXPANSION_WEIGHT_THRESHOLD) { - // TODO: fix transformed vertices being pushed back - auto& vertex = mesh.vertices[i]; - const glm::mat4 vertexTransform = meshToJoint * (glm::translate(glm::mat4(), vertex)); - glm::vec3 transformedVertex = hfmModel.joints[clusterIndex].translation * (extractTranslation(vertexTransform)); - points.push_back(transformedVertex); - } - } } } From af460d7f5fbbe9a2a6aaea3dba504045a809707a Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 25 Sep 2019 15:22:29 -0700 Subject: [PATCH 022/121] Add hfm::Shape::transformedExtents. Mark select hfm fields as deprecated. --- libraries/hfm/src/hfm/HFM.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 51c5f929d8..d4d6dd33d0 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -158,7 +158,7 @@ public: QVector quadTrianglesIndices; // original indices from the FBX mesh of the quad converted as triangles QVector triangleIndices; // original indices from the FBX mesh - QString materialID; + QString materialID; // DEPRECATED }; class Material { @@ -238,14 +238,14 @@ public: QVector colors; QVector texCoords; QVector texCoords1; - QVector clusterIndices; - QVector clusterWeights; + QVector clusterIndices; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::deformers, hfm::Deformer) + QVector clusterWeights; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::deformers, hfm::Deformer) QVector originalIndices; - QVector clusters; + QVector clusters; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::clusters) - Extents meshExtents; - glm::mat4 modelTransform; + Extents meshExtents; // DEPRECATED (see hfm::Shape::transformedExtents) + glm::mat4 modelTransform; // DEPRECATED (see hfm::Shape::transform, hfm::TransformNode, hfm::Model::transforms) QVector blendshapes; @@ -317,6 +317,8 @@ public: uint32_t meshPart { UNDEFINED_KEY }; uint32_t material { UNDEFINED_KEY }; uint32_t transform { UNDEFINED_KEY }; // The static transform node when not taking into account rigging/skinning + // TODO: Have all serializers calculate hfm::Shape::transformedExtents in world space where they previously calculated hfm::Mesh::meshExtents. Change all code that uses hfm::Mesh::meshExtents to use this instead. 
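+    // Note that these extents are tracked per shape rather than per mesh, so two instances of the same mesh with different transform nodes can have different extents.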
+ Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after the transform node and parent transform nodes are applied, while not taking into account rigging/skinning uint32_t dynamicTransform { UNDEFINED_KEY }; }; From 2077da6f371bdba5cdb7fb13c8f89dc67085dde4 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 26 Sep 2019 17:02:42 -0700 Subject: [PATCH 023/121] Create ReweightDeformersTask --- .../model-baker/src/model-baker/Baker.cpp | 8 +- .../model-baker/src/model-baker/BakerTypes.h | 8 ++ .../src/model-baker/BuildGraphicsMeshTask.cpp | 110 +++------------- .../src/model-baker/BuildGraphicsMeshTask.h | 6 +- .../src/model-baker/ReweightDeformersTask.cpp | 119 ++++++++++++++++++ .../src/model-baker/ReweightDeformersTask.h | 29 +++++ 6 files changed, 180 insertions(+), 100 deletions(-) create mode 100644 libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp create mode 100644 libraries/model-baker/src/model-baker/ReweightDeformersTask.h diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index 1a68d3508d..a567537105 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -13,6 +13,7 @@ #include "BakerTypes.h" #include "ModelMath.h" +#include "ReweightDeformersTask.h" #include "BuildGraphicsMeshTask.h" #include "CalculateMeshNormalsTask.h" #include "CalculateMeshTangentsTask.h" @@ -151,8 +152,13 @@ namespace baker { const auto calculateBlendshapeTangentsInputs = CalculateBlendshapeTangentsTask::Input(normalsPerBlendshapePerMesh, blendshapesPerMeshIn, meshesIn).asVarying(); const auto tangentsPerBlendshapePerMesh = model.addJob("CalculateBlendshapeTangents", calculateBlendshapeTangentsInputs); + // Skinning weight calculations + // NOTE: Due to limitations in the current graphics::MeshPointer representation, the output list of ReweightedDeformers is per-mesh. An element is empty if there are no deformers for the mesh of the same index. 
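+        // The input order (meshes, shapes, dynamic transforms, deformers) must match ReweightDeformersTask::Input, which the task unpacks positionally via get0()..get3().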
+ const auto reweightDeformersInputs = ReweightDeformersTask::Input(meshesIn, shapesIn, dynamicTransformsIn, deformersIn).asVarying(); + const auto reweightedDeformers = model.addJob("ReweightDeformers", reweightDeformersInputs); + // Build the graphics::MeshPointer for each hfm::Mesh - const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, dynamicTransformsIn, deformersIn).asVarying(); + const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, dynamicTransformsIn, reweightedDeformers).asVarying(); const auto graphicsMeshes = model.addJob("BuildGraphicsMesh", buildGraphicsMeshInputs); // Prepare joint information diff --git a/libraries/model-baker/src/model-baker/BakerTypes.h b/libraries/model-baker/src/model-baker/BakerTypes.h index 3d16afab2e..8760fa6db4 100644 --- a/libraries/model-baker/src/model-baker/BakerTypes.h +++ b/libraries/model-baker/src/model-baker/BakerTypes.h @@ -36,6 +36,14 @@ namespace baker { using TangentsPerBlendshape = std::vector>; using MeshIndicesToModelNames = QHash; + + class ReweightedDeformers { + public: + std::vector indices; + std::vector weights; + uint16_t weightsPerVertex { 0 }; + bool trimmedToMatch { false }; + }; }; #endif // hifi_BakerTypes_h diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp index ea05b81d1f..deacd6a977 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp @@ -2,8 +2,8 @@ // BuildGraphicsMeshTask.h // model-baker/src/model-baker // -// Created by Sabrina Shanman on 2018/12/06. -// Copyright 2018 High Fidelity, Inc. +// Created by Sabrina Shanman on 2019/09/16. +// Copyright 2019 High Fidelity, Inc. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -27,83 +27,7 @@ glm::vec3 normalizeDirForPacking(const glm::vec3& dir) { return dir; } -class ReweightedDeformers { -public: - std::vector indices; - std::vector weights; - bool trimmedToMatch { false }; -}; - -ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const hfm::DynamicTransform* dynamicTransform, const std::vector deformers, const uint16_t weightsPerVertex) { - size_t numClusterIndices = numMeshVertices * weightsPerVertex; - ReweightedDeformers reweightedDeformers; - // TODO: Consider having a rootCluster property in the DynamicTransform rather than appending the root to the end of the cluster list. 
- reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(deformers.size() - 1)); - reweightedDeformers.weights.resize(numClusterIndices, 0); - - std::vector weightAccumulators; - weightAccumulators.resize(numClusterIndices, 0.0f); - for (uint16_t i = 0; i < (uint16_t)deformers.size(); ++i) { - const hfm::Deformer& deformer = *deformers[i]; - - if (deformer.indices.size() != deformer.weights.size()) { - reweightedDeformers.trimmedToMatch = true; - } - size_t numIndicesOrWeights = std::min(deformer.indices.size(), deformer.weights.size()); - for (size_t j = 0; j < numIndicesOrWeights; ++j) { - uint32_t index = deformer.indices[j]; - float weight = deformer.weights[j]; - - // look for an unused slot in the weights vector - uint32_t weightIndex = index * weightsPerVertex; - uint32_t lowestIndex = -1; - float lowestWeight = FLT_MAX; - uint16_t k = 0; - for (; k < weightsPerVertex; k++) { - if (weightAccumulators[weightIndex + k] == 0.0f) { - reweightedDeformers.indices[weightIndex + k] = i; - weightAccumulators[weightIndex + k] = weight; - break; - } - if (weightAccumulators[weightIndex + k] < lowestWeight) { - lowestIndex = k; - lowestWeight = weightAccumulators[weightIndex + k]; - } - } - if (k == weightsPerVertex && weight > lowestWeight) { - // no space for an additional weight; we must replace the lowest - weightAccumulators[weightIndex + lowestIndex] = weight; - reweightedDeformers.indices[weightIndex + lowestIndex] = i; - } - } - } - - // now that we've accumulated the most relevant weights for each vertex - // normalize and compress to 16-bits - for (size_t i = 0; i < numMeshVertices; ++i) { - size_t j = i * weightsPerVertex; - - // normalize weights into uint16_t - float totalWeight = 0.0f; - for (size_t k = j; k < j + weightsPerVertex; ++k) { - totalWeight += weightAccumulators[k]; - } - - const float ALMOST_HALF = 0.499f; - if (totalWeight > 0.0f) { - float weightScalingFactor = (float)(UINT16_MAX) / totalWeight; - for (size_t k = j; k < j + weightsPerVertex; ++k) { - reweightedDeformers.weights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF); - } - } else { - reweightedDeformers.weights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); - } - } - - return reweightedDeformers; -} - -void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, const baker::MeshNormals& meshNormals, const baker::MeshTangents& meshTangentsIn, const hfm::DynamicTransform* dynamicTransform, const std::vector meshDeformers) { +void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, const baker::MeshNormals& meshNormals, const baker::MeshTangents& meshTangentsIn, uint16_t numDeformerControllers, const baker::ReweightedDeformers reweightedDeformers) { auto graphicsMesh = std::make_shared(); // Fill tangents with a dummy value to force tangents to be present if there are normals @@ -162,19 +86,16 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics // Support for 4 skinning clusters: // 4 Indices are uint8 ideally, uint16 if more than 256. - const auto clusterIndiceElement = ((meshDeformers.size() < (size_t)UINT8_MAX) ? gpu::Element(gpu::VEC4, gpu::UINT8, gpu::XYZW) : gpu::Element(gpu::VEC4, gpu::UINT16, gpu::XYZW)); + const auto clusterIndiceElement = ((numDeformerControllers < (uint16_t)UINT8_MAX) ? 
gpu::Element(gpu::VEC4, gpu::UINT8, gpu::XYZW) : gpu::Element(gpu::VEC4, gpu::UINT16, gpu::XYZW)); // 4 Weights are normalized 16bits const auto clusterWeightElement = gpu::Element(gpu::VEC4, gpu::NUINT16, gpu::XYZW); - // Calculate a more condensed view of all the deformer weights - const uint16_t NUM_CLUSTERS_PER_VERT = 4; - ReweightedDeformers reweightedDeformers = getReweightedDeformers(hfmMesh.vertices.size(), dynamicTransform, meshDeformers, NUM_CLUSTERS_PER_VERT); // Cluster indices and weights must be the same sizes if (reweightedDeformers.trimmedToMatch) { HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask -- The number of indices and weights for a deformer had different sizes and have been trimmed to match"); } // Record cluster sizes - const size_t numVertClusters = reweightedDeformers.indices.size() / NUM_CLUSTERS_PER_VERT; + const size_t numVertClusters = reweightedDeformers.indices.size() / reweightedDeformers.weightsPerVertex; const size_t clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize(); const size_t clusterWeightsSize = numVertClusters * clusterWeightElement.getSize(); @@ -263,7 +184,7 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics // Clusters data if (clusterIndicesSize > 0) { - if (meshDeformers.size() < UINT8_MAX) { + if (numDeformerControllers < (uint16_t)UINT8_MAX) { // yay! we can fit the clusterIndices within 8-bits int32_t numIndices = (int32_t)reweightedDeformers.indices.size(); std::vector packedDeformerIndices; @@ -461,7 +382,7 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const const auto& tangentsPerMesh = input.get4(); const auto& shapes = input.get5(); const auto& dynamicTransforms = input.get6(); - const auto& deformers = input.get7(); + const auto& reweightedDeformersPerMesh = input.get7(); // Currently, there is only (at most) one dynamicTransform per mesh // An undefined shape.dynamicTransform has the value hfm::UNDEFINED_KEY @@ -478,20 +399,17 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const for (int i = 0; i < n; i++) { graphicsMeshes.emplace_back(); auto& graphicsMesh = graphicsMeshes[i]; + const auto& reweightedDeformers = reweightedDeformersPerMesh[i]; - auto dynamicTransformIndex = dynamicTransformPerMesh[i]; - const hfm::DynamicTransform* dynamicTransform = nullptr; - std::vector meshDeformers; - if (dynamicTransformIndex != hfm::UNDEFINED_KEY) { - dynamicTransform = &dynamicTransforms[dynamicTransformIndex]; - for (const auto& deformerIndex : dynamicTransform->deformers) { - const auto& deformer = deformers[deformerIndex]; - meshDeformers.push_back(&deformer); - } + uint16_t numDeformerControllers = 0; + if (reweightedDeformers.weightsPerVertex != 0) { + uint32_t dynamicTransformIndex = dynamicTransformPerMesh[i]; + const hfm::DynamicTransform& dynamicTransform = dynamicTransforms[dynamicTransformIndex]; + numDeformerControllers = (uint16_t)dynamicTransform.deformers.size(); } // Try to create the graphics::Mesh - buildGraphicsMesh(meshes[i], graphicsMesh, baker::safeGet(normalsPerMesh, i), baker::safeGet(tangentsPerMesh, i), dynamicTransform, meshDeformers); + buildGraphicsMesh(meshes[i], graphicsMesh, baker::safeGet(normalsPerMesh, i), baker::safeGet(tangentsPerMesh, i), numDeformerControllers, reweightedDeformers); // Choose a name for the mesh if (graphicsMesh) { diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h 
index be1e4350be..1bb9b9be0c 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h @@ -2,8 +2,8 @@ // BuildGraphicsMeshTask.h // model-baker/src/model-baker // -// Created by Sabrina Shanman on 2018/12/06. -// Copyright 2018 High Fidelity, Inc. +// Created by Sabrina Shanman on 2019/09/16. +// Copyright 2019 High Fidelity, Inc. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -20,7 +20,7 @@ class BuildGraphicsMeshTask { public: - using Input = baker::VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh, std::vector, std::vector, std::vector>; + using Input = baker::VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh, std::vector, std::vector, std::vector>; using Output = std::vector; using JobModel = baker::Job::ModelIO; diff --git a/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp b/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp new file mode 100644 index 0000000000..2dd5030c78 --- /dev/null +++ b/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp @@ -0,0 +1,119 @@ +// +// ReweightDeformersTask.h +// model-baker/src/model-baker +// +// Created by Sabrina Shanman on 2019/09/26. +// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include "ReweightDeformersTask.h" + +baker::ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const hfm::DynamicTransform* dynamicTransform, const std::vector deformers, const uint16_t weightsPerVertex) { + size_t numClusterIndices = numMeshVertices * weightsPerVertex; + baker::ReweightedDeformers reweightedDeformers; + reweightedDeformers.weightsPerVertex = weightsPerVertex; + // TODO: Consider having a rootCluster property in the DynamicTransform rather than appending the root to the end of the cluster list. 
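+    // Each vertex gets weightsPerVertex influence slots. Unfilled slots default to the last deformer (the appended root cluster) with a weight of 0.
+    // The accumulation loop below keeps the strongest influences per vertex, replacing the lowest-weighted slot when all slots are full and a stronger weight arrives,
+    // and the surviving weights are then normalized per vertex and compressed to 16 bits.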
+ reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(deformers.size() - 1)); + reweightedDeformers.weights.resize(numClusterIndices, 0); + + std::vector weightAccumulators; + weightAccumulators.resize(numClusterIndices, 0.0f); + for (uint16_t i = 0; i < (uint16_t)deformers.size(); ++i) { + const hfm::Deformer& deformer = *deformers[i]; + + if (deformer.indices.size() != deformer.weights.size()) { + reweightedDeformers.trimmedToMatch = true; + } + size_t numIndicesOrWeights = std::min(deformer.indices.size(), deformer.weights.size()); + for (size_t j = 0; j < numIndicesOrWeights; ++j) { + uint32_t index = deformer.indices[j]; + float weight = deformer.weights[j]; + + // look for an unused slot in the weights vector + uint32_t weightIndex = index * weightsPerVertex; + uint32_t lowestIndex = -1; + float lowestWeight = FLT_MAX; + uint16_t k = 0; + for (; k < weightsPerVertex; k++) { + if (weightAccumulators[weightIndex + k] == 0.0f) { + reweightedDeformers.indices[weightIndex + k] = i; + weightAccumulators[weightIndex + k] = weight; + break; + } + if (weightAccumulators[weightIndex + k] < lowestWeight) { + lowestIndex = k; + lowestWeight = weightAccumulators[weightIndex + k]; + } + } + if (k == weightsPerVertex && weight > lowestWeight) { + // no space for an additional weight; we must replace the lowest + weightAccumulators[weightIndex + lowestIndex] = weight; + reweightedDeformers.indices[weightIndex + lowestIndex] = i; + } + } + } + + // now that we've accumulated the most relevant weights for each vertex + // normalize and compress to 16-bits + for (size_t i = 0; i < numMeshVertices; ++i) { + size_t j = i * weightsPerVertex; + + // normalize weights into uint16_t + float totalWeight = 0.0f; + for (size_t k = j; k < j + weightsPerVertex; ++k) { + totalWeight += weightAccumulators[k]; + } + + const float ALMOST_HALF = 0.499f; + if (totalWeight > 0.0f) { + float weightScalingFactor = (float)(UINT16_MAX) / totalWeight; + for (size_t k = j; k < j + weightsPerVertex; ++k) { + reweightedDeformers.weights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF); + } + } else { + reweightedDeformers.weights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); + } + } + + return reweightedDeformers; +} + +void ReweightDeformersTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) { + const uint16_t NUM_WEIGHTS_PER_VERTEX { 4 }; + + const auto& meshes = input.get0(); + const auto& shapes = input.get1(); + const auto& dynamicTransforms = input.get2(); + const auto& deformers = input.get3(); + auto& reweightedDeformers = output; + + // Currently, there is only (at most) one dynamicTransform per mesh + // An undefined shape.dynamicTransform has the value hfm::UNDEFINED_KEY + std::vector dynamicTransformPerMesh; + dynamicTransformPerMesh.resize(meshes.size(), hfm::UNDEFINED_KEY); + for (const auto& shape : shapes) { + uint32_t dynamicTransformIndex = shape.dynamicTransform; + dynamicTransformPerMesh[shape.mesh] = dynamicTransformIndex; + } + + reweightedDeformers.reserve(meshes.size()); + for (size_t i = 0; i < meshes.size(); ++i) { + const auto& mesh = meshes[i]; + uint32_t dynamicTransformIndex = dynamicTransformPerMesh[i]; + + const hfm::DynamicTransform* dynamicTransform = nullptr; + std::vector meshDeformers; + if (dynamicTransformIndex != hfm::UNDEFINED_KEY) { + dynamicTransform = &dynamicTransforms[dynamicTransformIndex]; + for (const auto& deformerIndex : dynamicTransform->deformers) { + const auto& deformer = 
deformers[deformerIndex]; + meshDeformers.push_back(&deformer); + } + } + + reweightedDeformers.push_back(getReweightedDeformers((size_t)mesh.vertices.size(), dynamicTransform, meshDeformers, NUM_WEIGHTS_PER_VERTEX)); + } +} diff --git a/libraries/model-baker/src/model-baker/ReweightDeformersTask.h b/libraries/model-baker/src/model-baker/ReweightDeformersTask.h new file mode 100644 index 0000000000..98befa8000 --- /dev/null +++ b/libraries/model-baker/src/model-baker/ReweightDeformersTask.h @@ -0,0 +1,29 @@ +// +// ReweightDeformersTask.h +// model-baker/src/model-baker +// +// Created by Sabrina Shanman on 2019/09/26. +// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_ReweightDeformersTask_h +#define hifi_ReweightDeformersTask_h + +#include + +#include "Engine.h" +#include "BakerTypes.h" + +class ReweightDeformersTask { +public: + using Input = baker::VaryingSet4, std::vector, std::vector, std::vector>; + using Output = std::vector; + using JobModel = baker::Job::ModelIO; + + void run(const baker::BakeContextPointer& context, const Input& input, Output& output); +}; + +#endif // hifi_ReweightDeformersTask_h From 9518aa9ed03433065ee2d5aa7087baabcfdf698f Mon Sep 17 00:00:00 2001 From: Brad Davis Date: Fri, 27 Sep 2019 08:33:55 -0700 Subject: [PATCH 024/121] . --- libraries/fbx/src/GLTFSerializer.cpp | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 3d78202092..7fdbcce141 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1482,14 +1482,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& mesh.clusterWeights.push_back(0); } -#if 0 - int prevMeshClusterIndexCount = mesh.clusterIndices.count(); - for (int c = 0; c < clusterJoints.size(); ++c) { - mesh.clusterIndices[prevMeshClusterIndexCount + c] = - originalToNewNodeIndexMap[_file.skins[node.skin].joints[clusterJoints[c]]]; - } -#endif - // normalize and compress to 16-bits int prevMeshClusterWeightCount = mesh.clusterWeights.count(); for (int i = 0; i < numVertices; ++i) { @@ -1510,12 +1502,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } } -#if 0 - if (primitive.defined["material"]) { - part.materialID = materialIDs[primitive.material]; - } -#endif - mesh.parts.push_back(part); // populate the texture coordinates if they don't exist From cd94dc15f9fd4d05368704246c70b118f7f623aa Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 27 Sep 2019 16:13:55 -0700 Subject: [PATCH 025/121] Add CollectShapeVerticesTask --- .../model-baker/src/model-baker/Baker.cpp | 11 ++- .../model-baker/CollectShapeVerticesTask.cpp | 92 +++++++++++++++++++ .../model-baker/CollectShapeVerticesTask.h | 30 ++++++ 3 files changed, 131 insertions(+), 2 deletions(-) create mode 100644 libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp create mode 100644 libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index a567537105..ccb5e1816f 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -14,6 +14,7 @@ #include "BakerTypes.h" #include "ModelMath.h" #include "ReweightDeformersTask.h" +#include 
"CollectShapeVerticesTask.h" #include "BuildGraphicsMeshTask.h" #include "CalculateMeshNormalsTask.h" #include "CalculateMeshTangentsTask.h" @@ -105,7 +106,7 @@ namespace baker { class BuildModelTask { public: - using Input = VaryingSet6, std::vector, QMap, QHash, FlowData>; + using Input = VaryingSet7, std::vector, QMap, QHash, FlowData, std::vector>; using Output = hfm::Model::Pointer; using JobModel = Job::ModelIO; @@ -116,6 +117,9 @@ namespace baker { hfmModelOut->jointRotationOffsets = input.get3(); hfmModelOut->jointIndices = input.get4(); hfmModelOut->flowData = input.get5(); + hfmModelOut->shapeVertices = input.get6(); + // These depend on the ShapeVertices + // TODO: Create a task for this rather than calculating it here hfmModelOut->computeKdops(); output = hfmModelOut; } @@ -156,6 +160,9 @@ namespace baker { // NOTE: Due to limitations in the current graphics::MeshPointer representation, the output list of ReweightedDeformers is per-mesh. An element is empty if there are no deformers for the mesh of the same index. const auto reweightDeformersInputs = ReweightDeformersTask::Input(meshesIn, shapesIn, dynamicTransformsIn, deformersIn).asVarying(); const auto reweightedDeformers = model.addJob("ReweightDeformers", reweightDeformersInputs); + // Shape vertices are included/rejected based on skinning weight, and thus must use the reweighted deformers. + const auto collectShapeVerticesInputs = CollectShapeVerticesTask::Input(meshesIn, shapesIn, jointsIn, dynamicTransformsIn, reweightedDeformers).asVarying(); + const auto shapeVerticesPerJoint = model.addJob("CollectShapeVertices", collectShapeVerticesInputs); // Build the graphics::MeshPointer for each hfm::Mesh const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, dynamicTransformsIn, reweightedDeformers).asVarying(); @@ -191,7 +198,7 @@ namespace baker { const auto blendshapesPerMeshOut = model.addJob("BuildBlendshapes", buildBlendshapesInputs); const auto buildMeshesInputs = BuildMeshesTask::Input(meshesIn, graphicsMeshes, normalsPerMesh, tangentsPerMesh, blendshapesPerMeshOut).asVarying(); const auto meshesOut = model.addJob("BuildMeshes", buildMeshesInputs); - const auto buildModelInputs = BuildModelTask::Input(hfmModelIn, meshesOut, jointsOut, jointRotationOffsets, jointIndices, flowData).asVarying(); + const auto buildModelInputs = BuildModelTask::Input(hfmModelIn, meshesOut, jointsOut, jointRotationOffsets, jointIndices, flowData, shapeVerticesPerJoint).asVarying(); const auto hfmModelOut = model.addJob("BuildModel", buildModelInputs); output = Output(hfmModelOut, materialMapping, dracoMeshes, dracoErrors, materialList); diff --git a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp new file mode 100644 index 0000000000..755b61b7df --- /dev/null +++ b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp @@ -0,0 +1,92 @@ +// +// CollectShapeVerticesTask.h +// model-baker/src/model-baker +// +// Created by Sabrina Shanman on 2019/09/27. +// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include "CollectShapeVerticesTask.h" + +#include + +// Used to track and avoid duplicate shape vertices, as multiple shapes can have the same mesh and dynamicTransform +class VertexSource { +public: + uint32_t mesh; + uint32_t dynamicTransform; + + bool operator==(const VertexSource& other) const { + return mesh == other.mesh && + dynamicTransform == other.dynamicTransform; + } +}; + +void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) { + const auto& meshes = input.get0(); + const auto& shapes = input.get1(); + const auto& joints = input.get2(); + const auto& dynamicTransforms = input.get3(); + const auto& reweightedDeformers = input.get4(); + auto& shapeVerticesPerJoint = output; + + shapeVerticesPerJoint.reserve(joints.size()); + std::vector> vertexSourcesPerJoint; + vertexSourcesPerJoint.resize(joints.size()); + for (size_t i = 0; i < shapes.size(); ++i) { + const auto& shape = shapes[i]; + const uint32_t dynamicTransformKey = shape.dynamicTransform; + if (dynamicTransformKey == hfm::UNDEFINED_KEY) { + continue; + } + + VertexSource vertexSource; + vertexSource.mesh = shape.mesh; + vertexSource.dynamicTransform = dynamicTransformKey; + + const auto& dynamicTransform = dynamicTransforms[dynamicTransformKey]; + for (size_t j = 0; j < dynamicTransform.clusters.size(); ++j) { + const auto& cluster = dynamicTransform.clusters[j]; + const uint32_t jointIndex = cluster.jointIndex; + + auto& vertexSources = vertexSourcesPerJoint[jointIndex]; + if (std::find(vertexSources.cbegin(), vertexSources.cend(), vertexSource) == vertexSources.cend()) { + vertexSources.push_back(vertexSource); + auto& shapeVertices = shapeVerticesPerJoint[jointIndex]; + + const uint16_t deformerIndex = dynamicTransform.deformers[j]; + const auto& mesh = meshes[shape.mesh]; + const auto& vertices = mesh.vertices; + const auto& reweightedDeformer = reweightedDeformers[shape.mesh]; + const glm::mat4 meshToJoint = cluster.inverseBindMatrix; + + const uint16_t weightsPerVertex = reweightedDeformer.weightsPerVertex; + if (weightsPerVertex == 0) { + for (int vertexIndex = 0; vertexIndex < (int)vertices.size(); ++vertexIndex) { + const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertices[vertexIndex]); + shapeVertices.push_back(extractTranslation(vertexTransform)); + } + } else { + for (int vertexIndex = 0; vertexIndex < (int)vertices.size(); ++vertexIndex) { + for (uint16_t weightIndex = 0; weightIndex < weightsPerVertex; ++weightIndex) { + const size_t index = vertexIndex*4 + weightIndex; + const uint16_t clusterIndex = reweightedDeformer.indices[index]; + const uint16_t clusterWeight = reweightedDeformer.weights[index]; + // Remember vertices associated with this joint with at least 1/4 weight + const uint16_t EXPANSION_WEIGHT_THRESHOLD = std::numeric_limits::max() / 4; + if (clusterIndex != j || clusterWeight < EXPANSION_WEIGHT_THRESHOLD) { + continue; + } + + const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertices[vertexIndex]); + shapeVertices.push_back(extractTranslation(vertexTransform)); + } + } + } + } + } + } +} diff --git a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h new file mode 100644 index 0000000000..3111dcadc1 --- /dev/null +++ b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h @@ -0,0 +1,30 @@ +// +// 
CollectShapeVerticesTask.h +// model-baker/src/model-baker +// +// Created by Sabrina Shanman on 2019/09/27. +// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_CollectShapeVerticesTask_h +#define hifi_CollectShapeVerticesTask_h + +#include + +#include "Engine.h" +#include "BakerTypes.h" + +class CollectShapeVerticesTask { +public: + using Input = baker::VaryingSet5, std::vector, std::vector, std::vector, std::vector>; + using Output = std::vector; + using JobModel = baker::Job::ModelIO; + + void run(const baker::BakeContextPointer& context, const Input& input, Output& output); +}; + +#endif // hifi_CollectShapeVerticesTask_h + From b4c50b3f48fde359affe672ae9890ae86a8d21c1 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 27 Sep 2019 17:46:12 -0700 Subject: [PATCH 026/121] Remove unused parameter from getReweightedDeformers --- .../model-baker/src/model-baker/ReweightDeformersTask.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp b/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp index 2dd5030c78..98f9d419ba 100644 --- a/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp +++ b/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp @@ -11,7 +11,7 @@ #include "ReweightDeformersTask.h" -baker::ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const hfm::DynamicTransform* dynamicTransform, const std::vector deformers, const uint16_t weightsPerVertex) { +baker::ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const std::vector deformers, const uint16_t weightsPerVertex) { size_t numClusterIndices = numMeshVertices * weightsPerVertex; baker::ReweightedDeformers reweightedDeformers; reweightedDeformers.weightsPerVertex = weightsPerVertex; @@ -114,6 +114,6 @@ void ReweightDeformersTask::run(const baker::BakeContextPointer& context, const } } - reweightedDeformers.push_back(getReweightedDeformers((size_t)mesh.vertices.size(), dynamicTransform, meshDeformers, NUM_WEIGHTS_PER_VERTEX)); + reweightedDeformers.push_back(getReweightedDeformers((size_t)mesh.vertices.size(), meshDeformers, NUM_WEIGHTS_PER_VERTEX)); } } From f773bdeca264d629cbacb3fd83d085f11c55b8cb Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 30 Sep 2019 09:07:42 -0700 Subject: [PATCH 027/121] Remove unused variable --- .../model-baker/src/model-baker/CollectShapeVerticesTask.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp index 755b61b7df..8aeb0145d5 100644 --- a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp +++ b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp @@ -57,7 +57,6 @@ void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, con vertexSources.push_back(vertexSource); auto& shapeVertices = shapeVerticesPerJoint[jointIndex]; - const uint16_t deformerIndex = dynamicTransform.deformers[j]; const auto& mesh = meshes[shape.mesh]; const auto& vertices = mesh.vertices; const auto& reweightedDeformer = reweightedDeformers[shape.mesh]; From 6eed3e43bcafc67484c7d0694d71f4c9199125ec Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 2 Oct 2019 12:47:04 -0700 Subject: [PATCH 028/121] Fix skinning deformer 
data being added to graphics::Mesh even if the mesh has no skinning --- .../model-baker/src/model-baker/ReweightDeformersTask.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp b/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp index 98f9d419ba..097833e110 100644 --- a/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp +++ b/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp @@ -12,8 +12,12 @@ #include "ReweightDeformersTask.h" baker::ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const std::vector deformers, const uint16_t weightsPerVertex) { - size_t numClusterIndices = numMeshVertices * weightsPerVertex; baker::ReweightedDeformers reweightedDeformers; + if (deformers.size() == 0) { + return reweightedDeformers; + } + + size_t numClusterIndices = numMeshVertices * weightsPerVertex; reweightedDeformers.weightsPerVertex = weightsPerVertex; // TODO: Consider having a rootCluster property in the DynamicTransform rather than appending the root to the end of the cluster list. reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(deformers.size() - 1)); From 001718224eece68d2f95493c0b29c7b92c6d0324 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Wed, 2 Oct 2019 14:39:00 -0700 Subject: [PATCH 029/121] fine tune the recent changes to be able to return to rendering of the hfm::Mesh loaded, not the shapes yet --- .../model-baker/src/model-baker/BuildGraphicsMeshTask.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp index deacd6a977..8c27968de9 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp @@ -95,7 +95,7 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask -- The number of indices and weights for a deformer had different sizes and have been trimmed to match"); } // Record cluster sizes - const size_t numVertClusters = reweightedDeformers.indices.size() / reweightedDeformers.weightsPerVertex; + const size_t numVertClusters = (reweightedDeformers.weightsPerVertex ? 
hfmMesh.clusterIndices.size() / reweightedDeformers.weightsPerVertex : 0); const size_t clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize(); const size_t clusterWeightsSize = numVertClusters * clusterWeightElement.getSize(); @@ -404,8 +404,10 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const uint16_t numDeformerControllers = 0; if (reweightedDeformers.weightsPerVertex != 0) { uint32_t dynamicTransformIndex = dynamicTransformPerMesh[i]; - const hfm::DynamicTransform& dynamicTransform = dynamicTransforms[dynamicTransformIndex]; - numDeformerControllers = (uint16_t)dynamicTransform.deformers.size(); + if (dynamicTransformIndex != hfm::UNDEFINED_KEY) { + const hfm::DynamicTransform& dynamicTransform = dynamicTransforms[dynamicTransformIndex]; + numDeformerControllers = (uint16_t)dynamicTransform.deformers.size(); + } } // Try to create the graphics::Mesh From bac22c69c1ed645b3b1adfc494936c8e5c27c948 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 11 Sep 2019 17:17:30 -0700 Subject: [PATCH 030/121] Move ExtractedMesh out of HFM --- libraries/fbx/src/FBXSerializer.h | 9 ++++++++- libraries/hfm/src/hfm/HFM.h | 9 --------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.h b/libraries/fbx/src/FBXSerializer.h index 7d41f98444..c9468708a6 100644 --- a/libraries/fbx/src/FBXSerializer.h +++ b/libraries/fbx/src/FBXSerializer.h @@ -100,7 +100,14 @@ public: {} }; -class ExtractedMesh; +class ExtractedMesh { +public: + hfm::Mesh mesh; + QMultiHash newIndices; + QVector > blendshapeIndexMaps; + QVector > partMaterialTextures; + QHash texcoordSetMap; +}; class FBXSerializer : public HFMSerializer { public: diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index d4d6dd33d0..29c4af9ec9 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -381,15 +381,6 @@ public: }; -class ExtractedMesh { -public: - hfm::Mesh mesh; - QMultiHash newIndices; - QVector > blendshapeIndexMaps; - QVector > partMaterialTextures; - QHash texcoordSetMap; -}; - typedef hfm::Blendshape HFMBlendshape; typedef hfm::JointShapeInfo HFMJointShapeInfo; typedef hfm::Joint HFMJoint; From ff5fef9c3a18326d45529cc3c4b269b395cf190c Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 12 Sep 2019 10:58:52 -0700 Subject: [PATCH 031/121] Update FBXSerializer to reference shapes, support instancing (deformers WIP) --- libraries/fbx/src/FBXSerializer.cpp | 527 ++++++++++++----------- libraries/fbx/src/FBXSerializer.h | 1 + libraries/fbx/src/FBXSerializer_Mesh.cpp | 10 +- 3 files changed, 276 insertions(+), 262 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index e6b4a62b51..e8388451d4 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -145,40 +145,19 @@ public: bool isLimbNode; // is this FBXModel transform is a "LimbNode" i.e. 
a joint }; -glm::mat4 getGlobalTransform(const QMultiMap& _connectionParentMap, - const QHash& fbxModels, QString nodeID, bool mixamoHack, const QString& url) { - glm::mat4 globalTransform; - QVector visitedNodes; // Used to prevent following a cycle - while (!nodeID.isNull()) { - visitedNodes.append(nodeID); // Append each node we visit - - const FBXModel& fbxModel = fbxModels.value(nodeID); - globalTransform = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(fbxModel.preRotation * - fbxModel.rotation * fbxModel.postRotation) * fbxModel.postTransform * globalTransform; - if (fbxModel.hasGeometricOffset) { - glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation); - globalTransform = globalTransform * geometricOffset; - } - - if (mixamoHack) { - // there's something weird about the models from Mixamo Fuse; they don't skin right with the full transform - return globalTransform; - } - QList parentIDs = _connectionParentMap.values(nodeID); - nodeID = QString(); - foreach (const QString& parentID, parentIDs) { - if (visitedNodes.contains(parentID)) { - qCWarning(modelformat) << "Ignoring loop detected in FBX connection map for" << url; - continue; - } - +std::vector getModelIDsForMeshID(const QString& meshID, const QHash& fbxModels, const QMultiMap& _connectionParentMap) { + std::vector modelsForMesh; + if (fbxModels.contains(meshID)) { + modelsForMesh.push_back(meshID); + } else { + // This mesh may have more than one parent model, with different material and transform, representing a different instance of the mesh + for (const auto& parentID : _connectionParentMap.values(meshID)) { if (fbxModels.contains(parentID)) { - nodeID = parentID; - break; + modelsForMesh.push_back(parentID); } } } - return globalTransform; + return modelsForMesh; } class ExtractedBlendshape { @@ -404,7 +383,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const QVector blendshapes; QHash fbxModels; - QHash clusters; + QHash fbxClusters; QHash animationCurves; QHash typeFlags; @@ -1058,9 +1037,9 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } - // skip empty clusters + // skip empty fbxClusters if (cluster.indices.size() > 0 && cluster.weights.size() > 0) { - clusters.insert(getID(object.properties), cluster); + fbxClusters.insert(getID(object.properties), cluster); } } else if (object.properties.last() == "BlendShapeChannel") { @@ -1233,13 +1212,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const QVector modelIDs; QSet remainingFBXModels; for (QHash::const_iterator fbxModel = fbxModels.constBegin(); fbxModel != fbxModels.constEnd(); fbxModel++) { - // models with clusters must be parented to the cluster top + // models with fbxClusters must be parented to the cluster top // Unless the model is a root node. 
bool isARootNode = !modelIDs.contains(_connectionParentMap.value(fbxModel.key())); if (!isARootNode) { foreach(const QString& deformerID, _connectionChildMap.values(fbxModel.key())) { foreach(const QString& clusterID, _connectionChildMap.values(deformerID)) { - if (!clusters.contains(clusterID)) { + if (!fbxClusters.contains(clusterID)) { continue; } QString topID = getTopModelID(_connectionParentMap, fbxModels, _connectionChildMap.value(clusterID), url); @@ -1283,8 +1262,15 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // convert the models to joints hfmModel.hasSkeletonJoints = false; + + // Note that these transform nodes are initially defined in world space + bool needMixamoHack = hfmModel.applicationName == "mixamo.com"; + hfmModel.transforms.reserve(modelIDs.size()); + std::vector globalTransforms; + globalTransforms.reserve(modelIDs.size()); - foreach (const QString& modelID, modelIDs) { + int jointIndex = 0; + for (const QString& modelID : modelIDs) { const FBXModel& fbxModel = fbxModels[modelID]; HFMJoint joint; joint.parentIndex = fbxModel.parentIndex; @@ -1358,6 +1344,42 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } hfmModel.joints.push_back(joint); + + // Now that we've initialized the joint, we can define the transform + // modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate + glm::mat4 localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; + if (joint.hasGeometricOffset) { + glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); + localTransform = localTransform * geometricOffset; + } + glm::mat4 globalTransform; + if (joint.parentIndex != -1 && joint.parentIndex < jointIndex) { + hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; + glm::mat4& parentGlobalTransform = globalTransforms[joint.parentIndex]; + if (needMixamoHack) { + // there's something weird about the models from Mixamo Fuse; they don't skin right with the full transform + globalTransform = localTransform; + localTransform = globalTransform * glm::inverse(parentGlobalTransform); + } else { + if (parentJoint.hasGeometricOffset) { + // Per the FBX standard, geometric offsets should not propagate to children + glm::mat4 parentGeometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); + globalTransform = localTransform * parentGlobalTransform * glm::inverse(parentGeometricOffset); + localTransform = globalTransform * glm::inverse(parentGlobalTransform); + } else { + globalTransform = localTransform * parentGlobalTransform; + } + } + } else { + globalTransform = localTransform; + } + hfm::TransformNode transformNode; + transformNode.parent = joint.parentIndex == -1 ? 
hfm::UNDEFINED_KEY : joint.parentIndex; + transformNode.transform = Transform(localTransform); + globalTransforms.push_back(globalTransform); + hfmModel.transforms.push_back(transformNode); + + ++jointIndex; } // NOTE: shapeVertices are in joint-frame @@ -1401,235 +1423,222 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } #endif + std::unordered_map materialNameToID; for (auto materialIt = _hfmMaterials.cbegin(); materialIt != _hfmMaterials.cend(); ++materialIt) { + materialNameToID[materialIt.key().toStdString()] = hfmModel.materials.size(); hfmModel.materials.push_back(materialIt.value()); } // see if any materials have texture children bool materialsHaveTextures = checkMaterialsHaveTextures(_hfmMaterials, _textureFilenames, _connectionChildMap); + // Note that the transforms in the TransformNodes are initially in world-space, and need to be converted to parent-space + std::vector transformNodes; + for (QMap::iterator it = meshes.begin(); it != meshes.end(); it++) { - ExtractedMesh& extracted = it.value(); + const QString& meshID = it.key(); + const ExtractedMesh& extracted = it.value(); + const auto& partMaterialTextures = extracted.partMaterialTextures; + const auto& newIndices = extracted.newIndices; - extracted.mesh.meshExtents.reset(); - - // accumulate local transforms - QString modelID = fbxModels.contains(it.key()) ? it.key() : _connectionParentMap.value(it.key()); - glm::mat4 modelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url); - - // compute the mesh extents from the transformed vertices - foreach (const glm::vec3& vertex, extracted.mesh.vertices) { - glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f)); - hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex); - hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex); - - extracted.mesh.meshExtents.minimum = glm::min(extracted.mesh.meshExtents.minimum, transformedVertex); - extracted.mesh.meshExtents.maximum = glm::max(extracted.mesh.meshExtents.maximum, transformedVertex); - extracted.mesh.modelTransform = modelTransform; - } - - // look for textures, material properties - // allocate the Part material library - // NOTE: extracted.partMaterialTextures is empty for FBX_DRACO_MESH_VERSION >= 2. In that case, the mesh part's materialID string is already defined. - int materialIndex = 0; - int textureIndex = 0; - QList children = _connectionChildMap.values(modelID); - for (int i = children.size() - 1; i >= 0; i--) { - - const QString& childID = children.at(i); - if (_hfmMaterials.contains(childID)) { - // the pure material associated with this part - HFMMaterial material = _hfmMaterials.value(childID); - - for (int j = 0; j < extracted.partMaterialTextures.size(); j++) { - if (extracted.partMaterialTextures.at(j).first == materialIndex) { - HFMMeshPart& part = extracted.mesh.parts[j]; - part.materialID = material.materialID; - } - } - - materialIndex++; - } else if (_textureFilenames.contains(childID)) { - // NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale") - // I'm leaving the second parameter blank right now as this code may never be used. 
- HFMTexture texture = getTexture(childID, ""); - for (int j = 0; j < extracted.partMaterialTextures.size(); j++) { - int partTexture = extracted.partMaterialTextures.at(j).second; - if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) { - // TODO: DO something here that replaces this legacy code - // Maybe create a material just for this part with the correct textures? - // extracted.mesh.parts[j].diffuseTexture = texture; - } - } - textureIndex++; - } - } - - // find the clusters with which the mesh is associated - QVector clusterIDs; - foreach (const QString& childID, _connectionChildMap.values(it.key())) { - foreach (const QString& clusterID, _connectionChildMap.values(childID)) { - if (!clusters.contains(clusterID)) { - continue; - } - HFMCluster hfmCluster; - const Cluster& cluster = clusters[clusterID]; - clusterIDs.append(clusterID); - - // see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion - // of skinning information in FBX - QString jointID = _connectionChildMap.value(clusterID); - hfmCluster.jointIndex = modelIDs.indexOf(jointID); - if (hfmCluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { - qCDebug(modelformat) << "Joint not in model list: " << jointID; - hfmCluster.jointIndex = 0; - } - - hfmCluster.inverseBindMatrix = glm::inverse(cluster.transformLink) * modelTransform; - - // slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and - // sometimes floating point fuzz can be introduced after the inverse. - hfmCluster.inverseBindMatrix[0][3] = 0.0f; - hfmCluster.inverseBindMatrix[1][3] = 0.0f; - hfmCluster.inverseBindMatrix[2][3] = 0.0f; - hfmCluster.inverseBindMatrix[3][3] = 1.0f; - - hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix); - - extracted.mesh.clusters.append(hfmCluster); - - // override the bind rotation with the transform link - HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex]; - joint.inverseBindRotation = glm::inverse(extractRotation(cluster.transformLink)); - joint.bindTransform = cluster.transformLink; - joint.bindTransformFoundInCluster = true; - - // update the bind pose extents - glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform); - hfmModel.bindExtents.addPoint(bindTranslation); - } - } - - // the last cluster is the root cluster - { - HFMCluster cluster; - cluster.jointIndex = modelIDs.indexOf(modelID); - if (cluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { - qCDebug(modelformat) << "Model not in model list: " << modelID; - cluster.jointIndex = 0; - } - extracted.mesh.clusters.append(cluster); - } - - // whether we're skinned depends on how many clusters are attached - if (clusterIDs.size() > 1) { - // this is a multi-mesh joint - const int WEIGHTS_PER_VERTEX = 4; - int numClusterIndices = extracted.mesh.vertices.size() * WEIGHTS_PER_VERTEX; - extracted.mesh.clusterIndices.fill(extracted.mesh.clusters.size() - 1, numClusterIndices); - QVector weightAccumulators; - weightAccumulators.fill(0.0f, numClusterIndices); - - for (int i = 0; i < clusterIDs.size(); i++) { - QString clusterID = clusterIDs.at(i); - const Cluster& cluster = clusters[clusterID]; - const HFMCluster& hfmCluster = extracted.mesh.clusters.at(i); - int jointIndex = hfmCluster.jointIndex; - HFMJoint& joint = hfmModel.joints[jointIndex]; - - glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform; - ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex); - - for (int j = 0; j < 
cluster.indices.size(); j++) { - int oldIndex = cluster.indices.at(j); - float weight = cluster.weights.at(j); - for (QMultiHash::const_iterator it = extracted.newIndices.constFind(oldIndex); - it != extracted.newIndices.end() && it.key() == oldIndex; it++) { - int newIndex = it.value(); - - // remember vertices with at least 1/4 weight - // FIXME: vertices with no weightpainting won't get recorded here - const float EXPANSION_WEIGHT_THRESHOLD = 0.25f; - if (weight >= EXPANSION_WEIGHT_THRESHOLD) { - // transform to joint-frame and save for later - const glm::mat4 vertexTransform = meshToJoint * glm::translate(extracted.mesh.vertices.at(newIndex)); - points.push_back(extractTranslation(vertexTransform)); - } - - // look for an unused slot in the weights vector - int weightIndex = newIndex * WEIGHTS_PER_VERTEX; - int lowestIndex = -1; - float lowestWeight = FLT_MAX; - int k = 0; - for (; k < WEIGHTS_PER_VERTEX; k++) { - if (weightAccumulators[weightIndex + k] == 0.0f) { - extracted.mesh.clusterIndices[weightIndex + k] = i; - weightAccumulators[weightIndex + k] = weight; - break; - } - if (weightAccumulators[weightIndex + k] < lowestWeight) { - lowestIndex = k; - lowestWeight = weightAccumulators[weightIndex + k]; - } - } - if (k == WEIGHTS_PER_VERTEX && weight > lowestWeight) { - // no space for an additional weight; we must replace the lowest - weightAccumulators[weightIndex + lowestIndex] = weight; - extracted.mesh.clusterIndices[weightIndex + lowestIndex] = i; - } - } - } - } - - // now that we've accumulated the most relevant weights for each vertex - // normalize and compress to 16-bits - extracted.mesh.clusterWeights.fill(0, numClusterIndices); - int numVertices = extracted.mesh.vertices.size(); - for (int i = 0; i < numVertices; ++i) { - int j = i * WEIGHTS_PER_VERTEX; - - // normalize weights into uint16_t - float totalWeight = 0.0f; - for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) { - totalWeight += weightAccumulators[k]; - } - - const float ALMOST_HALF = 0.499f; - if (totalWeight > 0.0f) { - float weightScalingFactor = (float)(UINT16_MAX) / totalWeight; - for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) { - extracted.mesh.clusterWeights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF); - } - } else { - extracted.mesh.clusterWeights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); - } - } - } else { - // this is a single-joint mesh - const HFMCluster& firstHFMCluster = extracted.mesh.clusters.at(0); - int jointIndex = firstHFMCluster.jointIndex; - HFMJoint& joint = hfmModel.joints[jointIndex]; - - // transform cluster vertices to joint-frame and save for later - glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform; - ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex); - for (const glm::vec3& vertex : extracted.mesh.vertices) { - const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertex); - points.push_back(extractTranslation(vertexTransform)); - } - - // Apply geometric offset, if present, by transforming the vertices directly - if (joint.hasGeometricOffset) { - glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); - for (int i = 0; i < extracted.mesh.vertices.size(); i++) { - extracted.mesh.vertices[i] = transformPoint(geometricOffset, extracted.mesh.vertices[i]); - } - } - } - - hfmModel.meshes.push_back(extracted.mesh); - uint32_t meshIndex = (uint32_t)hfmModel.meshes.size() - 1; + uint32_t meshIndex = 
(uint32_t)hfmModel.meshes.size(); meshIDsToMeshIndices.insert(it.key(), meshIndex); + hfmModel.meshes.push_back(extracted.mesh); + hfm::Mesh& mesh = hfmModel.meshes.back(); + + std::vector instanceModelIDs = getModelIDsForMeshID(meshID, fbxModels, _connectionParentMap); + // meshShapes will be added to hfmModel at the very end + std::vector meshShapes; + meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size()); + for (const QString& modelID : instanceModelIDs) { + // The transform node has the same indexing order as the joints + const uint32_t transformNodeIndex = (uint32_t)modelIDs.indexOf(modelID); + + // accumulate local transforms + glm::mat4 modelTransform = globalTransforms[transformNodeIndex]; + // compute the mesh extents from the transformed vertices + for (const glm::vec3& vertex : mesh.vertices) { + glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f)); + hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex); + hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex); + } + + // partShapes will be added to meshShapes at the very end + std::vector partShapes { mesh.parts.size() }; + for (uint32_t i = 0; i < (uint32_t)partShapes.size(); ++i) { + hfm::Shape& shape = partShapes[i]; + shape.mesh = meshIndex; + shape.meshPart = i; + shape.transform = transformNodeIndex; + glm::mat4 shapeGlobalTransform = globalTransforms[transformNodeIndex]; + + shape.transformedExtents.reset(); + // compute the shape extents from the transformed vertices + for (const glm::vec3& vertex : mesh.vertices) { + glm::vec3 transformedVertex = glm::vec3(shapeGlobalTransform * glm::vec4(vertex, 1.0f)); + shape.transformedExtents.minimum = glm::min(shape.transformedExtents.minimum, transformedVertex); + shape.transformedExtents.maximum = glm::max(shape.transformedExtents.maximum, transformedVertex); + } + } + + // For FBX_DRACO_MESH_VERSION < 2, or unbaked models, get materials from the partMaterialTextures + int materialIndex = 0; + int textureIndex = 0; + QList children = _connectionChildMap.values(modelID); + for (int i = children.size() - 1; i >= 0; i--) { + const QString& childID = children.at(i); + if (_hfmMaterials.contains(childID)) { + // the pure material associated with this part + const HFMMaterial& material = _hfmMaterials.value(childID); + for (int j = 0; j < partMaterialTextures.size(); j++) { + if (partMaterialTextures.at(j).first == materialIndex) { + hfm::Shape& shape = partShapes[j]; + shape.material = materialNameToID[material.materialID.toStdString()]; + } + } + materialIndex++; + } else if (_textureFilenames.contains(childID)) { + // NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale") + // I'm leaving the second parameter blank right now as this code may never be used. + HFMTexture texture = getTexture(childID, ""); + for (int j = 0; j < partMaterialTextures.size(); j++) { + int partTexture = partMaterialTextures.at(j).second; + if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) { + // TODO: DO something here that replaces this legacy code + // Maybe create a material just for this part with the correct textures? 
+ // material.albedoTexture = texture; + // partShapes[j].material = materialIndex; + } + } + textureIndex++; + } + } + // For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart + if (!extracted.materialIDPerMeshPart.empty()) { + for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) { + hfm::Shape& shape = partShapes[i]; + const std::string& materialID = extracted.materialIDPerMeshPart[i]; + auto materialIt = materialNameToID.find(materialID); + if (materialIt != materialNameToID.end()) { + shape.material = materialIt->second; + } + } + } + + // find the clusters with which the mesh is associated + QVector clusterIDs; + for (const QString& childID : _connectionChildMap.values(meshID)) { + for (const QString& clusterID : _connectionChildMap.values(childID)) { + if (!fbxClusters.contains(clusterID)) { + continue; + } + clusterIDs.append(clusterID); + } + } + + auto rootJointIndex = modelIDs.indexOf(modelID); + if (rootJointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { + qCDebug(modelformat) << "Model not in model list: " << modelID; + rootJointIndex = 0; + } + + // whether we're skinned depends on how many clusters are attached + if (clusterIDs.size() > 1) { + hfm::DynamicTransform dynamicTransform; + auto& clusters = dynamicTransform.clusters; + std::vector deformers; + for (const auto& clusterID : clusterIDs) { + HFMCluster hfmCluster; + const Cluster& fbxCluster = fbxClusters[clusterID]; + + // see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion + // of skinning information in FBX + QString jointID = _connectionChildMap.value(clusterID); + hfmCluster.jointIndex = modelIDs.indexOf(jointID); + if (hfmCluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { + qCDebug(modelformat) << "Joint not in model list: " << jointID; + hfmCluster.jointIndex = 0; + } + + hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * modelTransform; + + // slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and + // sometimes floating point fuzz can be introduced after the inverse. 
+ hfmCluster.inverseBindMatrix[0][3] = 0.0f; + hfmCluster.inverseBindMatrix[1][3] = 0.0f; + hfmCluster.inverseBindMatrix[2][3] = 0.0f; + hfmCluster.inverseBindMatrix[3][3] = 1.0f; + + hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix); + + clusters.push_back(hfmCluster); + + // override the bind rotation with the transform link + HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex]; + joint.inverseBindRotation = glm::inverse(extractRotation(fbxCluster.transformLink)); + joint.bindTransform = fbxCluster.transformLink; + joint.bindTransformFoundInCluster = true; + + // update the bind pose extents + glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform); + hfmModel.bindExtents.addPoint(bindTranslation); + + // the last cluster is the root cluster + HFMCluster cluster; + cluster.jointIndex = rootJointIndex; + clusters.push_back(cluster); + } + + // Skinned mesh instances have a dynamic transform + dynamicTransform.deformers.reserve(clusterIDs.size()); + clusters.reserve(clusterIDs.size()); + for (const auto& clusterID : clusterIDs) { + const Cluster& fbxCluster = fbxClusters[clusterID]; + dynamicTransform.deformers.emplace_back(); + deformers.emplace_back(); + hfm::Deformer& deformer = deformers.back(); + size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size()); + deformer.indices.reserve(indexWeightPairs); + deformer.weights.reserve(indexWeightPairs); + for (size_t i = 0; i < indexWeightPairs; i++) { + int oldIndex = fbxCluster.indices[i]; + uint32_t newIndex = (uint32_t)extracted.newIndices.value(oldIndex); + deformer.indices.push_back(newIndex); + deformer.indices.push_back((float)fbxCluster.weights[i]); + } + } + + // Store this model's deformers, this dynamic transform's deformer IDs + uint32_t deformerMinID = (uint32_t)hfmModel.deformers.size(); + hfmModel.deformers.insert(hfmModel.deformers.end(), deformers.cbegin(), deformers.cend()); + dynamicTransform.deformers.resize(deformers.size()); + std::iota(dynamicTransform.deformers.begin(), dynamicTransform.deformers.end(), deformerMinID); + + // Store the model's dynamic transform, and put its ID in the shapes + hfmModel.dynamicTransforms.push_back(dynamicTransform); + uint32_t dynamicTransformID = (uint32_t)(hfmModel.dynamicTransforms.size() - 1); + for (hfm::Shape& shape : partShapes) { + shape.dynamicTransform = dynamicTransformID; + } + } else { + // this is a single-joint mesh + HFMJoint& joint = hfmModel.joints[rootJointIndex]; + + // Apply geometric offset, if present, by transforming the vertices directly + if (joint.hasGeometricOffset) { + glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); + for (int i = 0; i < mesh.vertices.size(); i++) { + mesh.vertices[i] = transformPoint(geometricOffset, mesh.vertices[i]); + } + } + } + + // Store the parts for this mesh (or instance of this mesh, as the case may be) + meshShapes.insert(meshShapes.cend(), partShapes.cbegin(), partShapes.cend()); + } + + // Store the shapes for the mesh (or multiple instances of the mesh, as the case may be) + hfmModel.shapes.insert(hfmModel.shapes.cend(), meshShapes.cbegin(), meshShapes.cend()); } // attempt to map any meshes to a named model @@ -1651,9 +1660,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const if (applyUpAxisZRotation) { hfmModelPtr->meshExtents.transform(glm::mat4_cast(upAxisZRotation)); 
hfmModelPtr->bindExtents.transform(glm::mat4_cast(upAxisZRotation)); - for (auto &mesh : hfmModelPtr->meshes) { - mesh.modelTransform *= glm::mat4_cast(upAxisZRotation); - mesh.meshExtents.transform(glm::mat4_cast(upAxisZRotation)); + for (auto &shape : hfmModelPtr->shapes) { + auto transformIndex = shape.transform; + auto& transformNode = hfmModelPtr->transforms[transformIndex]; + transformNode.transform.postRotate(upAxisZRotation); + shape.transformedExtents.transform(glm::mat4_cast(upAxisZRotation)); } } return hfmModelPtr; diff --git a/libraries/fbx/src/FBXSerializer.h b/libraries/fbx/src/FBXSerializer.h index c9468708a6..2044d82710 100644 --- a/libraries/fbx/src/FBXSerializer.h +++ b/libraries/fbx/src/FBXSerializer.h @@ -103,6 +103,7 @@ public: class ExtractedMesh { public: hfm::Mesh mesh; + std::vector materialIDPerMeshPart; QMultiHash newIndices; QVector > blendshapeIndexMaps; QVector > partMaterialTextures; diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index 479e7acfc9..a89be38fe3 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -355,7 +355,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me // Check for additional metadata unsigned int dracoMeshNodeVersion = 1; - std::vector dracoMaterialList; + std::vector dracoMaterialList; for (const auto& dracoChild : child.children) { if (dracoChild.name == "FBXDracoMeshVersion") { if (!dracoChild.properties.isEmpty()) { @@ -364,7 +364,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me } else if (dracoChild.name == "MaterialList") { dracoMaterialList.reserve(dracoChild.properties.size()); for (const auto& materialID : dracoChild.properties) { - dracoMaterialList.push_back(materialID.toString()); + dracoMaterialList.push_back(materialID.toString().toStdString()); } } } @@ -467,6 +467,8 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me } } + ExtractedMesh& extracted = data.extracted; + extracted.materialIDPerMeshPart.resize(dracoMaterialList.size()); for (uint32_t i = 0; i < dracoMesh->num_faces(); ++i) { // grab the material ID and texture ID for this face, if we have it auto& dracoFace = dracoMesh->face(draco::FaceIndex(i)); @@ -487,13 +489,13 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me int& partIndexPlusOne = materialTextureParts[materialTexture]; if (partIndexPlusOne == 0) { data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1); - HFMMeshPart& part = data.extracted.mesh.parts.back(); + HFMMeshPart& part = extracted.mesh.parts.back(); // Figure out what material this part is if (dracoMeshNodeVersion >= 2) { // Define the materialID now if (materialID < dracoMaterialList.size()) { - part.materialID = dracoMaterialList[materialID]; + extracted.materialIDPerMeshPart[materialID] = dracoMaterialList[materialID]; } } else { // Define the materialID later, based on the order of first appearance of the materials in the _connectionChildMap From ba6833df8fcf0468411ebcf25d4ecfd5fec8b51c Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 3 Oct 2019 10:49:14 -0700 Subject: [PATCH 032/121] Make small improvements to FBXSerializer code changes --- libraries/fbx/src/FBXSerializer.cpp | 61 +++++++++++------------- libraries/fbx/src/FBXSerializer_Mesh.cpp | 19 ++++---- 2 files changed, 37 insertions(+), 43 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp 
b/libraries/fbx/src/FBXSerializer.cpp index e8388451d4..424c06b1c4 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1263,13 +1263,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // convert the models to joints hfmModel.hasSkeletonJoints = false; - // Note that these transform nodes are initially defined in world space bool needMixamoHack = hfmModel.applicationName == "mixamo.com"; hfmModel.transforms.reserve(modelIDs.size()); std::vector globalTransforms; globalTransforms.reserve(modelIDs.size()); - int jointIndex = 0; for (const QString& modelID : modelIDs) { const FBXModel& fbxModel = fbxModels[modelID]; HFMJoint joint; @@ -1378,13 +1376,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const transformNode.transform = Transform(localTransform); globalTransforms.push_back(globalTransform); hfmModel.transforms.push_back(transformNode); - - ++jointIndex; } - // NOTE: shapeVertices are in joint-frame - hfmModel.shapeVertices.resize(std::max((size_t)1, hfmModel.joints.size()) ); - hfmModel.bindExtents.reset(); hfmModel.meshExtents.reset(); @@ -1482,35 +1475,37 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } // For FBX_DRACO_MESH_VERSION < 2, or unbaked models, get materials from the partMaterialTextures - int materialIndex = 0; - int textureIndex = 0; - QList children = _connectionChildMap.values(modelID); - for (int i = children.size() - 1; i >= 0; i--) { - const QString& childID = children.at(i); - if (_hfmMaterials.contains(childID)) { - // the pure material associated with this part - const HFMMaterial& material = _hfmMaterials.value(childID); - for (int j = 0; j < partMaterialTextures.size(); j++) { - if (partMaterialTextures.at(j).first == materialIndex) { - hfm::Shape& shape = partShapes[j]; - shape.material = materialNameToID[material.materialID.toStdString()]; + if (!partMaterialTextures.empty()) { + int materialIndex = 0; + int textureIndex = 0; + QList children = _connectionChildMap.values(modelID); + for (int i = children.size() - 1; i >= 0; i--) { + const QString& childID = children.at(i); + if (_hfmMaterials.contains(childID)) { + // the pure material associated with this part + const HFMMaterial& material = _hfmMaterials.value(childID); + for (int j = 0; j < partMaterialTextures.size(); j++) { + if (partMaterialTextures.at(j).first == materialIndex) { + hfm::Shape& shape = partShapes[j]; + shape.material = materialNameToID[material.materialID.toStdString()]; + } } - } - materialIndex++; - } else if (_textureFilenames.contains(childID)) { - // NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale") - // I'm leaving the second parameter blank right now as this code may never be used. - HFMTexture texture = getTexture(childID, ""); - for (int j = 0; j < partMaterialTextures.size(); j++) { - int partTexture = partMaterialTextures.at(j).second; - if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) { - // TODO: DO something here that replaces this legacy code - // Maybe create a material just for this part with the correct textures? 
- // material.albedoTexture = texture; - // partShapes[j].material = materialIndex; + materialIndex++; + } else if (_textureFilenames.contains(childID)) { + // NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale") + // I'm leaving the second parameter blank right now as this code may never be used. + HFMTexture texture = getTexture(childID, ""); + for (int j = 0; j < partMaterialTextures.size(); j++) { + int partTexture = partMaterialTextures.at(j).second; + if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) { + // TODO: DO something here that replaces this legacy code + // Maybe create a material just for this part with the correct textures? + // material.albedoTexture = texture; + // partShapes[j].material = materialIndex; + } } + textureIndex++; } - textureIndex++; } } // For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index a89be38fe3..f19cd7c526 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -369,6 +369,11 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me } } + if (dracoMeshNodeVersion >= 2) { + // Define the materialIDs now + data.extracted.materialIDPerMeshPart = dracoMaterialList; + } + // load the draco mesh from the FBX and create a draco::Mesh draco::Decoder decoder; draco::DecoderBuffer decodedBuffer; @@ -467,8 +472,6 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me } } - ExtractedMesh& extracted = data.extracted; - extracted.materialIDPerMeshPart.resize(dracoMaterialList.size()); for (uint32_t i = 0; i < dracoMesh->num_faces(); ++i) { // grab the material ID and texture ID for this face, if we have it auto& dracoFace = dracoMesh->face(draco::FaceIndex(i)); @@ -489,18 +492,14 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me int& partIndexPlusOne = materialTextureParts[materialTexture]; if (partIndexPlusOne == 0) { data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1); - HFMMeshPart& part = extracted.mesh.parts.back(); + HFMMeshPart& part = data.extracted.mesh.parts.back(); - // Figure out what material this part is - if (dracoMeshNodeVersion >= 2) { - // Define the materialID now - if (materialID < dracoMaterialList.size()) { - extracted.materialIDPerMeshPart[materialID] = dracoMaterialList[materialID]; - } - } else { + // Figure out if this is the older way of defining the per-part material for baked FBX + if (dracoMeshNodeVersion < 2) { // Define the materialID later, based on the order of first appearance of the materials in the _connectionChildMap data.extracted.partMaterialTextures.append(materialTexture); } + // in dracoMeshNodeVersion >= 2, fbx meshes have their per-part materials already defined in data.extracted.materialIDPerMeshPart partIndexPlusOne = (int)data.extracted.mesh.parts.size(); } From 4a8cdee38ab0485fc399b2f5ccadc660b173fff8 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 3 Oct 2019 14:30:03 -0700 Subject: [PATCH 033/121] Update HFM format and deprecate hfm::TransformNode --- libraries/hfm/src/hfm/HFM.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 29c4af9ec9..08410f17f2 
100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -118,6 +118,9 @@ public: glm::vec3 geometricTranslation; glm::quat geometricRotation; glm::vec3 geometricScaling; + + // globalTransform is the transform of the joint with all parent transforms applied, plus the geometric offset + glm::mat4 globalTransform; }; @@ -245,7 +248,7 @@ public: QVector clusters; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::clusters) Extents meshExtents; // DEPRECATED (see hfm::Shape::transformedExtents) - glm::mat4 modelTransform; // DEPRECATED (see hfm::Shape::transform, hfm::TransformNode, hfm::Model::transforms) + glm::mat4 modelTransform; // DEPRECATED (see hfm::Joint::globalTransform, hfm::Shape::transform, hfm::Model::joints) QVector blendshapes; @@ -289,6 +292,7 @@ public: bool shouldInitCollisions() const { return _collisionsConfig.size() > 0; } }; +// DEPRECATED in favor of using hfm::Joint class TransformNode { public: uint32_t parent { 0 }; @@ -316,9 +320,9 @@ public: uint32_t mesh { UNDEFINED_KEY }; uint32_t meshPart { UNDEFINED_KEY }; uint32_t material { UNDEFINED_KEY }; - uint32_t transform { UNDEFINED_KEY }; // The static transform node when not taking into account rigging/skinning + uint32_t transform { UNDEFINED_KEY }; // The hfm::Joint associated with this shape, containing transform information // TODO: Have all serializers calculate hfm::Shape::transformedExtents in world space where they previously calculated hfm::Mesh::meshExtents. Change all code that uses hfm::Mesh::meshExtents to use this instead. - Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after the transform node and parent transform nodes are applied, while not taking into account rigging/skinning + Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after transform information is applied, while not taking into account rigging/skinning uint32_t dynamicTransform { UNDEFINED_KEY }; }; From e8d421fa3549330b0972f46d8c5f1d578eb9d4d2 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 3 Oct 2019 14:30:20 -0700 Subject: [PATCH 034/121] Fix transforms and other issues with FBXSerializer --- libraries/fbx/src/FBXSerializer.cpp | 83 ++++++++++++----------------- 1 file changed, 33 insertions(+), 50 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 424c06b1c4..e7d6000c28 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1264,9 +1264,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const hfmModel.hasSkeletonJoints = false; bool needMixamoHack = hfmModel.applicationName == "mixamo.com"; - hfmModel.transforms.reserve(modelIDs.size()); - std::vector globalTransforms; - globalTransforms.reserve(modelIDs.size()); for (const QString& modelID : modelIDs) { const FBXModel& fbxModel = fbxModels[modelID]; @@ -1341,41 +1338,28 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } - hfmModel.joints.push_back(joint); // Now that we've initialized the joint, we can define the transform // modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate - glm::mat4 localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; + joint.globalTransform = glm::translate(joint.translation) * 
joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; + if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { + hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; + joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; + if (parentJoint.hasGeometricOffset) { + // Per the FBX standard, geometric offset should not propagate to children. + // However, we must be careful when modifying the behavior of FBXSerializer. + // So, we leave this here, as a breakpoint for debugging, or stub for implementation. + // qCDebug(modelformat) << "Geometric offset encountered on non-leaf node. jointIndex: " << jointIndex << ", modelURL: " << url; + // glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); + // globalTransform = globalTransform * glm::inverse(geometricOffset); + } + } if (joint.hasGeometricOffset) { glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); - localTransform = localTransform * geometricOffset; + joint.globalTransform = joint.globalTransform * geometricOffset; } - glm::mat4 globalTransform; - if (joint.parentIndex != -1 && joint.parentIndex < jointIndex) { - hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; - glm::mat4& parentGlobalTransform = globalTransforms[joint.parentIndex]; - if (needMixamoHack) { - // there's something weird about the models from Mixamo Fuse; they don't skin right with the full transform - globalTransform = localTransform; - localTransform = globalTransform * glm::inverse(parentGlobalTransform); - } else { - if (parentJoint.hasGeometricOffset) { - // Per the FBX standard, geometric offsets should not propagate to children - glm::mat4 parentGeometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); - globalTransform = localTransform * parentGlobalTransform * glm::inverse(parentGeometricOffset); - localTransform = globalTransform * glm::inverse(parentGlobalTransform); - } else { - globalTransform = localTransform * parentGlobalTransform; - } - } - } else { - globalTransform = localTransform; - } - hfm::TransformNode transformNode; - transformNode.parent = joint.parentIndex == -1 ? 
hfm::UNDEFINED_KEY : joint.parentIndex; - transformNode.transform = Transform(localTransform); - globalTransforms.push_back(globalTransform); - hfmModel.transforms.push_back(transformNode); + + hfmModel.joints.push_back(joint); } hfmModel.bindExtents.reset(); @@ -1418,7 +1402,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const std::unordered_map materialNameToID; for (auto materialIt = _hfmMaterials.cbegin(); materialIt != _hfmMaterials.cend(); ++materialIt) { - materialNameToID[materialIt.key().toStdString()] = hfmModel.materials.size(); + materialNameToID[materialIt.key().toStdString()] = (uint32_t)hfmModel.materials.size(); hfmModel.materials.push_back(materialIt.value()); } @@ -1445,13 +1429,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size()); for (const QString& modelID : instanceModelIDs) { // The transform node has the same indexing order as the joints - const uint32_t transformNodeIndex = (uint32_t)modelIDs.indexOf(modelID); + const uint32_t transformIndex = (uint32_t)modelIDs.indexOf(modelID); // accumulate local transforms - glm::mat4 modelTransform = globalTransforms[transformNodeIndex]; + glm::mat4 globalTransform = hfmModel.joints[transformIndex].globalTransform; // compute the mesh extents from the transformed vertices for (const glm::vec3& vertex : mesh.vertices) { - glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f)); + glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f)); hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex); hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex); } @@ -1462,13 +1446,12 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const hfm::Shape& shape = partShapes[i]; shape.mesh = meshIndex; shape.meshPart = i; - shape.transform = transformNodeIndex; - glm::mat4 shapeGlobalTransform = globalTransforms[transformNodeIndex]; + shape.transform = transformIndex; shape.transformedExtents.reset(); // compute the shape extents from the transformed vertices for (const glm::vec3& vertex : mesh.vertices) { - glm::vec3 transformedVertex = glm::vec3(shapeGlobalTransform * glm::vec4(vertex, 1.0f)); + glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f)); shape.transformedExtents.minimum = glm::min(shape.transformedExtents.minimum, transformedVertex); shape.transformedExtents.maximum = glm::max(shape.transformedExtents.maximum, transformedVertex); } @@ -1555,7 +1538,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const hfmCluster.jointIndex = 0; } - hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * modelTransform; + hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * globalTransform; // slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and // sometimes floating point fuzz can be introduced after the inverse. 
@@ -1577,13 +1560,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // update the bind pose extents glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform); hfmModel.bindExtents.addPoint(bindTranslation); - - // the last cluster is the root cluster - HFMCluster cluster; - cluster.jointIndex = rootJointIndex; - clusters.push_back(cluster); } + // the last cluster is the root cluster + HFMCluster cluster; + cluster.jointIndex = rootJointIndex; + clusters.push_back(cluster); + // Skinned mesh instances have a dynamic transform dynamicTransform.deformers.reserve(clusterIDs.size()); clusters.reserve(clusterIDs.size()); @@ -1595,7 +1578,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size()); deformer.indices.reserve(indexWeightPairs); deformer.weights.reserve(indexWeightPairs); - for (size_t i = 0; i < indexWeightPairs; i++) { + for (int i = 0; i < (int)indexWeightPairs; i++) { int oldIndex = fbxCluster.indices[i]; uint32_t newIndex = (uint32_t)extracted.newIndices.value(oldIndex); deformer.indices.push_back(newIndex); @@ -1655,12 +1638,12 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const if (applyUpAxisZRotation) { hfmModelPtr->meshExtents.transform(glm::mat4_cast(upAxisZRotation)); hfmModelPtr->bindExtents.transform(glm::mat4_cast(upAxisZRotation)); - for (auto &shape : hfmModelPtr->shapes) { - auto transformIndex = shape.transform; - auto& transformNode = hfmModelPtr->transforms[transformIndex]; - transformNode.transform.postRotate(upAxisZRotation); + for (auto& shape : hfmModelPtr->shapes) { shape.transformedExtents.transform(glm::mat4_cast(upAxisZRotation)); } + for (auto& joint : hfmModelPtr->joints) { + joint.globalTransform = joint.globalTransform * glm::mat4_cast(upAxisZRotation); + } } return hfmModelPtr; } From 41de373570d4587635c18b188ed93ffc49b8b7d7 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 3 Oct 2019 15:17:26 -0700 Subject: [PATCH 035/121] Fix not allocating shapeVerticesPerJoint in CollectShapeVerticesTask.cpp --- .../model-baker/src/model-baker/CollectShapeVerticesTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp index 8aeb0145d5..36c2aa04a6 100644 --- a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp +++ b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp @@ -33,7 +33,7 @@ void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, con const auto& reweightedDeformers = input.get4(); auto& shapeVerticesPerJoint = output; - shapeVerticesPerJoint.reserve(joints.size()); + shapeVerticesPerJoint.resize(joints.size()); std::vector> vertexSourcesPerJoint; vertexSourcesPerJoint.resize(joints.size()); for (size_t i = 0; i < shapes.size(); ++i) { From b15771e9fefb32306a1d697569514925ba13d121 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 3 Oct 2019 16:47:41 -0700 Subject: [PATCH 036/121] Fix build warnings and wrong use of indexOf --- libraries/fbx/src/FBXSerializer.cpp | 5 ++--- libraries/fbx/src/FBXSerializer_Mesh.cpp | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index e7d6000c28..78bc1836c3 100644 --- 
a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1416,7 +1416,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const const QString& meshID = it.key(); const ExtractedMesh& extracted = it.value(); const auto& partMaterialTextures = extracted.partMaterialTextures; - const auto& newIndices = extracted.newIndices; uint32_t meshIndex = (uint32_t)hfmModel.meshes.size(); meshIDsToMeshIndices.insert(it.key(), meshIndex); @@ -1515,7 +1514,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } auto rootJointIndex = modelIDs.indexOf(modelID); - if (rootJointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { + if (rootJointIndex == -1) { qCDebug(modelformat) << "Model not in model list: " << modelID; rootJointIndex = 0; } @@ -1533,7 +1532,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // of skinning information in FBX QString jointID = _connectionChildMap.value(clusterID); hfmCluster.jointIndex = modelIDs.indexOf(jointID); - if (hfmCluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { + if (hfmCluster.jointIndex == -1) { qCDebug(modelformat) << "Joint not in model list: " << jointID; hfmCluster.jointIndex = 0; } diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index f19cd7c526..7c6be5740a 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -492,7 +492,6 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me int& partIndexPlusOne = materialTextureParts[materialTexture]; if (partIndexPlusOne == 0) { data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1); - HFMMeshPart& part = data.extracted.mesh.parts.back(); // Figure out if this is the older way of defining the per-part material for baked FBX if (dracoMeshNodeVersion < 2) { From 09a8d69555837696397476f539af1adcfc14b692 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Fri, 4 Oct 2019 18:02:45 -0700 Subject: [PATCH 037/121] landing on the new code base --- libraries/fbx/src/FBXSerializer.cpp | 46 ++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 78bc1836c3..1752c2f024 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -145,6 +145,42 @@ public: bool isLimbNode; // is this FBXModel transform is a "LimbNode" i.e. 
a joint }; + +glm::mat4 getGlobalTransform(const QMultiMap& _connectionParentMap, + const QHash& fbxModels, QString nodeID, bool mixamoHack, const QString& url) { + glm::mat4 globalTransform; + QVector visitedNodes; // Used to prevent following a cycle + while (!nodeID.isNull()) { + visitedNodes.append(nodeID); // Append each node we visit + + const FBXModel& fbxModel = fbxModels.value(nodeID); + globalTransform = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(fbxModel.preRotation * + fbxModel.rotation * fbxModel.postRotation) * fbxModel.postTransform * globalTransform; + if (fbxModel.hasGeometricOffset) { + glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation); + globalTransform = globalTransform * geometricOffset; + } + + if (mixamoHack) { + // there's something weird about the models from Mixamo Fuse; they don't skin right with the full transform + return globalTransform; + } + QList parentIDs = _connectionParentMap.values(nodeID); + nodeID = QString(); + foreach(const QString& parentID, parentIDs) { + if (visitedNodes.contains(parentID)) { + qCWarning(modelformat) << "Ignoring loop detected in FBX connection map for" << url; + continue; + } + if (fbxModels.contains(parentID)) { + nodeID = parentID; + break; + } + } + } + return globalTransform; +} + std::vector getModelIDsForMeshID(const QString& meshID, const QHash& fbxModels, const QMultiMap& _connectionParentMap) { std::vector modelsForMesh; if (fbxModels.contains(meshID)) { @@ -1344,7 +1380,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.globalTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; - joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; + // joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; + joint.globalTransform = parentJoint.globalTransform * joint.globalTransform; if (parentJoint.hasGeometricOffset) { // Per the FBX standard, geometric offset should not propagate to children. // However, we must be careful when modifying the behavior of FBXSerializer. @@ -1359,6 +1396,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.globalTransform = joint.globalTransform * geometricOffset; } + // accumulate local transforms + // QString modelID = fbxModels.contains(it.key()) ? it.key() : _connectionParentMap.value(it.key()); + glm::mat4 anotherModelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url); + /* if (anotherModelTransform != joint.globalTransform) { + joint.globalTransform = anotherModelTransform; + } +*/ hfmModel.joints.push_back(joint); } From 521ce3936bc689639c558e9a893756bb79738c6c Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Mon, 7 Oct 2019 15:19:23 -0700 Subject: [PATCH 038/121] I changed names! Renaming the Geometry to NetworkModel, the GeometryResource to ModelResource, i think there is no need for the 2, only one would be enough in my opinion... 
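To make the scope of the rename concrete, call sites follow the pattern sketched below. This is an illustrative sketch only, not a new API: the one-argument getter, the texture check, and the DependencyManager<ModelCache> lookup are assumed from the call sites touched in this patch, and `url` / `model` stand in for whatever the caller already has.

    // Before this patch (illustrative call site):
    GeometryResource::Pointer resource =
        DependencyManager::get<ModelCache>()->getGeometryResource(url);
    bool texturesReady = model->getGeometry() && model->getGeometry()->areTexturesLoaded();

    // After this patch, the same call site reads:
    ModelResource::Pointer resource =
        DependencyManager::get<ModelCache>()->getModelResource(url);
    bool texturesReady = model->getNetworkModel() && model->getNetworkModel()->areTexturesLoaded();

getCollisionGeometryResource() becomes getCollisionModelResource() in the same way, and the class formerly named Geometry is now NetworkModel; no behavior changes, only names.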
--- interface/src/avatar/AvatarDoctor.cpp | 4 +- interface/src/avatar/AvatarDoctor.h | 2 +- interface/src/raypick/CollisionPick.cpp | 6 +- interface/src/raypick/CollisionPick.h | 6 +- .../src/avatars-renderer/Avatar.cpp | 2 +- .../src/avatars-renderer/SkeletonModel.cpp | 2 +- .../src/RenderableModelEntityItem.cpp | 6 +- .../src/RenderableModelEntityItem.h | 2 +- .../RenderableParticleEffectEntityItem.cpp | 6 +- .../src/RenderableParticleEffectEntityItem.h | 4 +- libraries/entities/src/ZoneEntityItem.cpp | 4 +- libraries/entities/src/ZoneEntityItem.h | 2 +- libraries/fbx/src/FBXSerializer.cpp | 24 ++-- libraries/hfm/src/hfm/HFM.h | 1 + .../src/material-networking/MaterialCache.h | 2 +- .../src/model-networking/ModelCache.cpp | 119 +++++++++--------- .../src/model-networking/ModelCache.h | 46 +++---- .../render-utils/src/CauterizedModel.cpp | 2 +- .../render-utils/src/MeshPartPayload.cpp | 8 +- libraries/render-utils/src/Model.cpp | 24 ++-- libraries/render-utils/src/Model.h | 8 +- 21 files changed, 150 insertions(+), 130 deletions(-) diff --git a/interface/src/avatar/AvatarDoctor.cpp b/interface/src/avatar/AvatarDoctor.cpp index a12b4dfcc0..4ff6fb7553 100644 --- a/interface/src/avatar/AvatarDoctor.cpp +++ b/interface/src/avatar/AvatarDoctor.cpp @@ -79,7 +79,7 @@ void AvatarDoctor::startDiagnosing() { _missingTextureCount = 0; _unsupportedTextureCount = 0; - const auto resource = DependencyManager::get()->getGeometryResource(_avatarFSTFileUrl); + const auto resource = DependencyManager::get()->getModelResource(_avatarFSTFileUrl); resource->refresh(); const auto resourceLoaded = [this, resource](bool success) { @@ -297,7 +297,7 @@ void AvatarDoctor::startDiagnosing() { if (resource->isLoaded()) { resourceLoaded(!resource->isFailed()); } else { - connect(resource.data(), &GeometryResource::finished, this, resourceLoaded); + connect(resource.data(), &ModelResource::finished, this, resourceLoaded); } } else { addError("Model file cannot be opened", "missing-file"); diff --git a/interface/src/avatar/AvatarDoctor.h b/interface/src/avatar/AvatarDoctor.h index 1465a5defc..1e3c84e02f 100644 --- a/interface/src/avatar/AvatarDoctor.h +++ b/interface/src/avatar/AvatarDoctor.h @@ -53,7 +53,7 @@ private: int _materialMappingCount = 0; int _materialMappingLoadedCount = 0; - GeometryResource::Pointer _model; + ModelResource::Pointer _model; bool _isDiagnosing = false; }; diff --git a/interface/src/raypick/CollisionPick.cpp b/interface/src/raypick/CollisionPick.cpp index fe943d5b84..9f8510c603 100644 --- a/interface/src/raypick/CollisionPick.cpp +++ b/interface/src/raypick/CollisionPick.cpp @@ -134,7 +134,7 @@ bool CollisionPick::getShapeInfoReady(const CollisionRegion& pick) { return _mathPick.loaded; } -void CollisionPick::computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource) { +void CollisionPick::computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource) { ShapeType type = shapeInfo.getType(); glm::vec3 dimensions = pick.transform.getScale(); QString modelURL = (resource ? 
resource->getURL().toString() : ""); @@ -147,7 +147,7 @@ void CollisionPick::computeShapeInfoDimensionsOnly(const CollisionRegion& pick, } } -void CollisionPick::computeShapeInfo(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource) { +void CollisionPick::computeShapeInfo(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource) { // This code was copied and modified from RenderableModelEntityItem::computeShapeInfo // TODO: Move to some shared code area (in entities-renderer? model-networking?) // after we verify this is working and do a diff comparison with RenderableModelEntityItem::computeShapeInfo @@ -381,7 +381,7 @@ CollisionPick::CollisionPick(const PickFilter& filter, float maxDistance, bool e _scaleWithParent(scaleWithParent), _physicsEngine(physicsEngine) { if (collisionRegion.shouldComputeShapeInfo()) { - _cachedResource = DependencyManager::get()->getCollisionGeometryResource(collisionRegion.modelURL); + _cachedResource = DependencyManager::get()->getCollisionModelResource(collisionRegion.modelURL); } _mathPick.loaded = isLoaded(); } diff --git a/interface/src/raypick/CollisionPick.h b/interface/src/raypick/CollisionPick.h index 24317bf19a..115ee1727e 100644 --- a/interface/src/raypick/CollisionPick.h +++ b/interface/src/raypick/CollisionPick.h @@ -63,14 +63,14 @@ protected: bool isLoaded() const; // Returns true if _mathPick.shapeInfo is valid. Otherwise, attempts to get the _mathPick ready for use. bool getShapeInfoReady(const CollisionRegion& pick); - void computeShapeInfo(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource); - void computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource); + void computeShapeInfo(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource); + void computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource); void filterIntersections(std::vector& intersections) const; bool _scaleWithParent; PhysicsEnginePointer _physicsEngine; - QSharedPointer _cachedResource; + QSharedPointer _cachedResource; // Options for what information to get from collision results bool _includeNormals; diff --git a/libraries/avatars-renderer/src/avatars-renderer/Avatar.cpp b/libraries/avatars-renderer/src/avatars-renderer/Avatar.cpp index 75a7693de8..e9ae278710 100644 --- a/libraries/avatars-renderer/src/avatars-renderer/Avatar.cpp +++ b/libraries/avatars-renderer/src/avatars-renderer/Avatar.cpp @@ -955,7 +955,7 @@ void Avatar::simulateAttachments(float deltaTime) { bool texturesLoaded = _attachmentModelsTexturesLoaded.at(i); // Watch for texture loading - if (!texturesLoaded && model->getGeometry() && model->getGeometry()->areTexturesLoaded()) { + if (!texturesLoaded && model->getNetworkModel() && model->getNetworkModel()->areTexturesLoaded()) { _attachmentModelsTexturesLoaded[i] = true; model->updateRenderItems(); } diff --git a/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp b/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp index ac9819803e..bda7fab0a4 100644 --- a/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp +++ b/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp @@ -177,7 +177,7 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) { // FIXME: This texture loading logic should probably live in Avatar, to mirror RenderableModelEntityItem, // but Avatars don't get updates in the same way - if (!_texturesLoaded && 
getGeometry() && getGeometry()->areTexturesLoaded()) { + if (!_texturesLoaded && getNetworkModel() && getNetworkModel()->areTexturesLoaded()) { _texturesLoaded = true; updateRenderItems(); } diff --git a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp index 7c9e8e5f13..e75b28f9ed 100644 --- a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp @@ -282,7 +282,7 @@ bool RenderableModelEntityItem::findDetailedParabolaIntersection(const glm::vec3 } void RenderableModelEntityItem::fetchCollisionGeometryResource() { - _collisionGeometryResource = DependencyManager::get()->getCollisionGeometryResource(getCollisionShapeURL()); + _collisionGeometryResource = DependencyManager::get()->getCollisionModelResource(getCollisionShapeURL()); } bool RenderableModelEntityItem::unableToLoadCollisionShape() { @@ -504,7 +504,7 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { meshes.push_back(hfmMesh._mesh); } } else { - meshes = model->getGeometry()->getMeshes(); + meshes = model->getNetworkModel()->getMeshes(); } int32_t numMeshes = (int32_t)(meshes.size()); @@ -1431,7 +1431,7 @@ void ModelEntityRenderer::doRenderUpdateSynchronousTyped(const ScenePointer& sce } } - if (!_texturesLoaded && model->getGeometry() && model->getGeometry()->areTexturesLoaded()) { + if (!_texturesLoaded && model->getNetworkModel() && model->getNetworkModel()->areTexturesLoaded()) { withWriteLock([&] { _texturesLoaded = true; }); diff --git a/libraries/entities-renderer/src/RenderableModelEntityItem.h b/libraries/entities-renderer/src/RenderableModelEntityItem.h index c32dad901f..d9a8c08890 100644 --- a/libraries/entities-renderer/src/RenderableModelEntityItem.h +++ b/libraries/entities-renderer/src/RenderableModelEntityItem.h @@ -120,7 +120,7 @@ private: bool readyToAnimate() const; void fetchCollisionGeometryResource(); - GeometryResource::Pointer _collisionGeometryResource; + ModelResource::Pointer _collisionGeometryResource; std::vector _jointMap; QVariantMap _originalTextures; bool _jointMapCompleted { false }; diff --git a/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp b/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp index a97cc7c84c..853a8385f0 100644 --- a/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.cpp @@ -194,7 +194,7 @@ float importanceSample3DDimension(float startDim) { } ParticleEffectEntityRenderer::CpuParticle ParticleEffectEntityRenderer::createParticle(uint64_t now, const Transform& baseTransform, const particle::Properties& particleProperties, - const ShapeType& shapeType, const GeometryResource::Pointer& geometryResource, + const ShapeType& shapeType, const ModelResource::Pointer& geometryResource, const TriangleInfo& triangleInfo) { CpuParticle particle; @@ -379,7 +379,7 @@ void ParticleEffectEntityRenderer::stepSimulation() { particle::Properties particleProperties; ShapeType shapeType; - GeometryResource::Pointer geometryResource; + ModelResource::Pointer geometryResource; withReadLock([&] { particleProperties = _particleProperties; shapeType = _shapeType; @@ -482,7 +482,7 @@ void ParticleEffectEntityRenderer::fetchGeometryResource() { if (hullURL.isEmpty()) { _geometryResource.reset(); } else { - _geometryResource = DependencyManager::get()->getCollisionGeometryResource(hullURL); + 
_geometryResource = DependencyManager::get()->getCollisionModelResource(hullURL); } } diff --git a/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.h b/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.h index cc907f2b1d..d585104f5c 100644 --- a/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.h +++ b/libraries/entities-renderer/src/RenderableParticleEffectEntityItem.h @@ -89,7 +89,7 @@ private: } _triangleInfo; static CpuParticle createParticle(uint64_t now, const Transform& baseTransform, const particle::Properties& particleProperties, - const ShapeType& shapeType, const GeometryResource::Pointer& geometryResource, + const ShapeType& shapeType, const ModelResource::Pointer& geometryResource, const TriangleInfo& triangleInfo); void stepSimulation(); @@ -108,7 +108,7 @@ private: QString _compoundShapeURL; void fetchGeometryResource(); - GeometryResource::Pointer _geometryResource; + ModelResource::Pointer _geometryResource; NetworkTexturePointer _networkTexture; ScenePointer _scene; diff --git a/libraries/entities/src/ZoneEntityItem.cpp b/libraries/entities/src/ZoneEntityItem.cpp index 0771d9ad54..d54998e74f 100644 --- a/libraries/entities/src/ZoneEntityItem.cpp +++ b/libraries/entities/src/ZoneEntityItem.cpp @@ -345,7 +345,7 @@ bool ZoneEntityItem::findDetailedParabolaIntersection(const glm::vec3& origin, c } bool ZoneEntityItem::contains(const glm::vec3& point) const { - GeometryResource::Pointer resource = _shapeResource; + ModelResource::Pointer resource = _shapeResource; if (_shapeType == SHAPE_TYPE_COMPOUND && resource) { if (resource->isLoaded()) { const HFMModel& hfmModel = resource->getHFMModel(); @@ -462,7 +462,7 @@ void ZoneEntityItem::fetchCollisionGeometryResource() { if (hullURL.isEmpty()) { _shapeResource.reset(); } else { - _shapeResource = DependencyManager::get()->getCollisionGeometryResource(hullURL); + _shapeResource = DependencyManager::get()->getCollisionModelResource(hullURL); } } diff --git a/libraries/entities/src/ZoneEntityItem.h b/libraries/entities/src/ZoneEntityItem.h index 34ad47f095..d6647e701e 100644 --- a/libraries/entities/src/ZoneEntityItem.h +++ b/libraries/entities/src/ZoneEntityItem.h @@ -167,7 +167,7 @@ protected: static bool _zonesArePickable; void fetchCollisionGeometryResource(); - GeometryResource::Pointer _shapeResource; + ModelResource::Pointer _shapeResource; }; diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 1752c2f024..0299648294 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1377,11 +1377,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // Now that we've initialized the joint, we can define the transform // modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate - joint.globalTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; + joint.localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; + joint.globalTransform = joint.localTransform; if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; - // joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; - 
joint.globalTransform = parentJoint.globalTransform * joint.globalTransform; + // SG Change: i think this not correct and the [parent]*[local] is the correct answer here + //joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; + joint.globalTransform = parentJoint.globalTransform * joint.localTransform; if (parentJoint.hasGeometricOffset) { // Per the FBX standard, geometric offset should not propagate to children. // However, we must be careful when modifying the behavior of FBXSerializer. @@ -1396,13 +1398,21 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.globalTransform = joint.globalTransform * geometricOffset; } - // accumulate local transforms + // TODO: Remove these lines, just here to make sure we are not breaking the transform computation // QString modelID = fbxModels.contains(it.key()) ? it.key() : _connectionParentMap.value(it.key()); glm::mat4 anotherModelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url); - /* if (anotherModelTransform != joint.globalTransform) { - joint.globalTransform = anotherModelTransform; + auto col0 = (glm::epsilonNotEqual(anotherModelTransform[0], joint.globalTransform[0], 0.001f)); + auto col1 = (glm::epsilonNotEqual(anotherModelTransform[1], joint.globalTransform[1], 0.001f)); + auto col2 = (glm::epsilonNotEqual(anotherModelTransform[2], joint.globalTransform[2], 0.001f)); + auto col3 = (glm::epsilonNotEqual(anotherModelTransform[3], joint.globalTransform[3], 0.001f)); + if ( glm::any(col0) + || glm::any(col1) + || glm::any(col2) + || glm::any(col3)) { + anotherModelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url); + // joint.globalTransform = anotherModelTransform; } -*/ + hfmModel.joints.push_back(joint); } diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 08410f17f2..96030672f2 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -120,6 +120,7 @@ public: glm::vec3 geometricScaling; // globalTransform is the transform of the joint with all parent transforms applied, plus the geometric offset + glm::mat4 localTransform; glm::mat4 globalTransform; }; diff --git a/libraries/material-networking/src/material-networking/MaterialCache.h b/libraries/material-networking/src/material-networking/MaterialCache.h index aa103adb1e..31bbd02198 100644 --- a/libraries/material-networking/src/material-networking/MaterialCache.h +++ b/libraries/material-networking/src/material-networking/MaterialCache.h @@ -50,7 +50,7 @@ public: Textures getTextures() { return _textures; } protected: - friend class Geometry; + friend class NetworkModel; Textures _textures; diff --git a/libraries/model-networking/src/model-networking/ModelCache.cpp b/libraries/model-networking/src/model-networking/ModelCache.cpp index 1fcfcfcc70..e1df0f95c7 100644 --- a/libraries/model-networking/src/model-networking/ModelCache.cpp +++ b/libraries/model-networking/src/model-networking/ModelCache.cpp @@ -203,23 +203,23 @@ QUrl resolveTextureBaseUrl(const QUrl& url, const QUrl& textureBaseUrl) { return textureBaseUrl.isValid() ? 
textureBaseUrl : url; } -GeometryResource::GeometryResource(const GeometryResource& other) : +ModelResource::ModelResource(const ModelResource& other) : Resource(other), - Geometry(other), + NetworkModel(other), _modelLoader(other._modelLoader), _mappingPair(other._mappingPair), _textureBaseURL(other._textureBaseURL), _combineParts(other._combineParts), _isCacheable(other._isCacheable) { - if (other._geometryResource) { + if (other._modelResource) { _startedLoading = false; } } -void GeometryResource::downloadFinished(const QByteArray& data) { +void ModelResource::downloadFinished(const QByteArray& data) { if (_effectiveBaseURL.fileName().toLower().endsWith(".fst")) { - PROFILE_ASYNC_BEGIN(resource_parse_geometry, "GeometryResource::downloadFinished", _url.toString(), { { "url", _url.toString() } }); + PROFILE_ASYNC_BEGIN(resource_parse_geometry, "ModelResource::downloadFinished", _url.toString(), { { "url", _url.toString() } }); // store parsed contents of FST file _mapping = FSTReader::readMapping(data); @@ -267,19 +267,19 @@ void GeometryResource::downloadFinished(const QByteArray& data) { auto modelCache = DependencyManager::get(); GeometryExtra extra { GeometryMappingPair(base, _mapping), _textureBaseURL, false }; - // Get the raw GeometryResource - _geometryResource = modelCache->getResource(url, QUrl(), &extra, std::hash()(extra)).staticCast(); + // Get the raw ModelResource + _modelResource = modelCache->getResource(url, QUrl(), &extra, std::hash()(extra)).staticCast(); // Avoid caching nested resources - their references will be held by the parent - _geometryResource->_isCacheable = false; + _modelResource->_isCacheable = false; - if (_geometryResource->isLoaded()) { - onGeometryMappingLoaded(!_geometryResource->getURL().isEmpty()); + if (_modelResource->isLoaded()) { + onGeometryMappingLoaded(!_modelResource->getURL().isEmpty()); } else { if (_connection) { disconnect(_connection); } - _connection = connect(_geometryResource.data(), &Resource::finished, this, &GeometryResource::onGeometryMappingLoaded); + _connection = connect(_modelResource.data(), &Resource::finished, this, &ModelResource::onGeometryMappingLoaded); } } } else { @@ -291,32 +291,32 @@ void GeometryResource::downloadFinished(const QByteArray& data) { } } -void GeometryResource::onGeometryMappingLoaded(bool success) { - if (success && _geometryResource) { - _hfmModel = _geometryResource->_hfmModel; - _materialMapping = _geometryResource->_materialMapping; - _meshParts = _geometryResource->_meshParts; - _meshes = _geometryResource->_meshes; - _materials = _geometryResource->_materials; +void ModelResource::onGeometryMappingLoaded(bool success) { + if (success && _modelResource) { + _hfmModel = _modelResource->_hfmModel; + _materialMapping = _modelResource->_materialMapping; + _meshParts = _modelResource->_meshParts; + _meshes = _modelResource->_meshes; + _materials = _modelResource->_materials; // Avoid holding onto extra references - _geometryResource.reset(); + _modelResource.reset(); // Make sure connection will not trigger again disconnect(_connection); // FIXME Should not have to do this } - PROFILE_ASYNC_END(resource_parse_geometry, "GeometryResource::downloadFinished", _url.toString()); + PROFILE_ASYNC_END(resource_parse_geometry, "ModelResource::downloadFinished", _url.toString()); finishedLoading(success); } -void GeometryResource::setExtra(void* extra) { +void ModelResource::setExtra(void* extra) { const GeometryExtra* geometryExtra = static_cast(extra); _mappingPair = geometryExtra ? 
geometryExtra->mapping : GeometryMappingPair(QUrl(), QVariantHash()); _textureBaseURL = geometryExtra ? resolveTextureBaseUrl(_url, geometryExtra->textureBaseUrl) : QUrl(); _combineParts = geometryExtra ? geometryExtra->combineParts : true; } -void GeometryResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const MaterialMapping& materialMapping) { +void ModelResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const MaterialMapping& materialMapping) { // Assume ownership of the processed HFMModel _hfmModel = hfmModel; _materialMapping = materialMapping; @@ -348,12 +348,12 @@ void GeometryResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const M finishedLoading(true); } -void GeometryResource::deleter() { +void ModelResource::deleter() { resetTextures(); Resource::deleter(); } -void GeometryResource::setTextures() { +void ModelResource::setTextures() { if (_hfmModel) { for (const HFMMaterial& material : _hfmModel->materials) { _materials.push_back(std::make_shared(material, _textureBaseURL)); @@ -361,7 +361,7 @@ void GeometryResource::setTextures() { } } -void GeometryResource::resetTextures() { +void ModelResource::resetTextures() { _materials.clear(); } @@ -377,17 +377,17 @@ ModelCache::ModelCache() { } QSharedPointer ModelCache::createResource(const QUrl& url) { - return QSharedPointer(new GeometryResource(url, _modelLoader), &GeometryResource::deleter); + return QSharedPointer(new ModelResource(url, _modelLoader), &ModelResource::deleter); } QSharedPointer ModelCache::createResourceCopy(const QSharedPointer& resource) { - return QSharedPointer(new GeometryResource(*resource.staticCast()), &GeometryResource::deleter); + return QSharedPointer(new ModelResource(*resource.staticCast()), &ModelResource::deleter); } -GeometryResource::Pointer ModelCache::getGeometryResource(const QUrl& url, const GeometryMappingPair& mapping, const QUrl& textureBaseUrl) { +ModelResource::Pointer ModelCache::getModelResource(const QUrl& url, const GeometryMappingPair& mapping, const QUrl& textureBaseUrl) { bool combineParts = true; GeometryExtra geometryExtra = { mapping, textureBaseUrl, combineParts }; - GeometryResource::Pointer resource = getResource(url, QUrl(), &geometryExtra, std::hash()(geometryExtra)).staticCast(); + ModelResource::Pointer resource = getResource(url, QUrl(), &geometryExtra, std::hash()(geometryExtra)).staticCast(); if (resource) { if (resource->isLoaded() && resource->shouldSetTextures()) { resource->setTextures(); @@ -396,12 +396,12 @@ GeometryResource::Pointer ModelCache::getGeometryResource(const QUrl& url, const return resource; } -GeometryResource::Pointer ModelCache::getCollisionGeometryResource(const QUrl& url, +ModelResource::Pointer ModelCache::getCollisionModelResource(const QUrl& url, const GeometryMappingPair& mapping, const QUrl& textureBaseUrl) { bool combineParts = false; GeometryExtra geometryExtra = { mapping, textureBaseUrl, combineParts }; - GeometryResource::Pointer resource = getResource(url, QUrl(), &geometryExtra, std::hash()(geometryExtra)).staticCast(); + ModelResource::Pointer resource = getResource(url, QUrl(), &geometryExtra, std::hash()(geometryExtra)).staticCast(); if (resource) { if (resource->isLoaded() && resource->shouldSetTextures()) { resource->setTextures(); @@ -410,7 +410,7 @@ GeometryResource::Pointer ModelCache::getCollisionGeometryResource(const QUrl& u return resource; } -const QVariantMap Geometry::getTextures() const { +const QVariantMap NetworkModel::getTextures() const { QVariantMap textures; for (const auto& 
material : _materials) { for (const auto& texture : material->_textures) { @@ -424,22 +424,22 @@ const QVariantMap Geometry::getTextures() const { } // FIXME: The materials should only be copied when modified, but the Model currently caches the original -Geometry::Geometry(const Geometry& geometry) { - _hfmModel = geometry._hfmModel; - _materialMapping = geometry._materialMapping; - _meshes = geometry._meshes; - _meshParts = geometry._meshParts; +NetworkModel::NetworkModel(const NetworkModel& networkModel) { + _hfmModel = networkModel._hfmModel; + _materialMapping = networkModel._materialMapping; + _meshes = networkModel._meshes; + _meshParts = networkModel._meshParts; - _materials.reserve(geometry._materials.size()); - for (const auto& material : geometry._materials) { + _materials.reserve(networkModel._materials.size()); + for (const auto& material : networkModel._materials) { _materials.push_back(std::make_shared(*material)); } - _animGraphOverrideUrl = geometry._animGraphOverrideUrl; - _mapping = geometry._mapping; + _animGraphOverrideUrl = networkModel._animGraphOverrideUrl; + _mapping = networkModel._mapping; } -void Geometry::setTextures(const QVariantMap& textureMap) { +void NetworkModel::setTextures(const QVariantMap& textureMap) { if (_meshes->size() > 0) { for (auto& material : _materials) { // Check if any material textures actually changed @@ -447,7 +447,7 @@ void Geometry::setTextures(const QVariantMap& textureMap) { [&textureMap](const NetworkMaterial::Textures::value_type& it) { return it.second.texture && textureMap.contains(it.second.name); })) { // FIXME: The Model currently caches the materials (waste of space!) - // so they must be copied in the Geometry copy-ctor + // so they must be copied in the NetworkModel copy-ctor // if (material->isOriginal()) { // // Copy the material to avoid mutating the cached version // material = std::make_shared(*material); @@ -461,11 +461,11 @@ void Geometry::setTextures(const QVariantMap& textureMap) { // If we only use cached textures, they should all be loaded areTexturesLoaded(); } else { - qCWarning(modelnetworking) << "Ignoring setTextures(); geometry not ready"; + qCWarning(modelnetworking) << "Ignoring setTextures(); NetworkModel not ready"; } } -bool Geometry::areTexturesLoaded() const { +bool NetworkModel::areTexturesLoaded() const { if (!_areTexturesLoaded) { for (auto& material : _materials) { if (material->isMissingTexture()) { @@ -500,30 +500,35 @@ bool Geometry::areTexturesLoaded() const { return true; } -const std::shared_ptr Geometry::getShapeMaterial(int partID) const { - if ((partID >= 0) && (partID < (int)_meshParts->size())) { +const std::shared_ptr NetworkModel::getShapeMaterial(int partID) const { + /* if ((partID >= 0) && (partID < (int)_meshParts->size())) { int materialID = _meshParts->at(partID)->materialID; if ((materialID >= 0) && (materialID < (int)_materials.size())) { return _materials[materialID]; } + }*/ + + auto materialID = getHFMModel().shapes[partID].material; + if ((materialID >= 0) && (materialID < (int)_materials.size())) { + return _materials[materialID]; } return nullptr; } -void GeometryResourceWatcher::startWatching() { - connect(_resource.data(), &Resource::finished, this, &GeometryResourceWatcher::resourceFinished); - connect(_resource.data(), &Resource::onRefresh, this, &GeometryResourceWatcher::resourceRefreshed); +void ModelResourceWatcher::startWatching() { + connect(_resource.data(), &Resource::finished, this, &ModelResourceWatcher::resourceFinished); + connect(_resource.data(), 
&Resource::onRefresh, this, &ModelResourceWatcher::resourceRefreshed); if (_resource->isLoaded()) { resourceFinished(!_resource->getURL().isEmpty()); } } -void GeometryResourceWatcher::stopWatching() { - disconnect(_resource.data(), &Resource::finished, this, &GeometryResourceWatcher::resourceFinished); - disconnect(_resource.data(), &Resource::onRefresh, this, &GeometryResourceWatcher::resourceRefreshed); +void ModelResourceWatcher::stopWatching() { + disconnect(_resource.data(), &Resource::finished, this, &ModelResourceWatcher::resourceFinished); + disconnect(_resource.data(), &Resource::onRefresh, this, &ModelResourceWatcher::resourceRefreshed); } -void GeometryResourceWatcher::setResource(GeometryResource::Pointer resource) { +void ModelResourceWatcher::setResource(ModelResource::Pointer resource) { if (_resource) { stopWatching(); } @@ -537,14 +542,14 @@ void GeometryResourceWatcher::setResource(GeometryResource::Pointer resource) { } } -void GeometryResourceWatcher::resourceFinished(bool success) { +void ModelResourceWatcher::resourceFinished(bool success) { if (success) { - _geometryRef = std::make_shared(*_resource); + _networkModelRef = std::make_shared(*_resource); } emit finished(success); } -void GeometryResourceWatcher::resourceRefreshed() { +void ModelResourceWatcher::resourceRefreshed() { // FIXME: Model is not set up to handle a refresh // _instance.reset(); } diff --git a/libraries/model-networking/src/model-networking/ModelCache.h b/libraries/model-networking/src/model-networking/ModelCache.h index 5b78c18184..9fdae339f7 100644 --- a/libraries/model-networking/src/model-networking/ModelCache.h +++ b/libraries/model-networking/src/model-networking/ModelCache.h @@ -27,14 +27,14 @@ class MeshPart; using GeometryMappingPair = std::pair; Q_DECLARE_METATYPE(GeometryMappingPair) -class Geometry { +class NetworkModel { public: - using Pointer = std::shared_ptr; - using WeakPointer = std::weak_ptr; + using Pointer = std::shared_ptr; + using WeakPointer = std::weak_ptr; - Geometry() = default; - Geometry(const Geometry& geometry); - virtual ~Geometry() = default; + NetworkModel() = default; + NetworkModel(const NetworkModel& geometry); + virtual ~NetworkModel() = default; // Immutable over lifetime using GeometryMeshes = std::vector>; @@ -76,22 +76,22 @@ private: }; /// A geometry loaded from the network. 
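Note: NetworkModel::getShapeMaterial() above now resolves a part's material by indexing the shape's material field straight into the flat _materials list, instead of going through the removed MeshPart bookkeeping. A minimal, self-contained sketch of that lookup follows; Shape carries only the fields the lookup touches, and NetworkMaterialStub plus the free-function form are illustrative stand-ins for the project's NetworkMaterial and NetworkModel::getShapeMaterial(), not the real types.

    // Sketch only: shape-indexed material lookup in the spirit of NetworkModel::getShapeMaterial().
    #include <cstdint>
    #include <memory>
    #include <vector>

    struct Shape { uint32_t mesh = 0; uint32_t meshPart = 0; uint32_t material = 0; };
    struct NetworkMaterialStub {};

    std::shared_ptr<NetworkMaterialStub> getShapeMaterialSketch(
            const std::vector<Shape>& shapes,
            const std::vector<std::shared_ptr<NetworkMaterialStub>>& materials,
            size_t shapeID) {
        if (shapeID >= shapes.size()) {
            return nullptr;                      // unknown shape
        }
        uint32_t materialID = shapes[shapeID].material;
        if (materialID < materials.size()) {     // the index is unsigned, so only an upper bound matters
            return materials[materialID];
        }
        return nullptr;                          // shape has no resolved material
    }
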
-class GeometryResource : public Resource, public Geometry { +class ModelResource : public Resource, public NetworkModel { Q_OBJECT public: - using Pointer = QSharedPointer; + using Pointer = QSharedPointer; - GeometryResource(const QUrl& url, const ModelLoader& modelLoader) : Resource(url), _modelLoader(modelLoader) {} - GeometryResource(const GeometryResource& other); + ModelResource(const QUrl& url, const ModelLoader& modelLoader) : Resource(url), _modelLoader(modelLoader) {} + ModelResource(const ModelResource& other); - QString getType() const override { return "Geometry"; } + QString getType() const override { return "Model"; } virtual void deleter() override; virtual void downloadFinished(const QByteArray& data) override; void setExtra(void* extra) override; - virtual bool areTexturesLoaded() const override { return isLoaded() && Geometry::areTexturesLoaded(); } + virtual bool areTexturesLoaded() const override { return isLoaded() && NetworkModel::areTexturesLoaded(); } private slots: void onGeometryMappingLoaded(bool success); @@ -115,21 +115,21 @@ private: QUrl _textureBaseURL; bool _combineParts; - GeometryResource::Pointer _geometryResource; + ModelResource::Pointer _modelResource; QMetaObject::Connection _connection; bool _isCacheable{ true }; }; -class GeometryResourceWatcher : public QObject { +class ModelResourceWatcher : public QObject { Q_OBJECT public: - using Pointer = std::shared_ptr; + using Pointer = std::shared_ptr; - GeometryResourceWatcher() = delete; - GeometryResourceWatcher(Geometry::Pointer& geometryPtr) : _geometryRef(geometryPtr) {} + ModelResourceWatcher() = delete; + ModelResourceWatcher(NetworkModel::Pointer& geometryPtr) : _networkModelRef(geometryPtr) {} - void setResource(GeometryResource::Pointer resource); + void setResource(ModelResource::Pointer resource); QUrl getURL() const { return (bool)_resource ? _resource->getURL() : QUrl(); } int getResourceDownloadAttempts() { return _resource ? _resource->getDownloadAttempts() : 0; } @@ -147,8 +147,8 @@ private slots: void resourceRefreshed(); private: - GeometryResource::Pointer _resource; - Geometry::Pointer& _geometryRef; + ModelResource::Pointer _resource; + NetworkModel::Pointer& _networkModelRef; }; /// Stores cached model geometries. 
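Note: with Geometry renamed to NetworkModel and GeometryResource/GeometryResourceWatcher renamed to ModelResource/ModelResourceWatcher, the consumer pattern stays what Model.cpp uses later in this patch: fetch a ModelResource from the ModelCache, hand it to a ModelResourceWatcher, and read the shared NetworkModel once the watcher signals completion. The sketch below restates that flow; ExampleModelHolder is hypothetical, the DependencyManager::get<ModelCache>() template argument is an assumption, and the include is assumed to resolve to libraries/model-networking/src/model-networking/ModelCache.h.

    // Usage sketch only, mirroring the Model/_renderWatcher wiring shown further down.
    #include <QObject>
    #include <QUrl>
    #include "ModelCache.h"  // assumed: model-networking/ModelCache.h on the include path

    class ExampleModelHolder {
    public:
        ExampleModelHolder() : _watcher(_networkModel) {
            // Connect before setResource(): resources that are already loaded report immediately.
            QObject::connect(&_watcher, &ModelResourceWatcher::finished, [this](bool success) {
                if (success && _networkModel) {
                    // Parsed data is now reachable, e.g. _networkModel->getTextures().
                }
            });
        }

        void load(const QUrl& url) {
            // getModelResource() is the renamed ModelCache::getGeometryResource() entry point.
            auto resource = DependencyManager::get<ModelCache>()->getModelResource(url);
            if (resource) {
                _watcher.setResource(resource);  // the watcher fills _networkModel on success
            }
        }

    private:
        NetworkModel::Pointer _networkModel;     // written only by the watcher
        ModelResourceWatcher _watcher;
    };
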
@@ -158,18 +158,18 @@ class ModelCache : public ResourceCache, public Dependency { public: - GeometryResource::Pointer getGeometryResource(const QUrl& url, + ModelResource::Pointer getModelResource(const QUrl& url, const GeometryMappingPair& mapping = GeometryMappingPair(QUrl(), QVariantHash()), const QUrl& textureBaseUrl = QUrl()); - GeometryResource::Pointer getCollisionGeometryResource(const QUrl& url, + ModelResource::Pointer getCollisionModelResource(const QUrl& url, const GeometryMappingPair& mapping = GeometryMappingPair(QUrl(), QVariantHash()), const QUrl& textureBaseUrl = QUrl()); protected: - friend class GeometryResource; + friend class ModelResource; virtual QSharedPointer createResource(const QUrl& url) override; QSharedPointer createResourceCopy(const QSharedPointer& resource) override; diff --git a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index 9cdefa0545..6e8f37d354 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -87,7 +87,7 @@ void CauterizedModel::createRenderItemSet() { for (int partIndex = 0; partIndex < numParts; partIndex++) { auto ptr = std::make_shared(shared_from_this(), i, partIndex, shapeID, transform, offset); _modelMeshRenderItems << std::static_pointer_cast(ptr); - auto material = getGeometry()->getShapeMaterial(shapeID); + auto material = getNetworkModel()->getShapeMaterial(shapeID); _modelMeshMaterialNames.push_back(material ? material->getName() : ""); _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)i }); shapeID++; diff --git a/libraries/render-utils/src/MeshPartPayload.cpp b/libraries/render-utils/src/MeshPartPayload.cpp index 1a6e5dbadc..fcf0ffaa48 100644 --- a/libraries/render-utils/src/MeshPartPayload.cpp +++ b/libraries/render-utils/src/MeshPartPayload.cpp @@ -202,9 +202,13 @@ ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, in assert(model && model->isLoaded()); + auto shape = model->getHFMModel().shapes[shapeIndex]; + assert(shape.mesh == meshIndex); + assert(shape.meshPart == partIndex); + bool useDualQuaternionSkinning = model->getUseDualQuaternionSkinning(); - auto& modelMesh = model->getGeometry()->getMeshes().at(_meshIndex); + auto& modelMesh = model->getNetworkModel()->getMeshes().at(_meshIndex); _meshNumVertices = (int)modelMesh->getNumVertices(); const Model::MeshState& state = model->getMeshState(_meshIndex); @@ -263,7 +267,7 @@ void ModelMeshPartPayload::initCache(const ModelPointer& model) { _hasTangents = !mesh.tangents.isEmpty(); } - auto networkMaterial = model->getGeometry()->getShapeMaterial(_shapeID); + auto networkMaterial = model->getNetworkModel()->getShapeMaterial(_shapeID); if (networkMaterial) { addMaterial(graphics::MaterialLayer(networkMaterial, 0)); } diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index aa3708fb1e..f9e980b2ac 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -42,7 +42,7 @@ using namespace std; int nakedModelPointerTypeId = qRegisterMetaType(); -int weakGeometryResourceBridgePointerTypeId = qRegisterMetaType(); +int weakGeometryResourceBridgePointerTypeId = qRegisterMetaType(); int vec3VectorTypeId = qRegisterMetaType>(); int normalTypeVecTypeId = qRegisterMetaType>("QVector"); float Model::FAKE_DIMENSION_PLACEHOLDER = -1.0f; @@ -71,7 +71,7 @@ Model::Model(QObject* parent, SpatiallyNestable* spatiallyNestableOverride) : setSnapModelToRegistrationPoint(true, 
glm::vec3(0.5f)); - connect(&_renderWatcher, &GeometryResourceWatcher::finished, this, &Model::loadURLFinished); + connect(&_renderWatcher, &ModelResourceWatcher::finished, this, &Model::loadURLFinished); } Model::~Model() { @@ -151,7 +151,7 @@ void Model::setOffset(const glm::vec3& offset) { } void Model::calculateTextureInfo() { - if (!_hasCalculatedTextureInfo && isLoaded() && getGeometry()->areTexturesLoaded() && !_modelMeshRenderItemsMap.isEmpty()) { + if (!_hasCalculatedTextureInfo && isLoaded() && getNetworkModel()->areTexturesLoaded() && !_modelMeshRenderItemsMap.isEmpty()) { size_t textureSize = 0; int textureCount = 0; bool allTexturesLoaded = true; @@ -178,12 +178,12 @@ int Model::getRenderInfoTextureCount() { } bool Model::shouldInvalidatePayloadShapeKey(int meshIndex) { - if (!getGeometry()) { + if (!getNetworkModel()) { return true; } const HFMModel& hfmModel = getHFMModel(); - const auto& networkMeshes = getGeometry()->getMeshes(); + const auto& networkMeshes = getNetworkModel()->getMeshes(); // if our index is ever out of range for either meshes or networkMeshes, then skip it, and set our _meshGroupsKnown // to false to rebuild out mesh groups. if (meshIndex < 0 || meshIndex >= (int)networkMeshes.size() || meshIndex >= (int)hfmModel.meshes.size() || meshIndex >= (int)_meshStates.size()) { @@ -643,8 +643,8 @@ glm::mat4 Model::getWorldToHFMMatrix() const { // TODO: deprecate and remove MeshProxyList Model::getMeshes() const { MeshProxyList result; - const Geometry::Pointer& renderGeometry = getGeometry(); - const Geometry::GeometryMeshes& meshes = renderGeometry->getMeshes(); + const NetworkModel::Pointer& renderGeometry = getNetworkModel(); + const NetworkModel::GeometryMeshes& meshes = renderGeometry->getMeshes(); if (!isLoaded()) { return result; @@ -772,7 +772,7 @@ scriptable::ScriptableModelBase Model::getScriptableModel() { int numParts = (int)mesh->getNumParts(); for (int partIndex = 0; partIndex < numParts; partIndex++) { auto& materialName = _modelMeshMaterialNames[shapeID]; - result.appendMaterial(graphics::MaterialLayer(getGeometry()->getShapeMaterial(shapeID), 0), shapeID, materialName); + result.appendMaterial(graphics::MaterialLayer(getNetworkModel()->getShapeMaterial(shapeID), 0), shapeID, materialName); { std::unique_lock lock(_materialMappingMutex); @@ -1196,7 +1196,7 @@ void Model::setURL(const QUrl& url) { invalidCalculatedMeshBoxes(); deleteGeometry(); - auto resource = DependencyManager::get()->getGeometryResource(url); + auto resource = DependencyManager::get()->getModelResource(url); if (resource) { resource->setLoadPriority(this, _loadingPriority); _renderWatcher.setResource(resource); @@ -1487,7 +1487,7 @@ void Model::createRenderItemSet() { int numParts = (int)mesh->getNumParts(); for (int partIndex = 0; partIndex < numParts; partIndex++) { _modelMeshRenderItems << std::make_shared(shared_from_this(), i, partIndex, shapeID, transform, offset); - auto material = getGeometry()->getShapeMaterial(shapeID); + auto material = getNetworkModel()->getShapeMaterial(shapeID); _modelMeshMaterialNames.push_back(material ? 
material->getName() : ""); _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)i }); shapeID++; @@ -1680,7 +1680,7 @@ void Model::removeMaterial(graphics::MaterialPointer material, const std::string AbstractViewStateInterface::instance()->getMain3DScene()->enqueueTransaction(transaction); } -class CollisionRenderGeometry : public Geometry { +class CollisionRenderGeometry : public NetworkModel { public: CollisionRenderGeometry(graphics::MeshPointer mesh) { _hfmModel = std::make_shared(); @@ -1838,7 +1838,7 @@ void Blender::run() { bool Model::maybeStartBlender() { if (isLoaded()) { - QThreadPool::globalInstance()->start(new Blender(getThisPointer(), getGeometry()->getConstHFMModelPointer(), + QThreadPool::globalInstance()->start(new Blender(getThisPointer(), getNetworkModel()->getConstHFMModelPointer(), ++_blendNumber, _blendshapeCoefficients)); return true; } diff --git a/libraries/render-utils/src/Model.h b/libraries/render-utils/src/Model.h index 2ea1f87fae..79ddaeb68d 100644 --- a/libraries/render-utils/src/Model.h +++ b/libraries/render-utils/src/Model.h @@ -178,7 +178,7 @@ public: virtual void updateClusterMatrices(); /// Returns a reference to the shared geometry. - const Geometry::Pointer& getGeometry() const { return _renderGeometry; } + const NetworkModel::Pointer& getNetworkModel() const { return _renderGeometry; } const QVariantMap getTextures() const { assert(isLoaded()); return _renderGeometry->getTextures(); } Q_INVOKABLE virtual void setTextures(const QVariantMap& textures); @@ -391,9 +391,9 @@ protected: /// \return true if joint exists bool getJointPosition(int jointIndex, glm::vec3& position) const; - Geometry::Pointer _renderGeometry; // only ever set by its watcher + NetworkModel::Pointer _renderGeometry; // only ever set by its watcher - GeometryResourceWatcher _renderWatcher; + ModelResourceWatcher _renderWatcher; SpatiallyNestable* _spatiallyNestableOverride; @@ -515,7 +515,7 @@ private: }; Q_DECLARE_METATYPE(ModelPointer) -Q_DECLARE_METATYPE(Geometry::WeakPointer) +Q_DECLARE_METATYPE(NetworkModel::WeakPointer) Q_DECLARE_METATYPE(BlendshapeOffset) /// Handle management of pending models that need blending From da4ffcd91b5f7f0348a46015b65b1a27a81d6498 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 4 Oct 2019 14:51:55 -0700 Subject: [PATCH 039/121] GLTF WIP --- libraries/fbx/src/GLTFSerializer.cpp | 29 +++++++--------------------- 1 file changed, 7 insertions(+), 22 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 7fdbcce141..70e765c23a 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1005,8 +1005,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& // Build joints HFMJoint joint; hfmModel.jointIndices["x"] = numNodes; - QVector globalTransforms; - globalTransforms.resize(numNodes); for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { auto& node = _file.nodes[nodeIndex]; @@ -1022,10 +1020,12 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& joint.rotation = glmExtractRotation(joint.transform); glm::vec3 scale = extractScale(joint.transform); joint.postTransform = glm::scale(glm::mat4(), scale); - globalTransforms[nodeIndex] = joint.transform; - if (joint.parentIndex != -1) { - globalTransforms[nodeIndex] = globalTransforms[joint.parentIndex] * globalTransforms[nodeIndex]; + joint.globalTransform = joint.transform; + // Nodes are sorted, so we can apply the full transform just 
by getting the global transform of the already defined parent + if (joint.parentIndex != -1 && joint.parentIndex != nodeIndex) { + const auto& parentJoint = hfmModel.joints[(size_t)nodeIndex]; + joint.globalTransform = parentJoint.globalTransform * joint.globalTransform; } joint.name = node.name; @@ -1368,7 +1368,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& mesh.normals.push_back(glm::vec3(normals[n], normals[n + 1], normals[n + 2])); } - // TODO: add correct tangent generation if (tangents.size() == partVerticesCount * tangentStride) { mesh.tangents.reserve(partVerticesCount); for (int n = 0; n < tangents.size(); n += tangentStride) { @@ -1582,22 +1581,8 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } } } - } - -#if 0 - for(const glm::vec3& vertex : mesh.vertices) { - glm::vec3 transformedVertex = glm::vec3(globalTransforms[nodeIndex] * glm::vec4(vertex, 1.0f)); - mesh.meshExtents.addPoint(transformedVertex); - hfmModel.meshExtents.addPoint(transformedVertex); - } -#endif + } } - - // Add epsilon to mesh extents to compensate for planar meshes - mesh.meshExtents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON); - mesh.meshExtents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); - hfmModel.meshExtents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON); - hfmModel.meshExtents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); } @@ -1611,7 +1596,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& int primCount = (int)mesh.primitives.size(); for (int primIndex = 0; primIndex < primCount; ++primIndex) { const auto& primitive = mesh.primitives[primIndex]; - hfmModel.shapes.push_back({}); + hfmModel.shapes.emplace_back(); auto& hfmShape = hfmModel.shapes.back(); hfmShape.transform = nodeIndex; hfmShape.mesh = node.mesh; From 09f584818b0b9dae93a03f4dd7020689f795b09d Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 4 Oct 2019 14:15:44 -0700 Subject: [PATCH 040/121] Add extent calculation utilities, CalculateExtentsTask, make tangent calculation more lenient --- libraries/hfm/src/hfm/HFMModelMath.cpp | 66 +++++++++++++++++++ libraries/hfm/src/hfm/HFMModelMath.h | 30 +++++++++ .../model-baker/src/model-baker/Baker.cpp | 17 ++++- .../src/model-baker/CalculateExtentsTask.cpp | 41 ++++++++++++ .../src/model-baker/CalculateExtentsTask.h | 29 ++++++++ .../model-baker/CalculateMeshTangentsTask.cpp | 2 +- 6 files changed, 181 insertions(+), 4 deletions(-) create mode 100644 libraries/hfm/src/hfm/HFMModelMath.cpp create mode 100644 libraries/hfm/src/hfm/HFMModelMath.h create mode 100644 libraries/model-baker/src/model-baker/CalculateExtentsTask.cpp create mode 100644 libraries/model-baker/src/model-baker/CalculateExtentsTask.h diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp new file mode 100644 index 0000000000..8812163fe2 --- /dev/null +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -0,0 +1,66 @@ +// +// HFMModelMath.cpp +// model-baker/src/model-baker +// +// Created by Sabrina Shanman on 2019/10/04. +// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include "HFMModelMath.h" + +namespace hfm { + +void forEachIndex(const hfm::MeshPart& meshPart, std::function func) { + for (int i = 0; i <= meshPart.quadIndices.size() - 4; i += 4) { + func((uint32_t)meshPart.quadIndices[i]); + func((uint32_t)meshPart.quadIndices[i+1]); + func((uint32_t)meshPart.quadIndices[i+2]); + func((uint32_t)meshPart.quadIndices[i+3]); + } + for (int i = 0; i <= meshPart.triangleIndices.size() - 3; i += 3) { + func((uint32_t)meshPart.triangleIndices[i]); + func((uint32_t)meshPart.triangleIndices[i+1]); + func((uint32_t)meshPart.triangleIndices[i+2]); + } +} + +void thickenFlatExtents(Extents& extents) { + // Add epsilon to extents to compensate for flat plane + extents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON); + extents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); +} + +void calculateExtentsForShape(hfm::Shape& shape, const std::vector& meshes, const std::vector joints) { + auto& shapeExtents = shape.transformedExtents; + shapeExtents.reset(); + + const auto& mesh = meshes[shape.mesh]; + const auto& meshPart = mesh.parts[shape.meshPart]; + + glm::mat4 globalTransform = joints[shape.transform].globalTransform; + forEachIndex(meshPart, [&](int32_t idx){ + if (mesh.vertices.size() <= idx) { + return; + } + const glm::vec3& vertex = mesh.vertices[idx]; + const glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f)); + shapeExtents.addPoint(vertex); + }); + + thickenFlatExtents(shapeExtents); +} + +void calculateExtentsForModel(Extents& modelExtents, const std::vector& shapes) { + modelExtents.reset(); + + for (size_t i = 0; i < shapes.size(); ++i) { + const auto& shape = shapes[i]; + const auto& shapeExtents = shape.transformedExtents; + modelExtents.addExtents(shapeExtents); + } +} + +}; diff --git a/libraries/hfm/src/hfm/HFMModelMath.h b/libraries/hfm/src/hfm/HFMModelMath.h new file mode 100644 index 0000000000..d1e3c09763 --- /dev/null +++ b/libraries/hfm/src/hfm/HFMModelMath.h @@ -0,0 +1,30 @@ +// +// HFMModelMath.h +// model-baker/src/model-baker +// +// Created by Sabrina Shanman on 2019/10/04. +// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_hfm_ModelMath_h +#define hifi_hfm_ModelMath_h + +#include "HFM.h" + +namespace hfm { + +void forEachIndex(const hfm::MeshPart& meshPart, std::function func); + +void initializeExtents(Extents& extents); + +// This can't be moved to model-baker until +void calculateExtentsForShape(hfm::Shape& shape, const std::vector& meshes, const std::vector joints); + +void calculateExtentsForModel(Extents& modelExtents, const std::vector& shapes); + +}; + +#endif // #define hifi_hfm_ModelMath_h diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index ccb5e1816f..1b7242f2d4 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -21,6 +21,7 @@ #include "CalculateBlendshapeNormalsTask.h" #include "CalculateBlendshapeTangentsTask.h" #include "PrepareJointsTask.h" +#include "CalculateExtentsTask.h" #include "BuildDracoMeshTask.h" #include "ParseFlowDataTask.h" @@ -29,7 +30,7 @@ namespace baker { class GetModelPartsTask { public: using Input = hfm::Model::Pointer; - using Output = VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector, std::vector, std::vector, std::vector>; + using Output = VaryingSet9, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector, std::vector, std::vector, std::vector, Extents>; using JobModel = Job::ModelIO; void run(const BakeContextPointer& context, const Input& input, Output& output) { @@ -46,6 +47,7 @@ namespace baker { output.edit5() = hfmModelIn->shapes; output.edit6() = hfmModelIn->dynamicTransforms; output.edit7() = hfmModelIn->deformers; + output.edit8() = hfmModelIn->meshExtents; } }; @@ -106,7 +108,7 @@ namespace baker { class BuildModelTask { public: - using Input = VaryingSet7, std::vector, QMap, QHash, FlowData, std::vector>; + using Input = VaryingSet9, std::vector, QMap, QHash, FlowData, std::vector, std::vector, Extents>; using Output = hfm::Model::Pointer; using JobModel = Job::ModelIO; @@ -118,6 +120,8 @@ namespace baker { hfmModelOut->jointIndices = input.get4(); hfmModelOut->flowData = input.get5(); hfmModelOut->shapeVertices = input.get6(); + hfmModelOut->shapes = input.get7(); + hfmModelOut->meshExtents = input.get8(); // These depend on the ShapeVertices // TODO: Create a task for this rather than calculating it here hfmModelOut->computeKdops(); @@ -145,6 +149,7 @@ namespace baker { const auto shapesIn = modelPartsIn.getN(5); const auto dynamicTransformsIn = modelPartsIn.getN(6); const auto deformersIn = modelPartsIn.getN(7); + const auto modelExtentsIn = modelPartsIn.getN(8); // Calculate normals and tangents for meshes and blendshapes if they do not exist // Note: Normals are never calculated here for OBJ models. OBJ files optionally define normals on a per-face basis, so for consistency normals are calculated beforehand in OBJSerializer. 
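Note: CalculateExtentsTask, wired into the baker below, defers the per-shape work to the hfm::calculateExtentsForShape helper added above: every vertex referenced by the shape's mesh part is transformed by the joint's globalTransform, accumulated into the shape's extents, and the resulting box is padded by EPSILON so planar geometry does not collapse to zero thickness. The following standalone sketch restates that computation, assuming the joint-transformed vertex is the point meant to be accumulated; Extents and EPSILON are replaced by minimal stand-ins so the snippet compiles on its own.

    // Standalone sketch of the per-shape extents computation (illustrative only).
    #include <cfloat>
    #include <cstdint>
    #include <vector>
    #include <glm/glm.hpp>

    constexpr float EPSILON_SKETCH = 0.000001f;   // stand-in for the engine's EPSILON

    struct ExtentsSketch {
        glm::vec3 minimum = glm::vec3(FLT_MAX);
        glm::vec3 maximum = glm::vec3(-FLT_MAX);
        void addPoint(const glm::vec3& p) {
            minimum = glm::min(minimum, p);
            maximum = glm::max(maximum, p);
        }
    };

    ExtentsSketch calculateShapeExtents(const std::vector<glm::vec3>& vertices,
                                        const std::vector<uint32_t>& partIndices,
                                        const glm::mat4& jointGlobalTransform) {
        ExtentsSketch extents;
        for (uint32_t index : partIndices) {
            if (index >= vertices.size()) {
                continue;                         // ignore out-of-range indices, as the helper does
            }
            glm::vec3 transformed = glm::vec3(jointGlobalTransform * glm::vec4(vertices[index], 1.0f));
            extents.addPoint(transformed);
        }
        // thickenFlatExtents(): give flat shapes a non-degenerate box
        extents.minimum -= glm::vec3(EPSILON_SKETCH);
        extents.maximum += glm::vec3(EPSILON_SKETCH);
        return extents;
    }
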
@@ -175,6 +180,12 @@ namespace baker { const auto jointRotationOffsets = jointInfoOut.getN(1); const auto jointIndices = jointInfoOut.getN(2); + // Use transform information to compute extents + const auto calculateExtentsInputs = CalculateExtentsTask::Input(modelExtentsIn, meshesIn, shapesIn, jointsOut).asVarying(); + const auto calculateExtentsOutputs = model.addJob("CalculateExtents", calculateExtentsInputs); + const auto modelExtentsOut = calculateExtentsOutputs.getN(0); + const auto shapesOut = calculateExtentsOutputs.getN(1); + // Parse material mapping const auto parseMaterialMappingInputs = ParseMaterialMappingTask::Input(mapping, materialMappingBaseURL).asVarying(); const auto materialMapping = model.addJob("ParseMaterialMapping", parseMaterialMappingInputs); @@ -198,7 +209,7 @@ namespace baker { const auto blendshapesPerMeshOut = model.addJob("BuildBlendshapes", buildBlendshapesInputs); const auto buildMeshesInputs = BuildMeshesTask::Input(meshesIn, graphicsMeshes, normalsPerMesh, tangentsPerMesh, blendshapesPerMeshOut).asVarying(); const auto meshesOut = model.addJob("BuildMeshes", buildMeshesInputs); - const auto buildModelInputs = BuildModelTask::Input(hfmModelIn, meshesOut, jointsOut, jointRotationOffsets, jointIndices, flowData, shapeVerticesPerJoint).asVarying(); + const auto buildModelInputs = BuildModelTask::Input(hfmModelIn, meshesOut, jointsOut, jointRotationOffsets, jointIndices, flowData, shapeVerticesPerJoint, shapesOut, modelExtentsOut).asVarying(); const auto hfmModelOut = model.addJob("BuildModel", buildModelInputs); output = Output(hfmModelOut, materialMapping, dracoMeshes, dracoErrors, materialList); diff --git a/libraries/model-baker/src/model-baker/CalculateExtentsTask.cpp b/libraries/model-baker/src/model-baker/CalculateExtentsTask.cpp new file mode 100644 index 0000000000..e237cdb402 --- /dev/null +++ b/libraries/model-baker/src/model-baker/CalculateExtentsTask.cpp @@ -0,0 +1,41 @@ +// +// CalculateExtentsTask.cpp +// model-baker/src/model-baker +// +// Created by Sabrina Shanman on 2019/10/04. +// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#include "CalculateExtentsTask.h" + +#include "hfm/HFMModelMath.h" + +void CalculateExtentsTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) { + const auto& modelExtentsIn = input.get0(); + const auto& meshes = input.get1(); + const auto& shapesIn = input.get2(); + const auto& joints = input.get3(); + auto& modelExtentsOut = output.edit0(); + auto& shapesOut = output.edit1(); + + shapesOut.reserve(shapesIn.size()); + for (size_t i = 0; i < shapesIn.size(); ++i) { + shapesOut.push_back(shapesIn[i]); + auto& shapeOut = shapesOut.back(); + + auto& shapeExtents = shapeOut.transformedExtents; + if (shapeExtents.isValid()) { + continue; + } + + hfm::calculateExtentsForShape(shapeOut, meshes, joints); + } + + modelExtentsOut = modelExtentsIn; + if (!modelExtentsOut.isValid()) { + hfm::calculateExtentsForModel(modelExtentsOut, shapesOut); + } +} diff --git a/libraries/model-baker/src/model-baker/CalculateExtentsTask.h b/libraries/model-baker/src/model-baker/CalculateExtentsTask.h new file mode 100644 index 0000000000..006688ec5a --- /dev/null +++ b/libraries/model-baker/src/model-baker/CalculateExtentsTask.h @@ -0,0 +1,29 @@ +// +// CalculateExtentsTask.h +// model-baker/src/model-baker +// +// Created by Sabrina Shanman on 2019/10/04. 
+// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_CalculateExtentsTask_h +#define hifi_CalculateExtentsTask_h + +#include "Engine.h" +#include "hfm/HFM.h" + +// Calculates any undefined extents in the shapes and the model. Precalculated extents will be left alone. +// Bind extents will currently not be calculated +class CalculateExtentsTask { +public: + using Input = baker::VaryingSet4, std::vector, std::vector>; + using Output = baker::VaryingSet2>; + using JobModel = baker::Job::ModelIO; + + void run(const baker::BakeContextPointer& context, const Input& input, Output& output); +}; + +#endif // hifi_CalculateExtentsTask_h diff --git a/libraries/model-baker/src/model-baker/CalculateMeshTangentsTask.cpp b/libraries/model-baker/src/model-baker/CalculateMeshTangentsTask.cpp index 297d8cbde7..6147ce72e7 100644 --- a/libraries/model-baker/src/model-baker/CalculateMeshTangentsTask.cpp +++ b/libraries/model-baker/src/model-baker/CalculateMeshTangentsTask.cpp @@ -30,7 +30,7 @@ void CalculateMeshTangentsTask::run(const baker::BakeContextPointer& context, co // Otherwise confirm if we have the normals and texcoords needed if (!tangentsIn.empty()) { tangentsOut = tangentsIn.toStdVector(); - } else if (!normals.empty() && mesh.vertices.size() == mesh.texCoords.size()) { + } else if (!normals.empty() && mesh.vertices.size() <= mesh.texCoords.size()) { tangentsOut.resize(normals.size()); baker::calculateTangents(mesh, [&mesh, &normals, &tangentsOut](int firstIndex, int secondIndex, glm::vec3* outVertices, glm::vec2* outTexCoords, glm::vec3& outNormal) { From 936ac6f120fe5fe6a56e719ac122f48f6ddd2d8d Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Mon, 7 Oct 2019 18:03:33 -0700 Subject: [PATCH 041/121] REmoving more repetitions of the meshParts --- .../src/model-networking/ModelCache.cpp | 22 +++---------------- .../src/model-networking/ModelCache.h | 12 ---------- libraries/render-utils/src/Model.cpp | 1 - 3 files changed, 3 insertions(+), 32 deletions(-) diff --git a/libraries/model-networking/src/model-networking/ModelCache.cpp b/libraries/model-networking/src/model-networking/ModelCache.cpp index e1df0f95c7..2376beba30 100644 --- a/libraries/model-networking/src/model-networking/ModelCache.cpp +++ b/libraries/model-networking/src/model-networking/ModelCache.cpp @@ -295,7 +295,7 @@ void ModelResource::onGeometryMappingLoaded(bool success) { if (success && _modelResource) { _hfmModel = _modelResource->_hfmModel; _materialMapping = _modelResource->_materialMapping; - _meshParts = _modelResource->_meshParts; + // _meshParts = _modelResource->_meshParts; _meshes = _modelResource->_meshes; _materials = _modelResource->_materials; @@ -329,21 +329,13 @@ void ModelResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const Mate } std::shared_ptr meshes = std::make_shared(); - std::shared_ptr parts = std::make_shared(); int meshID = 0; for (const HFMMesh& mesh : _hfmModel->meshes) { // Copy mesh pointers meshes->emplace_back(mesh._mesh); - int partID = 0; - for (const HFMMeshPart& part : mesh.parts) { - // Construct local parts - parts->push_back(std::make_shared(meshID, partID, (int)materialIDAtlas[part.materialID])); - partID++; - } meshID++; } _meshes = meshes; - _meshParts = parts; finishedLoading(true); } @@ -428,7 +420,6 @@ NetworkModel::NetworkModel(const NetworkModel& networkModel) { _hfmModel = networkModel._hfmModel; _materialMapping = 
networkModel._materialMapping; _meshes = networkModel._meshes; - _meshParts = networkModel._meshParts; _materials.reserve(networkModel._materials.size()); for (const auto& material : networkModel._materials) { @@ -500,15 +491,8 @@ bool NetworkModel::areTexturesLoaded() const { return true; } -const std::shared_ptr NetworkModel::getShapeMaterial(int partID) const { - /* if ((partID >= 0) && (partID < (int)_meshParts->size())) { - int materialID = _meshParts->at(partID)->materialID; - if ((materialID >= 0) && (materialID < (int)_materials.size())) { - return _materials[materialID]; - } - }*/ - - auto materialID = getHFMModel().shapes[partID].material; +const std::shared_ptr NetworkModel::getShapeMaterial(int shapeID) const { + auto materialID = getHFMModel().shapes[shapeID].material; if ((materialID >= 0) && (materialID < (int)_materials.size())) { return _materials[materialID]; } diff --git a/libraries/model-networking/src/model-networking/ModelCache.h b/libraries/model-networking/src/model-networking/ModelCache.h index 9fdae339f7..87dbbe975d 100644 --- a/libraries/model-networking/src/model-networking/ModelCache.h +++ b/libraries/model-networking/src/model-networking/ModelCache.h @@ -22,8 +22,6 @@ #include #include "ModelLoader.h" -class MeshPart; - using GeometryMappingPair = std::pair; Q_DECLARE_METATYPE(GeometryMappingPair) @@ -38,7 +36,6 @@ public: // Immutable over lifetime using GeometryMeshes = std::vector>; - using GeometryMeshParts = std::vector>; // Mutable, but must retain structure of vector using NetworkMaterials = std::vector>; @@ -63,7 +60,6 @@ protected: HFMModel::ConstPointer _hfmModel; MaterialMapping _materialMapping; std::shared_ptr _meshes; - std::shared_ptr _meshParts; // Copied to each geometry, mutable throughout lifetime via setTextures NetworkMaterials _materials; @@ -180,12 +176,4 @@ private: ModelLoader _modelLoader; }; -class MeshPart { -public: - MeshPart(int mesh, int part, int material) : meshID { mesh }, partID { part }, materialID { material } {} - int meshID { -1 }; - int partID { -1 }; - int materialID { -1 }; -}; - #endif // hifi_ModelCache_h diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index f9e980b2ac..f39038aa74 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -1687,7 +1687,6 @@ public: std::shared_ptr meshes = std::make_shared(); meshes->push_back(mesh); _meshes = meshes; - _meshParts = std::shared_ptr(); } }; From 1f3993c3080c93931f4e6e2e3d7639e1c2b7fe32 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Tue, 8 Oct 2019 18:13:24 -0700 Subject: [PATCH 042/121] getting the transform right for the rigid bodies --- interface/src/avatar/MyAvatar.cpp | 4 +-- libraries/fbx/src/FBXSerializer.cpp | 14 ++++----- .../render-utils/src/CauterizedModel.cpp | 12 ++++++-- .../render-utils/src/MeshPartPayload.cpp | 11 +++++-- libraries/render-utils/src/Model.cpp | 30 +++++++++++++++++-- libraries/render-utils/src/Model.h | 8 +++++ 6 files changed, 62 insertions(+), 17 deletions(-) diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index de6ae526b4..a7fd397915 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -2496,7 +2496,7 @@ void MyAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) { if (_fullAvatarModelName.isEmpty()) { // Store the FST file name into preferences - const auto& mapping = _skeletonModel->getGeometry()->getMapping(); + const auto& mapping = 
_skeletonModel->getNetworkModel()->getMapping(); if (mapping.value("name").isValid()) { _fullAvatarModelName = mapping.value("name").toString(); } @@ -2504,7 +2504,7 @@ void MyAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) { initHeadBones(); _skeletonModel->setCauterizeBoneSet(_headBoneSet); - _fstAnimGraphOverrideUrl = _skeletonModel->getGeometry()->getAnimGraphOverrideUrl(); + _fstAnimGraphOverrideUrl = _skeletonModel->getNetworkModel()->getAnimGraphOverrideUrl(); initAnimGraph(); initFlowFromFST(); diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 0299648294..2fa90af9db 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -530,8 +530,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const if (object.properties.at(2) == "Mesh") { meshes.insert(getID(object.properties), extractMesh(object, meshIndex, deduplicateIndices)); } else { // object.properties.at(2) == "Shape" - ExtractedBlendshape extracted = { getID(object.properties), extractBlendshape(object) }; - blendshapes.append(extracted); + ExtractedBlendshape blendshape = { getID(object.properties), extractBlendshape(object) }; + blendshapes.append(blendshape); } } else if (object.name == "Model") { QString name = getModelName(object.properties); @@ -705,8 +705,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // add the blendshapes included in the model, if any if (mesh) { - foreach (const ExtractedBlendshape& extracted, blendshapes) { - addBlendshapes(extracted, blendshapeIndices.values(extracted.id.toLatin1()), *mesh); + foreach (const ExtractedBlendshape& blendshape, blendshapes) { + addBlendshapes(blendshape, blendshapeIndices.values(blendshape.id.toLatin1()), *mesh); } } @@ -1229,11 +1229,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } // assign the blendshapes to their corresponding meshes - foreach (const ExtractedBlendshape& extracted, blendshapes) { - QString blendshapeChannelID = _connectionParentMap.value(extracted.id); + foreach (const ExtractedBlendshape& blendshape, blendshapes) { + QString blendshapeChannelID = _connectionParentMap.value(blendshape.id); QString blendshapeID = _connectionParentMap.value(blendshapeChannelID); QString meshID = _connectionParentMap.value(blendshapeID); - addBlendshapes(extracted, blendshapeChannelIndices.values(blendshapeChannelID), meshes[meshID]); + addBlendshapes(blendshape, blendshapeChannelIndices.values(blendshapeChannelID), meshes[meshID]); } // get offset transform from mapping diff --git a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index 6e8f37d354..87eacc20ec 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -104,9 +104,11 @@ void CauterizedModel::updateClusterMatrices() { if (!_needsUpdateClusterMatrices || !isLoaded()) { return; } + + updateShapeStatesFromRig(); + _needsUpdateClusterMatrices = false; const HFMModel& hfmModel = getHFMModel(); - for (int i = 0; i < (int)_meshStates.size(); i++) { Model::MeshState& state = _meshStates[i]; const HFMMesh& mesh = hfmModel.meshes.at(i); @@ -221,13 +223,14 @@ void CauterizedModel::updateRenderItems() { auto itemID = self->_modelMeshRenderItemIDs[i]; auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex; + const auto& shapeState = self->getShapeState(i); const auto& meshState = self->getMeshState(meshIndex); const 
auto& cauterizedMeshState = self->getCauterizeMeshState(meshIndex); bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex); bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning(); - transaction.updateItem(itemID, [modelTransform, meshState, useDualQuaternionSkinning, cauterizedMeshState, invalidatePayloadShapeKey, + transaction.updateItem(itemID, [modelTransform, shapeState, meshState, useDualQuaternionSkinning, cauterizedMeshState, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags, enableCauterization](ModelMeshPartPayload& mmppData) { CauterizedMeshPartPayload& data = static_cast(mmppData); if (useDualQuaternionSkinning) { @@ -241,7 +244,7 @@ void CauterizedModel::updateRenderItems() { } Transform renderTransform = modelTransform; - if (useDualQuaternionSkinning) { + /*if (useDualQuaternionSkinning) { if (meshState.clusterDualQuaternions.size() == 1 || meshState.clusterDualQuaternions.size() == 2) { const auto& dq = meshState.clusterDualQuaternions[0]; Transform transform(dq.getRotation(), @@ -253,6 +256,9 @@ void CauterizedModel::updateRenderItems() { if (meshState.clusterMatrices.size() == 1 || meshState.clusterMatrices.size() == 2) { renderTransform = modelTransform.worldTransform(Transform(meshState.clusterMatrices[0])); } + }*/ + if (meshState.clusterMatrices.size() <= 1) { + renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); } data.updateTransformForSkinnedMesh(renderTransform, modelTransform); diff --git a/libraries/render-utils/src/MeshPartPayload.cpp b/libraries/render-utils/src/MeshPartPayload.cpp index fcf0ffaa48..8f992ba329 100644 --- a/libraries/render-utils/src/MeshPartPayload.cpp +++ b/libraries/render-utils/src/MeshPartPayload.cpp @@ -221,8 +221,10 @@ ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, in } updateTransform(transform, offsetTransform); + Transform renderTransform = transform; - if (useDualQuaternionSkinning) { + +/* if (useDualQuaternionSkinning) { if (state.clusterDualQuaternions.size() == 1) { const auto& dq = state.clusterDualQuaternions[0]; Transform transform(dq.getRotation(), @@ -235,6 +237,10 @@ ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, in renderTransform = transform.worldTransform(Transform(state.clusterMatrices[0])); } } +*/ + + const Model::ShapeState& shapeState = model->getShapeState(shapeIndex); + renderTransform = transform.worldTransform(shapeState._rootFromJointTransform); updateTransformForSkinnedMesh(renderTransform, transform); initCache(model); @@ -320,7 +326,8 @@ void ModelMeshPartPayload::updateClusterBuffer(const std::vector_modelMeshRenderItemIDs[i]; auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex; + const auto& shapeState = self->getShapeState(i); const auto& meshState = self->getMeshState(meshIndex); bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex); bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning(); - transaction.updateItem(itemID, [modelTransform, meshState, useDualQuaternionSkinning, + transaction.updateItem(itemID, [modelTransform, shapeState, meshState, useDualQuaternionSkinning, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags, cauterized](ModelMeshPartPayload& data) { if (useDualQuaternionSkinning) { data.updateClusterBuffer(meshState.clusterDualQuaternions); @@ -249,7 +250,7 @@ void Model::updateRenderItems() { Transform renderTransform = modelTransform; - if (useDualQuaternionSkinning) 
{ + /*if (useDualQuaternionSkinning) { if (meshState.clusterDualQuaternions.size() == 1 || meshState.clusterDualQuaternions.size() == 2) { const auto& dq = meshState.clusterDualQuaternions[0]; Transform transform(dq.getRotation(), @@ -261,6 +262,9 @@ void Model::updateRenderItems() { if (meshState.clusterMatrices.size() == 1 || meshState.clusterMatrices.size() == 2) { renderTransform = modelTransform.worldTransform(Transform(meshState.clusterMatrices[0])); } + }*/ + if (meshState.clusterMatrices.size() <= 1) { + renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); } data.updateTransformForSkinnedMesh(renderTransform, modelTransform); @@ -293,6 +297,21 @@ void Model::reset() { } } +void Model::updateShapeStatesFromRig() { + const HFMModel& hfmModel = getHFMModel(); + // TODO: should all Models have a valid _rig? + { // Shapes state: + const auto& shapes = hfmModel.shapes; + _shapeStates.resize(shapes.size()); + for (int s = 0; s < shapes.size(); ++s) { + uint32_t jointId = shapes[s].transform; + if (jointId < _rig.getJointStateCount()) { + _shapeStates[s]._rootFromJointTransform = _rig.getJointTransform(shapes[s].transform); + } + } + } +} + bool Model::updateGeometry() { bool needFullUpdate = false; @@ -307,6 +326,8 @@ bool Model::updateGeometry() { initJointStates(); assert(_meshStates.empty()); + updateShapeStatesFromRig(); + const HFMModel& hfmModel = getHFMModel(); int i = 0; foreach (const HFMMesh& mesh, hfmModel.meshes) { @@ -1385,6 +1406,8 @@ void Model::updateClusterMatrices() { return; } + updateShapeStatesFromRig(); + _needsUpdateClusterMatrices = false; const HFMModel& hfmModel = getHFMModel(); for (int i = 0; i < (int) _meshStates.size(); i++) { @@ -1418,6 +1441,7 @@ void Model::updateClusterMatrices() { void Model::deleteGeometry() { _deleteGeometryCounter++; + _shapeStates.clear(); _meshStates.clear(); _rig.destroyAnimGraph(); _blendedBlendshapeCoefficients.clear(); @@ -1496,7 +1520,7 @@ void Model::createRenderItemSet() { } bool Model::isRenderable() const { - return !_meshStates.empty() || (isLoaded() && _renderGeometry->getMeshes().empty()); + return (!_shapeStates.empty() && !_meshStates.empty()) || (isLoaded() && _renderGeometry->getMeshes().empty()); } std::set Model::getMeshIDsFromMaterialID(QString parentMaterialName) { diff --git a/libraries/render-utils/src/Model.h b/libraries/render-utils/src/Model.h index 79ddaeb68d..85661d4b6b 100644 --- a/libraries/render-utils/src/Model.h +++ b/libraries/render-utils/src/Model.h @@ -343,6 +343,12 @@ public: const MeshState& getMeshState(int index) { return _meshStates.at(index); } + class ShapeState { + public: + glm::mat4 _rootFromJointTransform; + }; + const ShapeState& getShapeState(int index) { return _shapeStates.at(index); } + uint32_t getGeometryCounter() const { return _deleteGeometryCounter; } const QMap& getRenderItems() const { return _modelMeshRenderItemsMap; } BlendShapeOperator getModelBlendshapeOperator() const { return _modelBlendshapeOperator; } @@ -420,6 +426,8 @@ protected: glm::vec3 _registrationPoint = glm::vec3(0.5f); /// the point in model space our center is snapped to std::vector _meshStates; + std::vector _shapeStates; + void updateShapeStatesFromRig(); virtual void initJointStates(); From 9905fc076c74e7a79297b39ea7c8d627604cfb51 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Wed, 9 Oct 2019 16:44:06 -0700 Subject: [PATCH 043/121] Materials of baked models fbx are loading correctly, still using the name/id of the mateiral in the part struct --- 
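Note: the Model/CauterizedModel changes in the patch above stop deriving the render transform of rigid shapes from the first cluster matrix and instead keep a per-shape ShapeState whose _rootFromJointTransform is refreshed from the rig on each update. A standalone restatement of that path follows; the ShapeSketch struct and the jointTransforms vector stand in for hfm::Shape and Rig::getJointTransform(), and the helper names are illustrative, not the project's API.

    // Standalone sketch of the new rigid-shape transform path (illustrative only).
    #include <cstdint>
    #include <vector>
    #include <glm/glm.hpp>

    struct ShapeSketch { uint32_t mesh = 0; uint32_t meshPart = 0; uint32_t transform = 0; };  // joint index
    struct ShapeStateSketch { glm::mat4 rootFromJointTransform = glm::mat4(1.0f); };

    // Mirrors Model::updateShapeStatesFromRig(): one state per shape, taken from the
    // rig-space transform of the joint the shape is attached to.
    std::vector<ShapeStateSketch> updateShapeStates(const std::vector<ShapeSketch>& shapes,
                                                    const std::vector<glm::mat4>& jointTransforms) {
        std::vector<ShapeStateSketch> states(shapes.size());
        for (size_t s = 0; s < shapes.size(); ++s) {
            uint32_t jointIndex = shapes[s].transform;
            if (jointIndex < jointTransforms.size()) {
                states[s].rootFromJointTransform = jointTransforms[jointIndex];
            }
        }
        return states;
    }

    // For a shape with at most one skinning cluster, the render transform is just
    // modelTransform * rootFromJointTransform, matching the updateRenderItems() path above.
    glm::mat4 rigidShapeRenderTransform(const glm::mat4& modelTransform, const ShapeStateSketch& state) {
        return modelTransform * state.rootFromJointTransform;
    }
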
libraries/fbx/src/FBXSerializer.cpp | 35 +++++++++++++++++++----- libraries/fbx/src/FBXSerializer_Mesh.cpp | 8 +++++- 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 2fa90af9db..c35a23ef3a 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1480,6 +1480,12 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // meshShapes will be added to hfmModel at the very end std::vector meshShapes; meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size()); + if (instanceModelIDs.size() > 1) { + qCDebug(modelformat) << "Mesh " << meshID << " made of " << mesh.parts.size() << " parts is instanced " << instanceModelIDs.size() << " times!!!"; + } + if (mesh.parts.size() < 1) { + qCDebug(modelformat) << "Mesh " << meshID << " made of " << mesh.parts.size() << " parts !!!!! "; + } for (const QString& modelID : instanceModelIDs) { // The transform node has the same indexing order as the joints const uint32_t transformIndex = (uint32_t)modelIDs.indexOf(modelID); @@ -1500,6 +1506,14 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const shape.mesh = meshIndex; shape.meshPart = i; shape.transform = transformIndex; + + auto matName = mesh.parts[i].materialID; + auto materialIt = materialNameToID.find(matName.toStdString()); + if (materialIt != materialNameToID.end()) { + shape.material = materialIt->second; + } else { + qCDebug(modelformat) << "Unknown material ? " << matName; + } shape.transformedExtents.reset(); // compute the shape extents from the transformed vertices @@ -1546,14 +1560,21 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } // For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart if (!extracted.materialIDPerMeshPart.empty()) { - for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) { - hfm::Shape& shape = partShapes[i]; - const std::string& materialID = extracted.materialIDPerMeshPart[i]; - auto materialIt = materialNameToID.find(materialID); - if (materialIt != materialNameToID.end()) { - shape.material = materialIt->second; + /* if (partShapes.size() == extracted.materialIDPerMeshPart.size()) { + for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) { + hfm::Shape& shape = partShapes[i]; + const std::string& materialID = extracted.materialIDPerMeshPart[i]; + auto materialIt = materialNameToID.find(materialID); + if (materialIt != materialNameToID.end()) { + shape.material = materialIt->second; + } } - } + } else { + for (int p = 0; p < mesh.parts.size(); p++) { + qCDebug(modelformat) << "mesh.parts[" << p <<"] is " << mesh.parts[p].materialID; + } + qCDebug(modelformat) << "partShapes is not the same size as materialIDPerMeshPart ?"; + }*/ } // find the clusters with which the mesh is associated diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index 7c6be5740a..37f2c9ec1b 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -492,9 +492,15 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me int& partIndexPlusOne = materialTextureParts[materialTexture]; if (partIndexPlusOne == 0) { data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1); + HFMMeshPart& part = data.extracted.mesh.parts.back(); // Figure out if this is 
the older way of defining the per-part material for baked FBX - if (dracoMeshNodeVersion < 2) { + if (dracoMeshNodeVersion >= 2) { + // Define the materialID now + if (materialID < dracoMaterialList.size()) { + part.materialID = QString(dracoMaterialList[materialID].c_str()); + } + } else { // Define the materialID later, based on the order of first appearance of the materials in the _connectionChildMap data.extracted.partMaterialTextures.append(materialTexture); } From 0b924eea78c92e2a32daf6adf8e8fac105478089 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 8 Oct 2019 16:28:19 -0700 Subject: [PATCH 044/121] Fix GLTF skinning crash, misc fixes and cleanup --- libraries/fbx/src/GLTFSerializer.cpp | 116 ++------------------------- 1 file changed, 8 insertions(+), 108 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 70e765c23a..20ec4ce05e 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1003,10 +1003,10 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& // Build joints - HFMJoint joint; hfmModel.jointIndices["x"] = numNodes; for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { + HFMJoint joint; auto& node = _file.nodes[nodeIndex]; auto parentItr = parentIndices.find(nodeIndex); if (parentsEnd == parentItr) { @@ -1021,12 +1021,12 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& glm::vec3 scale = extractScale(joint.transform); joint.postTransform = glm::scale(glm::mat4(), scale); - joint.globalTransform = joint.transform; // Nodes are sorted, so we can apply the full transform just by getting the global transform of the already defined parent if (joint.parentIndex != -1 && joint.parentIndex != nodeIndex) { - const auto& parentJoint = hfmModel.joints[(size_t)nodeIndex]; - joint.globalTransform = parentJoint.globalTransform * joint.globalTransform; + const auto& parentJoint = hfmModel.joints[(size_t)joint.parentIndex]; + joint.transform = parentJoint.transform * joint.transform; } + joint.globalTransform = joint.transform; joint.name = node.name; joint.isSkeletonJoint = false; @@ -1102,7 +1102,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& int meshCount = _file.meshes.size(); hfmModel.meshes.resize(meshCount); hfmModel.meshExtents.reset(); - hfmModel.meshes.resize(meshCount); for (int meshIndex = 0; meshIndex < meshCount; ++meshIndex) { const auto& gltfMesh = _file.meshes[meshIndex]; auto& mesh = hfmModel.meshes[meshIndex]; @@ -1211,12 +1210,12 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& break; case GLTFVertexAttribute::JOINTS_0: - success = addArrayFromAttribute(vertexAttribute, accessor, colors); + success = addArrayFromAttribute(vertexAttribute, accessor, joints); jointStride = GLTFAccessorType::count((GLTFAccessorType::Value)accessor.type); break; case GLTFVertexAttribute::WEIGHTS_0: - success = addArrayFromAttribute(vertexAttribute, accessor, colors); + success = addArrayFromAttribute(vertexAttribute, accessor, weights); weightStride = GLTFAccessorType::count((GLTFAccessorType::Value)accessor.type); break; @@ -1241,106 +1240,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& int partVerticesCount = vertices.size() / 3; - // generate the normals if they don't exist - // FIXME move to GLTF post-load processing - if (normals.size() == 0) { - QVector newIndices; - QVector newVertices; - QVector 
newNormals; - QVector newTexcoords; - QVector newTexcoords2; - QVector newColors; - QVector newJoints; - QVector newWeights; - - for (int n = 0; n < indices.size(); n = n + 3) { - int v1_index = (indices[n + 0] * 3); - int v2_index = (indices[n + 1] * 3); - int v3_index = (indices[n + 2] * 3); - - glm::vec3 v1 = glm::vec3(vertices[v1_index], vertices[v1_index + 1], vertices[v1_index + 2]); - glm::vec3 v2 = glm::vec3(vertices[v2_index], vertices[v2_index + 1], vertices[v2_index + 2]); - glm::vec3 v3 = glm::vec3(vertices[v3_index], vertices[v3_index + 1], vertices[v3_index + 2]); - - newVertices.append(v1.x); - newVertices.append(v1.y); - newVertices.append(v1.z); - newVertices.append(v2.x); - newVertices.append(v2.y); - newVertices.append(v2.z); - newVertices.append(v3.x); - newVertices.append(v3.y); - newVertices.append(v3.z); - - glm::vec3 norm = glm::normalize(glm::cross(v2 - v1, v3 - v1)); - - newNormals.append(norm.x); - newNormals.append(norm.y); - newNormals.append(norm.z); - newNormals.append(norm.x); - newNormals.append(norm.y); - newNormals.append(norm.z); - newNormals.append(norm.x); - newNormals.append(norm.y); - newNormals.append(norm.z); - - if (texcoords.size() == partVerticesCount * TEX_COORD_STRIDE) { - GLTF_APPEND_ARRAY_2(newTexcoords, texcoords) - } - - if (texcoords2.size() == partVerticesCount * TEX_COORD_STRIDE) { - GLTF_APPEND_ARRAY_2(newTexcoords2, texcoords2) - } - - if (colors.size() == partVerticesCount * colorStride) { - if (colorStride == 4) { - GLTF_APPEND_ARRAY_4(newColors, colors) - } else { - GLTF_APPEND_ARRAY_3(newColors, colors) - } - } - - if (joints.size() == partVerticesCount * jointStride) { - if (jointStride == 4) { - GLTF_APPEND_ARRAY_4(newJoints, joints) - } else if (jointStride == 3) { - GLTF_APPEND_ARRAY_3(newJoints, joints) - } else if (jointStride == 2) { - GLTF_APPEND_ARRAY_2(newJoints, joints) - } else { - GLTF_APPEND_ARRAY_1(newJoints, joints) - } - } - - if (weights.size() == partVerticesCount * weightStride) { - if (weightStride == 4) { - GLTF_APPEND_ARRAY_4(newWeights, weights) - } else if (weightStride == 3) { - GLTF_APPEND_ARRAY_3(newWeights, weights) - } else if (weightStride == 2) { - GLTF_APPEND_ARRAY_2(newWeights, weights) - } else { - GLTF_APPEND_ARRAY_1(newWeights, weights) - } - } - newIndices.append(n); - newIndices.append(n + 1); - newIndices.append(n + 2); - } - - vertices = newVertices; - normals = newNormals; - tangents = QVector(); - texcoords = newTexcoords; - texcoords2 = newTexcoords2; - colors = newColors; - joints = newJoints; - weights = newWeights; - indices = newIndices; - - partVerticesCount = vertices.size() / 3; - } - QVector validatedIndices; for (int n = 0; n < indices.count(); ++n) { if (indices[n] < partVerticesCount) { @@ -1476,13 +1375,13 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& int numVertices = mesh.vertices.size() - prevMeshVerticesCount; // Append new cluster indices and weights for this mesh part + int prevMeshClusterWeightCount = mesh.clusterWeights.count(); for (int i = 0; i < numVertices * WEIGHTS_PER_VERTEX; ++i) { mesh.clusterIndices.push_back(mesh.clusters.size() - 1); mesh.clusterWeights.push_back(0); } // normalize and compress to 16-bits - int prevMeshClusterWeightCount = mesh.clusterWeights.count(); for (int i = 0; i < numVertices; ++i) { int j = i * WEIGHTS_PER_VERTEX; @@ -1984,6 +1883,7 @@ bool GLTFSerializer::addArrayFromAttribute(GLTFVertexAttribute::Value vertexAttr qWarning(modelformat) << "There was a problem reading glTF WEIGHTS_0 data for 
model " << _url; return false; } + break; default: qWarning(modelformat) << "Unexpected attribute type" << _url; From 726a91cdb98f0b0c6df5091bc1a49f8fe61b7ab1 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 9 Oct 2019 13:07:43 -0700 Subject: [PATCH 045/121] Fix un-transformed vertex being added to extents instead of transformed one --- libraries/hfm/src/hfm/HFMModelMath.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index 8812163fe2..e6ba042e9c 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -47,7 +47,7 @@ void calculateExtentsForShape(hfm::Shape& shape, const std::vector& m } const glm::vec3& vertex = mesh.vertices[idx]; const glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f)); - shapeExtents.addPoint(vertex); + shapeExtents.addPoint(transformedVertex); }); thickenFlatExtents(shapeExtents); From d3ed0bc71bd76ebc9cefb31de6fe07285c4d20fe Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 9 Oct 2019 14:30:16 -0700 Subject: [PATCH 046/121] Remove unused joint/transform calculation in GLTFSerializer --- libraries/fbx/src/GLTFSerializer.cpp | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 20ec4ce05e..415b0f75a9 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -987,24 +987,11 @@ void GLTFFile::normalizeNodeTransforms() { bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& mapping, const hifi::URL& url) { int numNodes = _file.nodes.size(); - hfmModel.transforms.resize(numNodes); - auto parentIndices = gltf::findParentIndices(_file.nodes); - const auto parentsEnd = parentIndices.end(); - for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { - auto& gltfNode = _file.nodes[nodeIndex]; - auto& hmfTransform = hfmModel.transforms[nodeIndex]; - auto parentItr = parentIndices.find(nodeIndex); - if (parentItr != parentsEnd ) { - hmfTransform.parent = parentItr->second; - } - hmfTransform.transform = getModelTransform(gltfNode); - } - - // Build joints hfmModel.jointIndices["x"] = numNodes; - + auto parentIndices = gltf::findParentIndices(_file.nodes); + const auto parentsEnd = parentIndices.end(); for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { HFMJoint joint; auto& node = _file.nodes[nodeIndex]; @@ -1032,7 +1019,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& joint.isSkeletonJoint = false; hfmModel.joints.push_back(joint); } - hfmModel.shapeVertices.resize(hfmModel.joints.size()); // get offset transform from mapping float unitScaleFactor = 1.0f; From 9fd3d44a0d8dd416b05fbed58ddab560bc9563b6 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 9 Oct 2019 15:53:28 -0700 Subject: [PATCH 047/121] Make GLTF joint.globalTransform more correct --- libraries/fbx/src/GLTFSerializer.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 415b0f75a9..af021e6509 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1008,12 +1008,13 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& glm::vec3 scale = extractScale(joint.transform); joint.postTransform = glm::scale(glm::mat4(), scale); + 
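// A minimal sketch, in standalone glm code, of the parent-before-child accumulation the
// surrounding hunk relies on: because the nodes are visited with every parent processed
// before its children, a node's global transform can be finished with a single multiply
// against its parent's already-final global transform. The names localTransform and
// parentIndex are illustrative placeholders, not the actual hfm/GLTFSerializer fields.
#include <glm/glm.hpp>
#include <vector>

// parentIndex[i] is -1 for roots; assumes parentIndex[i] < i for every non-root node.
std::vector<glm::mat4> accumulateGlobalTransforms(const std::vector<glm::mat4>& localTransform,
                                                  const std::vector<int>& parentIndex) {
    std::vector<glm::mat4> globalTransform(localTransform.size());
    for (size_t i = 0; i < localTransform.size(); ++i) {
        if (parentIndex[i] >= 0) {
            // Parent is already final, so one multiply finishes this node.
            globalTransform[i] = globalTransform[parentIndex[i]] * localTransform[i];
        } else {
            globalTransform[i] = localTransform[i];
        }
    }
    return globalTransform;
}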
joint.globalTransform = joint.transform; // Nodes are sorted, so we can apply the full transform just by getting the global transform of the already defined parent if (joint.parentIndex != -1 && joint.parentIndex != nodeIndex) { const auto& parentJoint = hfmModel.joints[(size_t)joint.parentIndex]; joint.transform = parentJoint.transform * joint.transform; + joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; } - joint.globalTransform = joint.transform; joint.name = node.name; joint.isSkeletonJoint = false; From bf65711e04d1f4cc35261e55f295a33e8898c80b Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 10 Oct 2019 11:14:58 -0700 Subject: [PATCH 048/121] Fix build warning with cluster.jointIndex in FBXSerializer --- libraries/fbx/src/FBXSerializer.cpp | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 78bc1836c3..68268af5bd 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1428,7 +1428,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size()); for (const QString& modelID : instanceModelIDs) { // The transform node has the same indexing order as the joints - const uint32_t transformIndex = (uint32_t)modelIDs.indexOf(modelID); + int indexOfModelID = modelIDs.indexOf(modelID); + if (indexOfModelID == -1) { + qCDebug(modelformat) << "Model not in model list: " << modelID; + } + const uint32_t transformIndex = (indexOfModelID == -1) ? 0 : (uint32_t)indexOfModelID; // accumulate local transforms glm::mat4 globalTransform = hfmModel.joints[transformIndex].globalTransform; @@ -1513,12 +1517,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } - auto rootJointIndex = modelIDs.indexOf(modelID); - if (rootJointIndex == -1) { - qCDebug(modelformat) << "Model not in model list: " << modelID; - rootJointIndex = 0; - } - // whether we're skinned depends on how many clusters are attached if (clusterIDs.size() > 1) { hfm::DynamicTransform dynamicTransform; @@ -1531,10 +1529,12 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion // of skinning information in FBX QString jointID = _connectionChildMap.value(clusterID); - hfmCluster.jointIndex = modelIDs.indexOf(jointID); - if (hfmCluster.jointIndex == -1) { + int indexOfJointID = modelIDs.indexOf(jointID); + if (indexOfJointID == -1) { qCDebug(modelformat) << "Joint not in model list: " << jointID; hfmCluster.jointIndex = 0; + } else { + hfmCluster.jointIndex = (uint32_t)indexOfJointID; } hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * globalTransform; @@ -1563,7 +1563,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // the last cluster is the root cluster HFMCluster cluster; - cluster.jointIndex = rootJointIndex; + cluster.jointIndex = transformIndex; clusters.push_back(cluster); // Skinned mesh instances have a dynamic transform @@ -1599,7 +1599,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } else { // this is a single-joint mesh - HFMJoint& joint = hfmModel.joints[rootJointIndex]; + HFMJoint& joint = hfmModel.joints[transformIndex]; // Apply geometric offset, if present, by transforming the vertices 
directly if (joint.hasGeometricOffset) { From a995f2c09fa7a44741c9a8bc4fac7fa7f8bdc7fb Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Thu, 10 Oct 2019 17:09:54 -0700 Subject: [PATCH 049/121] in the middle of something --- libraries/fbx/src/FBXSerializer.cpp | 3 - libraries/hfm/src/hfm/HFM.h | 8 --- .../src/model-networking/ModelCache.cpp | 1 - .../src/CauterizedMeshPartPayload.cpp | 4 +- .../src/CauterizedMeshPartPayload.h | 2 +- .../render-utils/src/CauterizedModel.cpp | 56 ++++++++----------- .../render-utils/src/MeshPartPayload.cpp | 42 +++----------- libraries/render-utils/src/MeshPartPayload.h | 8 +-- libraries/render-utils/src/Model.cpp | 34 +++++------ libraries/render-utils/src/Model.h | 2 +- 10 files changed, 53 insertions(+), 107 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index c35a23ef3a..bec42ca01c 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1463,9 +1463,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // see if any materials have texture children bool materialsHaveTextures = checkMaterialsHaveTextures(_hfmMaterials, _textureFilenames, _connectionChildMap); - // Note that the transforms in the TransformNodes are initially in world-space, and need to be converted to parent-space - std::vector transformNodes; - for (QMap::iterator it = meshes.begin(); it != meshes.end(); it++) { const QString& meshID = it.key(); const ExtractedMesh& extracted = it.value(); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 96030672f2..f3330d9291 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -293,13 +293,6 @@ public: bool shouldInitCollisions() const { return _collisionsConfig.size() > 0; } }; -// DEPRECATED in favor of using hfm::Joint -class TransformNode { -public: - uint32_t parent { 0 }; - Transform transform; -}; - // Formerly contained in hfm::Mesh class Deformer { public: @@ -343,7 +336,6 @@ public: std::vector materials; std::vector deformers; - std::vector transforms; std::vector dynamicTransforms; std::vector joints; diff --git a/libraries/model-networking/src/model-networking/ModelCache.cpp b/libraries/model-networking/src/model-networking/ModelCache.cpp index 2376beba30..8b7db5957b 100644 --- a/libraries/model-networking/src/model-networking/ModelCache.cpp +++ b/libraries/model-networking/src/model-networking/ModelCache.cpp @@ -295,7 +295,6 @@ void ModelResource::onGeometryMappingLoaded(bool success) { if (success && _modelResource) { _hfmModel = _modelResource->_hfmModel; _materialMapping = _modelResource->_materialMapping; - // _meshParts = _modelResource->_meshParts; _meshes = _modelResource->_meshes; _materials = _modelResource->_materials; diff --git a/libraries/render-utils/src/CauterizedMeshPartPayload.cpp b/libraries/render-utils/src/CauterizedMeshPartPayload.cpp index a310c10136..6996ea3c29 100644 --- a/libraries/render-utils/src/CauterizedMeshPartPayload.cpp +++ b/libraries/render-utils/src/CauterizedMeshPartPayload.cpp @@ -18,8 +18,8 @@ using namespace render; -CauterizedMeshPartPayload::CauterizedMeshPartPayload(ModelPointer model, int meshIndex, int partIndex, int shapeIndex, const Transform& transform, const Transform& offsetTransform) - : ModelMeshPartPayload(model, meshIndex, partIndex, shapeIndex, transform, offsetTransform) {} +CauterizedMeshPartPayload::CauterizedMeshPartPayload(ModelPointer model, int meshIndex, int partIndex, int shapeIndex, const Transform& 
transform) + : ModelMeshPartPayload(model, meshIndex, partIndex, shapeIndex, transform) {} void CauterizedMeshPartPayload::updateClusterBuffer(const std::vector& clusterMatrices, const std::vector& cauterizedClusterMatrices) { diff --git a/libraries/render-utils/src/CauterizedMeshPartPayload.h b/libraries/render-utils/src/CauterizedMeshPartPayload.h index 9a6cea8b9f..87d8ce7ae9 100644 --- a/libraries/render-utils/src/CauterizedMeshPartPayload.h +++ b/libraries/render-utils/src/CauterizedMeshPartPayload.h @@ -13,7 +13,7 @@ class CauterizedMeshPartPayload : public ModelMeshPartPayload { public: - CauterizedMeshPartPayload(ModelPointer model, int meshIndex, int partIndex, int shapeIndex, const Transform& transform, const Transform& offsetTransform); + CauterizedMeshPartPayload(ModelPointer model, int meshIndex, int partIndex, int shapeIndex, const Transform& transform); // matrix palette skinning void updateClusterBuffer(const std::vector& clusterMatrices, diff --git a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index 87eacc20ec..9f9acbf182 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -33,7 +33,20 @@ bool CauterizedModel::updateGeometry() { if (_isCauterized && needsFullUpdate) { assert(_cauterizeMeshStates.empty()); const HFMModel& hfmModel = getHFMModel(); - foreach (const HFMMesh& mesh, hfmModel.meshes) { + const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + for (int i = 0; i < hfmDynamicTransforms.size(); i++) { + const auto& dynT = hfmDynamicTransforms[i]; + MeshState state; + if (_useDualQuaternionSkinning) { + state.clusterDualQuaternions.resize(dynT.clusters.size()); + } else { + state.clusterMatrices.resize(dynT.clusters.size()); + } + _cauterizeMeshStates.append(state); + _meshStates.push_back(state); + } + + /* foreach (const HFMMesh& mesh, hfmModel.meshes) { Model::MeshState state; if (_useDualQuaternionSkinning) { state.clusterDualQuaternions.resize(mesh.clusters.size()); @@ -42,7 +55,7 @@ bool CauterizedModel::updateGeometry() { state.clusterMatrices.resize(mesh.clusters.size()); _cauterizeMeshStates.append(state); } - } + }*/ } return needsFullUpdate; } @@ -73,6 +86,8 @@ void CauterizedModel::createRenderItemSet() { offset.setScale(_scale); offset.postTranslate(_offset); + Transform::mult(transform, transform, offset); + // Run through all of the meshes, and place them into their segregated, but unsorted buckets int shapeID = 0; uint32_t numMeshes = (uint32_t)meshes.size(); @@ -85,7 +100,7 @@ void CauterizedModel::createRenderItemSet() { // Create the render payloads int numParts = (int)mesh->getNumParts(); for (int partIndex = 0; partIndex < numParts; partIndex++) { - auto ptr = std::make_shared(shared_from_this(), i, partIndex, shapeID, transform, offset); + auto ptr = std::make_shared(shared_from_this(), i, partIndex, shapeID, transform); _modelMeshRenderItems << std::static_pointer_cast(ptr); auto material = getNetworkModel()->getShapeMaterial(shapeID); _modelMeshMaterialNames.push_back(material ? 
material->getName() : ""); @@ -222,8 +237,11 @@ void CauterizedModel::updateRenderItems() { auto itemID = self->_modelMeshRenderItemIDs[i]; auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex; + auto deformerIndex = self->_modelMeshRenderItemShapes[i].meshIndex; const auto& shapeState = self->getShapeState(i); + + const auto& meshState = self->getMeshState(meshIndex); const auto& cauterizedMeshState = self->getCauterizeMeshState(meshIndex); @@ -244,38 +262,10 @@ void CauterizedModel::updateRenderItems() { } Transform renderTransform = modelTransform; - /*if (useDualQuaternionSkinning) { - if (meshState.clusterDualQuaternions.size() == 1 || meshState.clusterDualQuaternions.size() == 2) { - const auto& dq = meshState.clusterDualQuaternions[0]; - Transform transform(dq.getRotation(), - dq.getScale(), - dq.getTranslation()); - renderTransform = modelTransform.worldTransform(transform); - } - } else { - if (meshState.clusterMatrices.size() == 1 || meshState.clusterMatrices.size() == 2) { - renderTransform = modelTransform.worldTransform(Transform(meshState.clusterMatrices[0])); - } - }*/ - if (meshState.clusterMatrices.size() <= 1) { + if (meshState.clusterMatrices.size() <= 2) { renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); } - data.updateTransformForSkinnedMesh(renderTransform, modelTransform); - - renderTransform = modelTransform; - if (useDualQuaternionSkinning) { - if (cauterizedMeshState.clusterDualQuaternions.size() == 1 || cauterizedMeshState.clusterDualQuaternions.size() == 2) { - const auto& dq = cauterizedMeshState.clusterDualQuaternions[0]; - Transform transform(dq.getRotation(), - dq.getScale(), - dq.getTranslation()); - renderTransform = modelTransform.worldTransform(Transform(transform)); - } - } else { - if (cauterizedMeshState.clusterMatrices.size() == 1 || cauterizedMeshState.clusterMatrices.size() == 2) { - renderTransform = modelTransform.worldTransform(Transform(cauterizedMeshState.clusterMatrices[0])); - } - } + data.updateTransform(renderTransform); data.updateTransformForCauterizedMesh(renderTransform); data.setEnableCauterization(enableCauterization); diff --git a/libraries/render-utils/src/MeshPartPayload.cpp b/libraries/render-utils/src/MeshPartPayload.cpp index 8f992ba329..7b37c847af 100644 --- a/libraries/render-utils/src/MeshPartPayload.cpp +++ b/libraries/render-utils/src/MeshPartPayload.cpp @@ -64,11 +64,10 @@ void MeshPartPayload::updateMeshPart(const std::shared_ptr } } -void MeshPartPayload::updateTransform(const Transform& transform, const Transform& offsetTransform) { - _transform = transform; - Transform::mult(_drawTransform, _transform, offsetTransform); +void MeshPartPayload::updateTransform(const Transform& transform) { + _worldFromLocalTransform = transform; _worldBound = _localBound; - _worldBound.transform(_drawTransform); + _worldBound.transform(_worldFromLocalTransform); } void MeshPartPayload::addMaterial(graphics::MaterialLayer material) { @@ -134,7 +133,7 @@ void MeshPartPayload::bindMesh(gpu::Batch& batch) { } void MeshPartPayload::bindTransform(gpu::Batch& batch, RenderArgs::RenderMode renderMode) const { - batch.setModelTransform(_drawTransform); + batch.setModelTransform(_worldFromLocalTransform); } @@ -196,7 +195,7 @@ template <> void payloadRender(const ModelMeshPartPayload::Pointer& payload, Ren } -ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, int partIndex, int shapeIndex, const Transform& transform, const Transform& offsetTransform) : 
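// A minimal sketch of what transforming a local bound into a world bound (as in
// MeshPartPayload::updateTransform above) amounts to: push all eight corners of the
// local box through the world-from-local matrix and rebuild the min/max. SimpleAABB is
// an illustrative stand-in for the engine's own box type, written against plain glm.
#include <glm/glm.hpp>
#include <cfloat>

struct SimpleAABB {
    glm::vec3 minimum{  FLT_MAX };
    glm::vec3 maximum{ -FLT_MAX };
    void addPoint(const glm::vec3& p) {
        minimum = glm::min(minimum, p);
        maximum = glm::max(maximum, p);
    }
};

SimpleAABB transformAABB(const SimpleAABB& local, const glm::mat4& worldFromLocal) {
    SimpleAABB world;
    for (int corner = 0; corner < 8; ++corner) {
        // Each bit of 'corner' picks min or max on one axis, enumerating the 8 corners.
        glm::vec3 p((corner & 1) ? local.maximum.x : local.minimum.x,
                    (corner & 2) ? local.maximum.y : local.minimum.y,
                    (corner & 4) ? local.maximum.z : local.minimum.z);
        world.addPoint(glm::vec3(worldFromLocal * glm::vec4(p, 1.0f)));
    }
    return world;
}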
+ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, int partIndex, int shapeIndex, const Transform& transform) : _meshIndex(meshIndex), _shapeID(shapeIndex) { @@ -220,28 +219,10 @@ ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, in computeAdjustedLocalBound(state.clusterMatrices); } - updateTransform(transform, offsetTransform); - - Transform renderTransform = transform; - -/* if (useDualQuaternionSkinning) { - if (state.clusterDualQuaternions.size() == 1) { - const auto& dq = state.clusterDualQuaternions[0]; - Transform transform(dq.getRotation(), - dq.getScale(), - dq.getTranslation()); - renderTransform = transform.worldTransform(Transform(transform)); - } - } else { - if (state.clusterMatrices.size() == 1) { - renderTransform = transform.worldTransform(Transform(state.clusterMatrices[0])); - } - } -*/ - + Transform renderTransform = transform; const Model::ShapeState& shapeState = model->getShapeState(shapeIndex); renderTransform = transform.worldTransform(shapeState._rootFromJointTransform); - updateTransformForSkinnedMesh(renderTransform, transform); + updateTransform(renderTransform); initCache(model); @@ -323,13 +304,6 @@ void ModelMeshPartPayload::updateClusterBuffer(const std::vector& drawMesh, int partIndex); virtual void notifyLocationChanged() {} - void updateTransform(const Transform& transform, const Transform& offsetTransform); + void updateTransform(const Transform& transform); // Render Item interface virtual render::ItemKey getKey() const; @@ -52,8 +52,7 @@ public: virtual void bindTransform(gpu::Batch& batch, RenderArgs::RenderMode renderMode) const; // Payload resource cached values - Transform _drawTransform; - Transform _transform; + Transform _worldFromLocalTransform; int _partIndex = 0; bool _hasColorAttrib { false }; @@ -86,7 +85,7 @@ namespace render { class ModelMeshPartPayload : public MeshPartPayload { public: - ModelMeshPartPayload(ModelPointer model, int meshIndex, int partIndex, int shapeIndex, const Transform& transform, const Transform& offsetTransform); + ModelMeshPartPayload(ModelPointer model, int meshIndex, int partIndex, int shapeIndex, const Transform& transform); typedef render::Payload Payload; typedef Payload::DataPointer Pointer; @@ -100,7 +99,6 @@ public: // dual quaternion skinning void updateClusterBuffer(const std::vector& clusterDualQuaternions); - void updateTransformForSkinnedMesh(const Transform& renderTransform, const Transform& boundTransform); // Render Item interface render::ShapeKey getShapeKey() const override; // shape interface diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index 5d5de04537..0d555b605a 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -249,24 +249,10 @@ void Model::updateRenderItems() { } Transform renderTransform = modelTransform; - - /*if (useDualQuaternionSkinning) { - if (meshState.clusterDualQuaternions.size() == 1 || meshState.clusterDualQuaternions.size() == 2) { - const auto& dq = meshState.clusterDualQuaternions[0]; - Transform transform(dq.getRotation(), - dq.getScale(), - dq.getTranslation()); - renderTransform = modelTransform.worldTransform(Transform(transform)); - } - } else { - if (meshState.clusterMatrices.size() == 1 || meshState.clusterMatrices.size() == 2) { - renderTransform = modelTransform.worldTransform(Transform(meshState.clusterMatrices[0])); - } - }*/ if (meshState.clusterMatrices.size() <= 1) { renderTransform = 
modelTransform.worldTransform(shapeState._rootFromJointTransform); } - data.updateTransformForSkinnedMesh(renderTransform, modelTransform); + data.updateTransform(renderTransform); data.setCauterized(cauterized); data.updateKey(renderItemKeyGlobalFlags); @@ -305,7 +291,7 @@ void Model::updateShapeStatesFromRig() { _shapeStates.resize(shapes.size()); for (int s = 0; s < shapes.size(); ++s) { uint32_t jointId = shapes[s].transform; - if (jointId < _rig.getJointStateCount()) { + if (jointId < (uint32_t) _rig.getJointStateCount()) { _shapeStates[s]._rootFromJointTransform = _rig.getJointTransform(shapes[s].transform); } } @@ -329,14 +315,24 @@ bool Model::updateGeometry() { updateShapeStatesFromRig(); const HFMModel& hfmModel = getHFMModel(); - int i = 0; - foreach (const HFMMesh& mesh, hfmModel.meshes) { + const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + /* int i = 0; + for (const auto& mesh: hfmModel.meshes) { MeshState state; state.clusterDualQuaternions.resize(mesh.clusters.size()); state.clusterMatrices.resize(mesh.clusters.size()); _meshStates.push_back(state); i++; } + */ + for (int i = 0; i < hfmDynamicTransforms.size(); i++) { + const auto& dynT = hfmDynamicTransforms[i]; + MeshState state; + state.clusterDualQuaternions.resize(dynT.clusters.size()); + state.clusterMatrices.resize(dynT.clusters.size()); + _meshStates.push_back(state); + } + needFullUpdate = true; emit rigReady(); } @@ -1510,7 +1506,7 @@ void Model::createRenderItemSet() { // Create the render payloads int numParts = (int)mesh->getNumParts(); for (int partIndex = 0; partIndex < numParts; partIndex++) { - _modelMeshRenderItems << std::make_shared(shared_from_this(), i, partIndex, shapeID, transform, offset); + _modelMeshRenderItems << std::make_shared(shared_from_this(), i, partIndex, shapeID, transform); auto material = getNetworkModel()->getShapeMaterial(shapeID); _modelMeshMaterialNames.push_back(material ? 
material->getName() : ""); _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)i }); diff --git a/libraries/render-utils/src/Model.h b/libraries/render-utils/src/Model.h index 85661d4b6b..0a102630b6 100644 --- a/libraries/render-utils/src/Model.h +++ b/libraries/render-utils/src/Model.h @@ -473,7 +473,7 @@ protected: QVector> _modelMeshRenderItems; QMap _modelMeshRenderItemsMap; render::ItemIDs _modelMeshRenderItemIDs; - using ShapeInfo = struct { int meshIndex; }; + using ShapeInfo = struct { int meshIndex; int deformerIndex; }; std::vector _modelMeshRenderItemShapes; std::vector _modelMeshMaterialNames; From 99a2fedd5f28da1f9562daa857ca74f83a735ceb Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 11 Oct 2019 14:16:30 -0700 Subject: [PATCH 050/121] Quick fix for non-matching vertex attributes in GLTF mesh primitives --- libraries/fbx/src/GLTFSerializer.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 7961cdb036..8d044f3e30 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1261,7 +1261,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& mesh.tangents.push_back(glm::vec3(tanW * tangents[n], tangents[n + 1], tanW * tangents[n + 2])); } } else if (meshAttributes.contains("TANGENT")) { - mesh.tangents.resize(partVerticesCount); + mesh.tangents.resize(mesh.tangents.size() + partVerticesCount); } if (texcoords.size() == partVerticesCount * TEX_COORD_STRIDE) { @@ -1270,7 +1270,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& mesh.texCoords.push_back(glm::vec2(texcoords[n], texcoords[n + 1])); } } else if (meshAttributes.contains("TEXCOORD_0")) { - mesh.texCoords.resize(partVerticesCount); + mesh.texCoords.resize(mesh.texCoords.size() + partVerticesCount); } if (texcoords2.size() == partVerticesCount * TEX_COORD_STRIDE) { @@ -1279,7 +1279,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& mesh.texCoords1.push_back(glm::vec2(texcoords2[n], texcoords2[n + 1])); } } else if (meshAttributes.contains("TEXCOORD_1")) { - mesh.texCoords1.resize(partVerticesCount); + mesh.texCoords1.resize(mesh.texCoords1.size() + partVerticesCount); } if (colors.size() == partVerticesCount * colorStride) { From 6b46f8e6c49ff2c0031327aa7f5e449dc2592c74 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 11 Oct 2019 14:51:48 -0700 Subject: [PATCH 051/121] Remove empty for loop --- libraries/fbx/src/GLTFSerializer.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 8d044f3e30..bd7fc7dd7d 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1082,10 +1082,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } - for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { - } - - int meshCount = _file.meshes.size(); hfmModel.meshes.resize(meshCount); hfmModel.meshExtents.reset(); From 8a1f3648f90622d7f5ff9ab424a38bb3d76d3a77 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Fri, 11 Oct 2019 18:06:44 -0700 Subject: [PATCH 052/121] fooling around to get the cluster working --- libraries/fbx/src/FBXSerializer.cpp | 2 +- libraries/hfm/src/hfm/HFM.h | 2 +- libraries/render-utils/src/CauterizedModel.cpp | 18 +++++++++--------- libraries/render-utils/src/MeshPartPayload.cpp | 6 +++--- 
libraries/render-utils/src/Model.cpp | 17 +++++++++-------- 5 files changed, 23 insertions(+), 22 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index bec42ca01c..58a9ed2570 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1502,7 +1502,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const hfm::Shape& shape = partShapes[i]; shape.mesh = meshIndex; shape.meshPart = i; - shape.transform = transformIndex; + shape.joint = transformIndex; auto matName = mesh.parts[i].materialID; auto materialIt = materialNameToID.find(matName.toStdString()); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index f3330d9291..ba7e90bd92 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -314,7 +314,7 @@ public: uint32_t mesh { UNDEFINED_KEY }; uint32_t meshPart { UNDEFINED_KEY }; uint32_t material { UNDEFINED_KEY }; - uint32_t transform { UNDEFINED_KEY }; // The hfm::Joint associated with this shape, containing transform information + uint32_t joint { UNDEFINED_KEY }; // The hfm::Joint associated with this shape, containing transform information // TODO: Have all serializers calculate hfm::Shape::transformedExtents in world space where they previously calculated hfm::Mesh::meshExtents. Change all code that uses hfm::Mesh::meshExtents to use this instead. Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after transform information is applied, while not taking into account rigging/skinning uint32_t dynamicTransform { UNDEFINED_KEY }; diff --git a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index 9f9acbf182..9849880822 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -33,7 +33,7 @@ bool CauterizedModel::updateGeometry() { if (_isCauterized && needsFullUpdate) { assert(_cauterizeMeshStates.empty()); const HFMModel& hfmModel = getHFMModel(); - const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + /* const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; for (int i = 0; i < hfmDynamicTransforms.size(); i++) { const auto& dynT = hfmDynamicTransforms[i]; MeshState state; @@ -44,9 +44,8 @@ bool CauterizedModel::updateGeometry() { } _cauterizeMeshStates.append(state); _meshStates.push_back(state); - } - - /* foreach (const HFMMesh& mesh, hfmModel.meshes) { + }*/ + foreach (const HFMMesh& mesh, hfmModel.meshes) { Model::MeshState state; if (_useDualQuaternionSkinning) { state.clusterDualQuaternions.resize(mesh.clusters.size()); @@ -55,7 +54,7 @@ bool CauterizedModel::updateGeometry() { state.clusterMatrices.resize(mesh.clusters.size()); _cauterizeMeshStates.append(state); } - }*/ + } } return needsFullUpdate; } @@ -68,7 +67,7 @@ void CauterizedModel::createRenderItemSet() { // all of our mesh vectors must match in size if (meshes.size() != _meshStates.size()) { qCDebug(renderutils) << "WARNING!!!! Mesh Sizes don't match! 
We will not segregate mesh groups yet."; - return; + // return; } // We should not have any existing renderItems if we enter this section of code @@ -241,9 +240,10 @@ void CauterizedModel::updateRenderItems() { const auto& shapeState = self->getShapeState(i); - - const auto& meshState = self->getMeshState(meshIndex); - const auto& cauterizedMeshState = self->getCauterizeMeshState(meshIndex); + // const auto& meshState = self->getMeshState(meshIndex); + // const auto& cauterizedMeshState = self->getCauterizeMeshState(meshIndex); + MeshState meshState; + MeshState cauterizedMeshState; bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex); bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning(); diff --git a/libraries/render-utils/src/MeshPartPayload.cpp b/libraries/render-utils/src/MeshPartPayload.cpp index 7b37c847af..2fb36dad67 100644 --- a/libraries/render-utils/src/MeshPartPayload.cpp +++ b/libraries/render-utils/src/MeshPartPayload.cpp @@ -209,14 +209,14 @@ ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, in auto& modelMesh = model->getNetworkModel()->getMeshes().at(_meshIndex); _meshNumVertices = (int)modelMesh->getNumVertices(); - const Model::MeshState& state = model->getMeshState(_meshIndex); + // const Model::MeshState& state = model->getMeshState(_meshIndex); updateMeshPart(modelMesh, partIndex); if (useDualQuaternionSkinning) { - computeAdjustedLocalBound(state.clusterDualQuaternions); + // computeAdjustedLocalBound(state.clusterDualQuaternions); } else { - computeAdjustedLocalBound(state.clusterMatrices); + // computeAdjustedLocalBound(state.clusterMatrices); } Transform renderTransform = transform; diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index 0d555b605a..662b6f190a 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -233,7 +233,8 @@ void Model::updateRenderItems() { auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex; const auto& shapeState = self->getShapeState(i); - const auto& meshState = self->getMeshState(meshIndex); + // const auto& meshState = self->getMeshState(meshIndex); + MeshState meshState; bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex); bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning(); @@ -290,9 +291,9 @@ void Model::updateShapeStatesFromRig() { const auto& shapes = hfmModel.shapes; _shapeStates.resize(shapes.size()); for (int s = 0; s < shapes.size(); ++s) { - uint32_t jointId = shapes[s].transform; + uint32_t jointId = shapes[s].joint; if (jointId < (uint32_t) _rig.getJointStateCount()) { - _shapeStates[s]._rootFromJointTransform = _rig.getJointTransform(shapes[s].transform); + _shapeStates[s]._rootFromJointTransform = _rig.getJointTransform(jointId); } } } @@ -316,7 +317,7 @@ bool Model::updateGeometry() { const HFMModel& hfmModel = getHFMModel(); const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; - /* int i = 0; + /* int i = 0; for (const auto& mesh: hfmModel.meshes) { MeshState state; state.clusterDualQuaternions.resize(mesh.clusters.size()); @@ -325,13 +326,13 @@ bool Model::updateGeometry() { i++; } */ - for (int i = 0; i < hfmDynamicTransforms.size(); i++) { + /*for (int i = 0; i < hfmDynamicTransforms.size(); i++) { const auto& dynT = hfmDynamicTransforms[i]; MeshState state; state.clusterDualQuaternions.resize(dynT.clusters.size()); state.clusterMatrices.resize(dynT.clusters.size()); _meshStates.push_back(state); - 
} + }*/ needFullUpdate = true; emit rigReady(); @@ -1476,7 +1477,7 @@ void Model::createRenderItemSet() { // all of our mesh vectors must match in size if (meshes.size() != _meshStates.size()) { qCDebug(renderutils) << "WARNING!!!! Mesh Sizes don't match! " << meshes.size() << _meshStates.size() << " We will not segregate mesh groups yet."; - return; + // return; } // We should not have any existing renderItems if we enter this section of code @@ -1516,7 +1517,7 @@ void Model::createRenderItemSet() { } bool Model::isRenderable() const { - return (!_shapeStates.empty() && !_meshStates.empty()) || (isLoaded() && _renderGeometry->getMeshes().empty()); + return (!_shapeStates.empty() /* && !_meshStates.empty()*/) || (isLoaded() && _renderGeometry->getMeshes().empty()); } std::set Model::getMeshIDsFromMaterialID(QString parentMaterialName) { From 05ac9aefa8fbd9121e27c009ac9b8b06f2e3cee7 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Mon, 14 Oct 2019 02:21:01 -0700 Subject: [PATCH 053/121] the clusterMatrices should be working, but n skin index and weights are assigned yet --- libraries/animation/src/AnimSkeleton.cpp | 29 ++++ libraries/animation/src/AnimSkeleton.h | 2 + .../render-utils/src/CauterizedModel.cpp | 158 ++++++++++++++---- .../render-utils/src/MeshPartPayload.cpp | 44 +---- libraries/render-utils/src/MeshPartPayload.h | 8 +- libraries/render-utils/src/Model.cpp | 116 +++++++++---- libraries/render-utils/src/Model.h | 2 +- 7 files changed, 242 insertions(+), 117 deletions(-) diff --git a/libraries/animation/src/AnimSkeleton.cpp b/libraries/animation/src/AnimSkeleton.cpp index b26d00d8d0..a68f5c869f 100644 --- a/libraries/animation/src/AnimSkeleton.cpp +++ b/libraries/animation/src/AnimSkeleton.cpp @@ -30,6 +30,34 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) { // we make a copy of the inverseBindMatrices in order to prevent mutating the model bind pose // when we are dealing with a joint offset in the model + for (int i = 0; i < (int)hfmModel.dynamicTransforms.size(); i++) { + const auto& defor = hfmModel.dynamicTransforms[i]; + std::vector dummyClustersList; + + for (int j = 0; j < defor.clusters.size(); j++) { + std::vector bindMatrices; + // cast into a non-const reference, so we can mutate the FBXCluster + HFMCluster& cluster = const_cast(defor.clusters.at(j)); + + HFMCluster localCluster; + localCluster.jointIndex = cluster.jointIndex; + localCluster.inverseBindMatrix = cluster.inverseBindMatrix; + localCluster.inverseBindTransform.evalFromRawMatrix(localCluster.inverseBindMatrix); + + // if we have a joint offset in the fst file then multiply its inverse by the + // model cluster inverse bind matrix + if (hfmModel.jointRotationOffsets.contains(cluster.jointIndex)) { + AnimPose localOffset(hfmModel.jointRotationOffsets[cluster.jointIndex], glm::vec3()); + localCluster.inverseBindMatrix = (glm::mat4)localOffset.inverse() * cluster.inverseBindMatrix; + localCluster.inverseBindTransform.evalFromRawMatrix(localCluster.inverseBindMatrix); + } + dummyClustersList.push_back(localCluster); + } + _clusterBindMatrixOriginalValues.push_back(dummyClustersList); + } + + +/* for (int i = 0; i < (int)hfmModel.meshes.size(); i++) { const HFMMesh& mesh = hfmModel.meshes.at(i); std::vector dummyClustersList; @@ -55,6 +83,7 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) { } _clusterBindMatrixOriginalValues.push_back(dummyClustersList); } +*/ } AnimSkeleton::AnimSkeleton(const std::vector& joints, const QMap jointOffsets) { diff --git a/libraries/animation/src/AnimSkeleton.h 
b/libraries/animation/src/AnimSkeleton.h index efc1c1599f..526959df9a 100644 --- a/libraries/animation/src/AnimSkeleton.h +++ b/libraries/animation/src/AnimSkeleton.h @@ -70,6 +70,8 @@ public: std::vector lookUpJointIndices(const std::vector& jointNames) const; const HFMCluster getClusterBindMatricesOriginalValues(const int meshIndex, const int clusterIndex) const { return _clusterBindMatrixOriginalValues[meshIndex][clusterIndex]; } + // const HFMCluster getClusterBindMatricesOriginalValues(const int meshIndex, const int clusterIndex) const { return _clusterBindMatrixOriginalValues[meshIndex][clusterIndex]; } + protected: void buildSkeletonFromJoints(const std::vector& joints, const QMap jointOffsets); diff --git a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index 9849880822..3e7c694768 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -32,8 +32,8 @@ bool CauterizedModel::updateGeometry() { bool needsFullUpdate = Model::updateGeometry(); if (_isCauterized && needsFullUpdate) { assert(_cauterizeMeshStates.empty()); - const HFMModel& hfmModel = getHFMModel(); - /* const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + /* const HFMModel& hfmModel = getHFMModel(); + const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; for (int i = 0; i < hfmDynamicTransforms.size(); i++) { const auto& dynT = hfmDynamicTransforms[i]; MeshState state; @@ -45,7 +45,27 @@ bool CauterizedModel::updateGeometry() { _cauterizeMeshStates.append(state); _meshStates.push_back(state); }*/ - foreach (const HFMMesh& mesh, hfmModel.meshes) { + + const HFMModel& hfmModel = getHFMModel(); + const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + int i = 0; + /* for (const auto& mesh: hfmModel.meshes) { + MeshState state; + state.clusterDualQuaternions.resize(mesh.clusters.size()); + state.clusterMatrices.resize(mesh.clusters.size()); + _meshStates.push_back(state); + i++; + } + */ + for (int i = 0; i < hfmDynamicTransforms.size(); i++) { + const auto& dynT = hfmDynamicTransforms[i]; + MeshState state; + state.clusterDualQuaternions.resize(dynT.clusters.size()); + state.clusterMatrices.resize(dynT.clusters.size()); + _cauterizeMeshStates.push_back(state); + } + + /* foreach (const HFMMesh& mesh, hfmModel.meshes) { Model::MeshState state; if (_useDualQuaternionSkinning) { state.clusterDualQuaternions.resize(mesh.clusters.size()); @@ -54,7 +74,7 @@ bool CauterizedModel::updateGeometry() { state.clusterMatrices.resize(mesh.clusters.size()); _cauterizeMeshStates.append(state); } - } + }*/ } return needsFullUpdate; } @@ -64,11 +84,6 @@ void CauterizedModel::createRenderItemSet() { assert(isLoaded()); const auto& meshes = _renderGeometry->getMeshes(); - // all of our mesh vectors must match in size - if (meshes.size() != _meshStates.size()) { - qCDebug(renderutils) << "WARNING!!!! Mesh Sizes don't match! 
We will not segregate mesh groups yet."; - // return; - } // We should not have any existing renderItems if we enter this section of code Q_ASSERT(_modelMeshRenderItems.isEmpty()); @@ -88,7 +103,20 @@ void CauterizedModel::createRenderItemSet() { Transform::mult(transform, transform, offset); // Run through all of the meshes, and place them into their segregated, but unsorted buckets + // Run through all of the meshes, and place them into their segregated, but unsorted buckets int shapeID = 0; + const auto& shapes = _renderGeometry->getHFMModel().shapes; + for (shapeID; shapeID < shapes.size(); shapeID++) { + const auto& shape = shapes[shapeID]; + + _modelMeshRenderItems << std::make_shared(shared_from_this(), shape.mesh, shape.meshPart, shapeID, transform); + + auto material = getNetworkModel()->getShapeMaterial(shapeID); + _modelMeshMaterialNames.push_back(material ? material->getName() : ""); + _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)shape.mesh, shape.dynamicTransform }); + } + +/* int shapeID = 0; uint32_t numMeshes = (uint32_t)meshes.size(); for (uint32_t i = 0; i < numMeshes; i++) { const auto& mesh = meshes.at(i); @@ -106,7 +134,7 @@ void CauterizedModel::createRenderItemSet() { _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)i }); shapeID++; } - } + }*/ } else { Model::createRenderItemSet(); } @@ -122,6 +150,38 @@ void CauterizedModel::updateClusterMatrices() { updateShapeStatesFromRig(); _needsUpdateClusterMatrices = false; + + + const HFMModel& hfmModel = getHFMModel(); + const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + for (int i = 0; i < (int)_meshStates.size(); i++) { + MeshState& state = _meshStates[i]; + const auto& deformer = hfmDynamicTransforms[i]; + + int meshIndex = i; + int clusterIndex = 0; + + for (int d = 0; d < deformer.clusters.size(); d++) { + const auto& cluster = deformer.clusters[d]; + clusterIndex = d; + + const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex); + + if (_useDualQuaternionSkinning) { + auto jointPose = _rig.getJointPose(cluster.jointIndex); + Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans()); + Transform clusterTransform; + Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform); + state.clusterDualQuaternions[d] = Model::TransformDualQuaternion(clusterTransform); + } + else { + auto jointMatrix = _rig.getJointTransform(cluster.jointIndex); + glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[d]); + } + + } + } +/* const HFMModel& hfmModel = getHFMModel(); for (int i = 0; i < (int)_meshStates.size(); i++) { Model::MeshState& state = _meshStates[i]; @@ -145,7 +205,7 @@ void CauterizedModel::updateClusterMatrices() { } } } - +*/ // as an optimization, don't build cautrizedClusterMatrices if the boneSet is empty. 
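// A minimal sketch of the per-cluster product computed in updateClusterMatrices above:
// a skinning ("cluster") matrix is the joint's current global transform composed with
// the cluster's inverse bind matrix, so a vertex stored in bind-pose model space lands
// at the joint's animated pose. jointGlobal, inverseBind and clusterJointIndex are
// illustrative placeholders for the rig and hfm deformer data, written against plain glm.
#include <glm/glm.hpp>
#include <vector>

std::vector<glm::mat4> buildClusterMatrices(const std::vector<glm::mat4>& jointGlobal,
                                            const std::vector<glm::mat4>& inverseBind,
                                            const std::vector<int>& clusterJointIndex) {
    std::vector<glm::mat4> clusterMatrices(clusterJointIndex.size());
    for (size_t c = 0; c < clusterJointIndex.size(); ++c) {
        // inverseBind undoes the joint's bind pose; jointGlobal applies its animated pose.
        clusterMatrices[c] = jointGlobal[clusterJointIndex[c]] * inverseBind[c];
    }
    return clusterMatrices;
}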
if (!_cauterizeBoneSet.empty()) { @@ -236,42 +296,66 @@ void CauterizedModel::updateRenderItems() { auto itemID = self->_modelMeshRenderItemIDs[i]; auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex; - auto deformerIndex = self->_modelMeshRenderItemShapes[i].meshIndex; const auto& shapeState = self->getShapeState(i); - // const auto& meshState = self->getMeshState(meshIndex); - // const auto& cauterizedMeshState = self->getCauterizeMeshState(meshIndex); - MeshState meshState; - MeshState cauterizedMeshState; + auto deformerIndex = self->_modelMeshRenderItemShapes[i].deformerIndex; + bool isDeformed = (deformerIndex != hfm::UNDEFINED_KEY); + + + // auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex; + // auto deformerIndex = self->_modelMeshRenderItemShapes[i].meshIndex; + + // const auto& shapeState = self->getShapeState(i); + bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex); bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning(); - transaction.updateItem(itemID, [modelTransform, shapeState, meshState, useDualQuaternionSkinning, cauterizedMeshState, invalidatePayloadShapeKey, - primitiveMode, renderItemKeyGlobalFlags, enableCauterization](ModelMeshPartPayload& mmppData) { - CauterizedMeshPartPayload& data = static_cast(mmppData); - if (useDualQuaternionSkinning) { - data.updateClusterBuffer(meshState.clusterDualQuaternions, - cauterizedMeshState.clusterDualQuaternions); - data.computeAdjustedLocalBound(meshState.clusterDualQuaternions); - } else { - data.updateClusterBuffer(meshState.clusterMatrices, - cauterizedMeshState.clusterMatrices); - data.computeAdjustedLocalBound(meshState.clusterMatrices); - } - Transform renderTransform = modelTransform; - if (meshState.clusterMatrices.size() <= 2) { + + if (isDeformed) { + + const auto& meshState = self->getMeshState(deformerIndex); + const auto& cauterizedMeshState = self->getCauterizeMeshState(deformerIndex); + + transaction.updateItem(itemID, [modelTransform, shapeState, meshState, useDualQuaternionSkinning, cauterizedMeshState, invalidatePayloadShapeKey, + primitiveMode, renderItemKeyGlobalFlags, enableCauterization](ModelMeshPartPayload& mmppData) { + CauterizedMeshPartPayload& data = static_cast(mmppData); + if (useDualQuaternionSkinning) { + data.updateClusterBuffer(meshState.clusterDualQuaternions, + cauterizedMeshState.clusterDualQuaternions); + } else { + data.updateClusterBuffer(meshState.clusterMatrices, + cauterizedMeshState.clusterMatrices); + } + + Transform renderTransform = modelTransform; + // if (meshState.clusterMatrices.size() <= 2) { + renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); + // } + data.updateTransform(renderTransform); + data.updateTransformForCauterizedMesh(renderTransform); + + data.setEnableCauterization(enableCauterization); + data.updateKey(renderItemKeyGlobalFlags); + data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning); + }); + } else { + transaction.updateItem(itemID, [modelTransform, shapeState, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags](ModelMeshPartPayload& data) { + + Transform renderTransform = modelTransform; + // if (meshState.clusterMatrices.size() <= 1) { renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); - } - data.updateTransform(renderTransform); - data.updateTransformForCauterizedMesh(renderTransform); + // } + data.updateTransform(renderTransform); - 
data.setEnableCauterization(enableCauterization); - data.updateKey(renderItemKeyGlobalFlags); - data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning); - }); + // data.setEnableCauterization(enableCauterization); + data.updateKey(renderItemKeyGlobalFlags); + data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, false); + }); + + } } scene->enqueueTransaction(transaction); diff --git a/libraries/render-utils/src/MeshPartPayload.cpp b/libraries/render-utils/src/MeshPartPayload.cpp index 2fb36dad67..a242c94299 100644 --- a/libraries/render-utils/src/MeshPartPayload.cpp +++ b/libraries/render-utils/src/MeshPartPayload.cpp @@ -213,17 +213,13 @@ ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, in updateMeshPart(modelMesh, partIndex); - if (useDualQuaternionSkinning) { - // computeAdjustedLocalBound(state.clusterDualQuaternions); - } else { - // computeAdjustedLocalBound(state.clusterMatrices); - } - Transform renderTransform = transform; const Model::ShapeState& shapeState = model->getShapeState(shapeIndex); renderTransform = transform.worldTransform(shapeState._rootFromJointTransform); updateTransform(renderTransform); + _deformerIndex = shape.dynamicTransform; + initCache(model); #if defined(Q_OS_MAC) || defined(Q_OS_ANDROID) @@ -245,7 +241,9 @@ void ModelMeshPartPayload::initCache(const ModelPointer& model) { if (_drawMesh) { auto vertexFormat = _drawMesh->getVertexFormat(); _hasColorAttrib = vertexFormat->hasAttribute(gpu::Stream::COLOR); - _isSkinned = vertexFormat->hasAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT) && vertexFormat->hasAttribute(gpu::Stream::SKIN_CLUSTER_INDEX); + if (_deformerIndex != hfm::UNDEFINED_KEY) { + _isSkinned = vertexFormat->hasAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT) && vertexFormat->hasAttribute(gpu::Stream::SKIN_CLUSTER_INDEX); + } const HFMModel& hfmModel = model->getHFMModel(); const HFMMesh& mesh = hfmModel.meshes.at(_meshIndex); @@ -432,38 +430,6 @@ void ModelMeshPartPayload::render(RenderArgs* args) { args->_details._trianglesRendered += _drawPart._numIndices / INDICES_PER_TRIANGLE; } -void ModelMeshPartPayload::computeAdjustedLocalBound(const std::vector& clusterMatrices) { - _adjustedLocalBound = _localBound; - if (clusterMatrices.size() > 0) { - _adjustedLocalBound.transform(clusterMatrices.back()); - - for (int i = 0; i < (int)clusterMatrices.size() - 1; ++i) { - AABox clusterBound = _localBound; - clusterBound.transform(clusterMatrices[i]); - _adjustedLocalBound += clusterBound; - } - } -} - -void ModelMeshPartPayload::computeAdjustedLocalBound(const std::vector& clusterDualQuaternions) { - _adjustedLocalBound = _localBound; - if (clusterDualQuaternions.size() > 0) { - Transform rootTransform(clusterDualQuaternions.back().getRotation(), - clusterDualQuaternions.back().getScale(), - clusterDualQuaternions.back().getTranslation()); - _adjustedLocalBound.transform(rootTransform); - - for (int i = 0; i < (int)clusterDualQuaternions.size() - 1; ++i) { - AABox clusterBound = _localBound; - Transform transform(clusterDualQuaternions[i].getRotation(), - clusterDualQuaternions[i].getScale(), - clusterDualQuaternions[i].getTranslation()); - clusterBound.transform(transform); - _adjustedLocalBound += clusterBound; - } - } -} - void ModelMeshPartPayload::setBlendshapeBuffer(const std::unordered_map& blendshapeBuffers, const QVector& blendedMeshSizes) { if (_meshIndex < blendedMeshSizes.length() && blendedMeshSizes.at(_meshIndex) == _meshNumVertices) { auto blendshapeBuffer = 
blendshapeBuffers.find(_meshIndex); diff --git a/libraries/render-utils/src/MeshPartPayload.h b/libraries/render-utils/src/MeshPartPayload.h index 9ddc62db40..50e06c024c 100644 --- a/libraries/render-utils/src/MeshPartPayload.h +++ b/libraries/render-utils/src/MeshPartPayload.h @@ -57,7 +57,6 @@ public: bool _hasColorAttrib { false }; graphics::Box _localBound; - graphics::Box _adjustedLocalBound; mutable graphics::Box _worldBound; std::shared_ptr _drawMesh; @@ -111,12 +110,6 @@ public: void bindMesh(gpu::Batch& batch) override; void bindTransform(gpu::Batch& batch, RenderArgs::RenderMode renderMode) const override; - // matrix palette skinning - void computeAdjustedLocalBound(const std::vector& clusterMatrices); - - // dual quaternion skinning - void computeAdjustedLocalBound(const std::vector& clusterDualQuaternions); - gpu::BufferPointer _clusterBuffer; enum class ClusterBufferType { Matrices, DualQuaternions }; @@ -124,6 +117,7 @@ public: int _meshIndex; int _shapeID; + int _deformerIndex; bool _isSkinned{ false }; bool _isBlendShaped { false }; diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index 662b6f190a..27e8725572 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -186,7 +186,7 @@ bool Model::shouldInvalidatePayloadShapeKey(int meshIndex) { const auto& networkMeshes = getNetworkModel()->getMeshes(); // if our index is ever out of range for either meshes or networkMeshes, then skip it, and set our _meshGroupsKnown // to false to rebuild out mesh groups. - if (meshIndex < 0 || meshIndex >= (int)networkMeshes.size() || meshIndex >= (int)hfmModel.meshes.size() || meshIndex >= (int)_meshStates.size()) { + if (meshIndex < 0 || meshIndex >= (int)networkMeshes.size() || meshIndex >= (int)hfmModel.meshes.size() /* || meshIndex >= (int)_meshStates.size()*/) { _needsFixupInScene = true; // trigger remove/add cycle invalidCalculatedMeshBoxes(); // if we have to reload, we need to assume our mesh boxes are all invalid return true; @@ -233,32 +233,51 @@ void Model::updateRenderItems() { auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex; const auto& shapeState = self->getShapeState(i); - // const auto& meshState = self->getMeshState(meshIndex); - MeshState meshState; + + auto deformerIndex = self->_modelMeshRenderItemShapes[i].deformerIndex; + bool isDeformed = (deformerIndex != hfm::UNDEFINED_KEY); bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex); - bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning(); - transaction.updateItem(itemID, [modelTransform, shapeState, meshState, useDualQuaternionSkinning, - invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags, cauterized](ModelMeshPartPayload& data) { - if (useDualQuaternionSkinning) { - data.updateClusterBuffer(meshState.clusterDualQuaternions); - data.computeAdjustedLocalBound(meshState.clusterDualQuaternions); - } else { - data.updateClusterBuffer(meshState.clusterMatrices); - data.computeAdjustedLocalBound(meshState.clusterMatrices); - } + + if (isDeformed) { - Transform renderTransform = modelTransform; - if (meshState.clusterMatrices.size() <= 1) { + const auto& meshState = self->getMeshState(deformerIndex); + // MeshState meshState; + bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning(); + + + transaction.updateItem(itemID, [modelTransform, shapeState, meshState, useDualQuaternionSkinning, + invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags, 
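// A minimal sketch of the dual-quaternion packing behind useDualQuaternionSkinning: a
// rigid transform (rotation q, translation t) becomes two quaternions, and blending those
// per vertex avoids the collapsing artifacts of blending matrices directly. DualQuat and
// the helper names are illustrative, written against plain glm rather than the engine's
// TransformDualQuaternion.
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

struct DualQuat {
    glm::quat real;  // rotation
    glm::quat dual;  // translation, encoded as 0.5 * (0, t) * real
};

DualQuat makeDualQuat(const glm::quat& rotation, const glm::vec3& translation) {
    DualQuat dq;
    dq.real = rotation;
    glm::quat t(0.0f, translation.x, translation.y, translation.z);  // pure quaternion (0, t)
    dq.dual = (t * rotation) * 0.5f;
    return dq;
}

// Recovering the translation (t = 2 * dual * conjugate(real)) round-trips the packing.
glm::vec3 dualQuatTranslation(const DualQuat& dq) {
    glm::quat t = (dq.dual * glm::conjugate(dq.real)) * 2.0f;
    return glm::vec3(t.x, t.y, t.z);
}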
cauterized](ModelMeshPartPayload& data) { + if (useDualQuaternionSkinning) { + data.updateClusterBuffer(meshState.clusterDualQuaternions); + } else { + data.updateClusterBuffer(meshState.clusterMatrices); + } + + Transform renderTransform = modelTransform; + // if (meshState.clusterMatrices.size() <= 1) { + renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); + // } + data.updateTransform(renderTransform); + + data.setCauterized(cauterized); + data.updateKey(renderItemKeyGlobalFlags); + data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning); + }); + } else { + transaction.updateItem(itemID, [modelTransform, shapeState, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags](ModelMeshPartPayload& data) { + + Transform renderTransform = modelTransform; + // if (meshState.clusterMatrices.size() <= 1) { renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); - } - data.updateTransform(renderTransform); + // } + data.updateTransform(renderTransform); - data.setCauterized(cauterized); - data.updateKey(renderItemKeyGlobalFlags); - data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning); - }); + data.updateKey(renderItemKeyGlobalFlags); + data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, false); + }); + } } AbstractViewStateInterface::instance()->getMain3DScene()->enqueueTransaction(transaction); @@ -317,8 +336,8 @@ bool Model::updateGeometry() { const HFMModel& hfmModel = getHFMModel(); const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; - /* int i = 0; - for (const auto& mesh: hfmModel.meshes) { + int i = 0; + /* for (const auto& mesh: hfmModel.meshes) { MeshState state; state.clusterDualQuaternions.resize(mesh.clusters.size()); state.clusterMatrices.resize(mesh.clusters.size()); @@ -326,13 +345,13 @@ bool Model::updateGeometry() { i++; } */ - /*for (int i = 0; i < hfmDynamicTransforms.size(); i++) { + for (int i = 0; i < hfmDynamicTransforms.size(); i++) { const auto& dynT = hfmDynamicTransforms[i]; MeshState state; state.clusterDualQuaternions.resize(dynT.clusters.size()); state.clusterMatrices.resize(dynT.clusters.size()); _meshStates.push_back(state); - }*/ + } needFullUpdate = true; emit rigReady(); @@ -1407,8 +1426,34 @@ void Model::updateClusterMatrices() { _needsUpdateClusterMatrices = false; const HFMModel& hfmModel = getHFMModel(); + const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; for (int i = 0; i < (int) _meshStates.size(); i++) { MeshState& state = _meshStates[i]; + const auto& deformer = hfmDynamicTransforms[i]; + + int meshIndex = i; + int clusterIndex = 0; + + for (int d = 0; d < deformer.clusters.size(); d++) { + const auto& cluster = deformer.clusters[d]; + clusterIndex = d; + + const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex); + + if (_useDualQuaternionSkinning) { + auto jointPose = _rig.getJointPose(cluster.jointIndex); + Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans()); + Transform clusterTransform; + Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform); + state.clusterDualQuaternions[d] = Model::TransformDualQuaternion(clusterTransform); + } + else { + auto jointMatrix = _rig.getJointTransform(cluster.jointIndex); + glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[d]); + } + + } +/* int meshIndex = i; const HFMMesh& mesh = hfmModel.meshes.at(i); for (int j = 0; j < 
mesh.clusters.size(); j++) { @@ -1425,7 +1470,7 @@ void Model::updateClusterMatrices() { auto jointMatrix = _rig.getJointTransform(cluster.jointIndex); glm_mat4u_mul(jointMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, state.clusterMatrices[j]); } - } + }*/ } // post the blender if we're not currently waiting for one to finish @@ -1474,12 +1519,6 @@ void Model::createRenderItemSet() { assert(isLoaded()); const auto& meshes = _renderGeometry->getMeshes(); - // all of our mesh vectors must match in size - if (meshes.size() != _meshStates.size()) { - qCDebug(renderutils) << "WARNING!!!! Mesh Sizes don't match! " << meshes.size() << _meshStates.size() << " We will not segregate mesh groups yet."; - // return; - } - // We should not have any existing renderItems if we enter this section of code Q_ASSERT(_modelMeshRenderItems.isEmpty()); @@ -1497,6 +1536,17 @@ void Model::createRenderItemSet() { // Run through all of the meshes, and place them into their segregated, but unsorted buckets int shapeID = 0; + const auto& shapes = _renderGeometry->getHFMModel().shapes; + for (shapeID; shapeID < shapes.size(); shapeID++) { + const auto& shape = shapes[shapeID]; + + _modelMeshRenderItems << std::make_shared(shared_from_this(), shape.mesh, shape.meshPart, shapeID, transform); + + auto material = getNetworkModel()->getShapeMaterial(shapeID); + _modelMeshMaterialNames.push_back(material ? material->getName() : ""); + _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)shape.mesh, shape.dynamicTransform }); + } +/* uint32_t numMeshes = (uint32_t)meshes.size(); for (uint32_t i = 0; i < numMeshes; i++) { const auto& mesh = meshes.at(i); @@ -1513,7 +1563,7 @@ void Model::createRenderItemSet() { _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)i }); shapeID++; } - } + }*/ } bool Model::isRenderable() const { diff --git a/libraries/render-utils/src/Model.h b/libraries/render-utils/src/Model.h index 0a102630b6..09fb9b581e 100644 --- a/libraries/render-utils/src/Model.h +++ b/libraries/render-utils/src/Model.h @@ -473,7 +473,7 @@ protected: QVector> _modelMeshRenderItems; QMap _modelMeshRenderItemsMap; render::ItemIDs _modelMeshRenderItemIDs; - using ShapeInfo = struct { int meshIndex; int deformerIndex; }; + using ShapeInfo = struct { int meshIndex; uint32_t deformerIndex{ hfm::UNDEFINED_KEY }; }; std::vector _modelMeshRenderItemShapes; std::vector _modelMeshMaterialNames; From 82247072deff0c2d0d0735dc539efa88d21e6df2 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 11 Oct 2019 16:42:20 -0700 Subject: [PATCH 054/121] Create new mesh if GLTF primitive has different vertex attributes --- libraries/fbx/src/GLTFSerializer.cpp | 124 ++++++++++++++++++--------- 1 file changed, 83 insertions(+), 41 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index bd7fc7dd7d..4de937b547 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1082,14 +1082,20 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } - int meshCount = _file.meshes.size(); - hfmModel.meshes.resize(meshCount); + int gltfMeshCount = _file.meshes.size(); hfmModel.meshExtents.reset(); - for (int meshIndex = 0; meshIndex < meshCount; ++meshIndex) { - const auto& gltfMesh = _file.meshes[meshIndex]; - auto& mesh = hfmModel.meshes[meshIndex]; - mesh.meshIndex = meshIndex; + std::vector> templateShapePerPrimPerGLTFMesh; + for (int gltfMeshIndex = 0; 
gltfMeshIndex < gltfMeshCount; ++gltfMeshIndex) { + const auto& gltfMesh = _file.meshes[gltfMeshIndex]; + hfmModel.meshes.emplace_back(); + // NOTE: The number of hfm meshes may be greater than the number of gltf meshes, if a gltf mesh has primitives with different vertex attributes. In that case, this mesh reference may be reassigned. + hfm::Mesh* meshPtr = &hfmModel.meshes.back(); + const size_t firstMeshIndexForGLTFMesh = hfmModel.meshes.size() - 1; + meshPtr->meshIndex = gltfMeshIndex; + templateShapePerPrimPerGLTFMesh.emplace_back(); + std::vector& templateShapePerPrim = templateShapePerPrimPerGLTFMesh.back(); + // TODO: Rewrite GLTF skinning definition if (!hfmModel.hasSkeletonJoints) { HFMCluster cluster; #if 0 @@ -1097,31 +1103,70 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& #endif cluster.inverseBindMatrix = glm::mat4(); cluster.inverseBindTransform = Transform(cluster.inverseBindMatrix); - mesh.clusters.append(cluster); + meshPtr->clusters.append(cluster); } else { // skinned model for (int j = 0; j < numNodes; ++j) { HFMCluster cluster; cluster.jointIndex = j; cluster.inverseBindMatrix = jointInverseBindTransforms[j]; cluster.inverseBindTransform = Transform(cluster.inverseBindMatrix); - mesh.clusters.append(cluster); + meshPtr->clusters.append(cluster); } } HFMCluster root; root.jointIndex = 0; root.inverseBindMatrix = jointInverseBindTransforms[root.jointIndex]; root.inverseBindTransform = Transform(root.inverseBindMatrix); - mesh.clusters.append(root); + meshPtr->clusters.append(root); - QSet meshAttributes; - for(const auto &primitive : gltfMesh.primitives) { - for (const auto& attribute : primitive.attributes.values.keys()) { - meshAttributes.insert(attribute); + QSet primitiveAttributes; + if (!gltfMesh.primitives.empty()) { + for (const auto& attribute : gltfMesh.primitives[0].attributes.values.keys()) { + primitiveAttributes.insert(attribute); } } + std::vector> primitiveAttributeVariants; - for(auto &primitive : gltfMesh.primitives) { - HFMMeshPart part = HFMMeshPart(); + int primCount = (int)gltfMesh.primitives.size(); + size_t hfmMeshIndex = firstMeshIndexForGLTFMesh; + for(int primIndex = 0; primIndex < primCount; ++primIndex) { + auto& primitive = gltfMesh.primitives[primIndex]; + + QList keys = primitive.attributes.values.keys(); + QSet newPrimitiveAttributes; + for (const auto& key : keys) { + newPrimitiveAttributes.insert(key); + } + if (newPrimitiveAttributes != primitiveAttributes) { + assert(primIndex != 0); + + // We need to use a different mesh because the vertex attributes are different + auto attributeVariantIt = std::find(primitiveAttributeVariants.cbegin(), primitiveAttributeVariants.cend(), newPrimitiveAttributes); + if (attributeVariantIt == primitiveAttributeVariants.cend()) { + // Need to allocate a new mesh + hfmModel.meshes.emplace_back(); + meshPtr = &hfmModel.meshes.back(); + hfmMeshIndex = hfmModel.meshes.size() - 1; + meshPtr->meshIndex = gltfMeshIndex; + primitiveAttributeVariants.push_back(newPrimitiveAttributes); + } else { + // An hfm mesh already exists for this gltf mesh with the same vertex attributes. Use it again. + auto variantIndex = (size_t)(attributeVariantIt - primitiveAttributeVariants.cbegin()); + hfmMeshIndex = firstMeshIndexForGLTFMesh + variantIndex; + meshPtr = &hfmModel.meshes[hfmMeshIndex]; + } + primitiveAttributes = newPrimitiveAttributes; + } + // Now, allocate the part for the correct mesh... 
+ hfm::Mesh& mesh = *meshPtr; + mesh.parts.emplace_back(); + hfm::MeshPart& part = mesh.parts.back(); + // ...and keep track of the relationship between the gltf mesh/primitive and the hfm mesh/part + templateShapePerPrim.emplace_back(); + hfm::Shape& templateShape = templateShapePerPrim.back(); + templateShape.mesh = (uint32_t)hfmMeshIndex; + templateShape.meshPart = (uint32_t)(mesh.parts.size() - 1); + templateShape.material = primitive.material; int indicesAccessorIdx = primitive.indices; @@ -1156,8 +1201,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& // Increment the triangle indices by the current mesh vertex count so each mesh part can all reference the same buffers within the mesh int prevMeshVerticesCount = mesh.vertices.count(); - - QList keys = primitive.attributes.values.keys(); QVector clusterJoints; QVector clusterWeights; @@ -1240,53 +1283,53 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& part.triangleIndices.append(validatedIndices); - mesh.vertices.reserve(partVerticesCount); + mesh.vertices.reserve(mesh.vertices.size() + partVerticesCount); for (int n = 0; n < vertices.size(); n = n + VERTEX_STRIDE) { mesh.vertices.push_back(glm::vec3(vertices[n], vertices[n + 1], vertices[n + 2])); } - mesh.normals.reserve(partVerticesCount); + mesh.normals.reserve(mesh.normals.size() + partVerticesCount); for (int n = 0; n < normals.size(); n = n + NORMAL_STRIDE) { mesh.normals.push_back(glm::vec3(normals[n], normals[n + 1], normals[n + 2])); } if (tangents.size() == partVerticesCount * tangentStride) { - mesh.tangents.reserve(partVerticesCount); + mesh.tangents.reserve(mesh.tangents.size() + partVerticesCount); for (int n = 0; n < tangents.size(); n += tangentStride) { float tanW = tangentStride == 4 ? 
tangents[n + 3] : 1; mesh.tangents.push_back(glm::vec3(tanW * tangents[n], tangents[n + 1], tanW * tangents[n + 2])); } - } else if (meshAttributes.contains("TANGENT")) { + } else if (primitiveAttributes.contains("TANGENT")) { mesh.tangents.resize(mesh.tangents.size() + partVerticesCount); } if (texcoords.size() == partVerticesCount * TEX_COORD_STRIDE) { - mesh.texCoords.reserve(partVerticesCount); + mesh.texCoords.reserve(mesh.texCoords.size() + partVerticesCount); for (int n = 0; n < texcoords.size(); n = n + 2) { mesh.texCoords.push_back(glm::vec2(texcoords[n], texcoords[n + 1])); } - } else if (meshAttributes.contains("TEXCOORD_0")) { + } else if (primitiveAttributes.contains("TEXCOORD_0")) { mesh.texCoords.resize(mesh.texCoords.size() + partVerticesCount); } if (texcoords2.size() == partVerticesCount * TEX_COORD_STRIDE) { - mesh.texCoords1.reserve(partVerticesCount); + mesh.texCoords1.reserve(mesh.texCoords1.size() + partVerticesCount); for (int n = 0; n < texcoords2.size(); n = n + 2) { mesh.texCoords1.push_back(glm::vec2(texcoords2[n], texcoords2[n + 1])); } - } else if (meshAttributes.contains("TEXCOORD_1")) { + } else if (primitiveAttributes.contains("TEXCOORD_1")) { mesh.texCoords1.resize(mesh.texCoords1.size() + partVerticesCount); } if (colors.size() == partVerticesCount * colorStride) { - mesh.colors.reserve(partVerticesCount); + mesh.colors.reserve(mesh.colors.size() + partVerticesCount); for (int n = 0; n < colors.size(); n += colorStride) { mesh.colors.push_back(glm::vec3(colors[n], colors[n + 1], colors[n + 2])); } - } else if (meshAttributes.contains("COLOR_0")) { - mesh.colors.reserve(partVerticesCount); + } else if (primitiveAttributes.contains("COLOR_0")) { + mesh.colors.reserve(mesh.colors.size() + partVerticesCount); for (int i = 0; i < partVerticesCount; ++i) { - mesh.colors.push_back(glm::vec3(1.0f, 1.0f, 1.0f)); + mesh.colors.push_back(glm::vec3(1.0f)); } } @@ -1312,7 +1355,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& clusterJoints.push_back(0); } } - } else if (meshAttributes.contains("JOINTS_0")) { + } else if (primitiveAttributes.contains("JOINTS_0")) { for (int i = 0; i < partVerticesCount; ++i) { for (int j = 0; j < 4; ++j) { clusterJoints.push_back(0); @@ -1342,10 +1385,10 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& clusterWeights.push_back(0.0f); } } - } else if (meshAttributes.contains("WEIGHTS_0")) { + } else if (primitiveAttributes.contains("WEIGHTS_0")) { for (int i = 0; i < partVerticesCount; ++i) { clusterWeights.push_back(1.0f); - for (int j = 1; j < 4; ++j) { + for (int j = 0; j < 4; ++j) { clusterWeights.push_back(0.0f); } } @@ -1383,8 +1426,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } } - mesh.parts.push_back(part); - // populate the texture coordinates if they don't exist if (mesh.texCoords.size() == 0 && !hfmModel.hasSkeletonJoints) { for (int i = 0; i < part.triangleIndices.size(); ++i) { mesh.texCoords.push_back(glm::vec2(0.0, 1.0)); } @@ -1468,22 +1509,23 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } + // Create the instance shapes for each transform node for (int nodeIndex = 0; nodeIndex < numNodes; ++nodeIndex) { const auto& node = _file.nodes[nodeIndex]; if (-1 == node.mesh) { continue; } - const auto& mesh = _file.meshes[node.mesh]; - int primCount = (int)mesh.primitives.size(); + const auto& gltfMesh = _file.meshes[node.mesh]; + const auto& templateShapePerPrim = 
templateShapePerPrimPerGLTFMesh[node.mesh]; + int primCount = (int)gltfMesh.primitives.size(); for (int primIndex = 0; primIndex < primCount; ++primIndex) { - const auto& primitive = mesh.primitives[primIndex]; - hfmModel.shapes.emplace_back(); + const auto& primitive = gltfMesh.primitives[primIndex]; + const auto& templateShape = templateShapePerPrim[primIndex]; + hfmModel.shapes.push_back(templateShape); auto& hfmShape = hfmModel.shapes.back(); + // Everything else is already defined (mesh, meshPart, material), so just define the new transform hfmShape.transform = nodeIndex; - hfmShape.mesh = node.mesh; - hfmShape.meshPart = primIndex; - hfmShape.material = primitive.material; } } From acca22e2d6b525a2bd6980ae16d2f2b67d378bef Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 14 Oct 2019 15:30:03 -0700 Subject: [PATCH 055/121] Remove unused primitive reference in GLTFSerializer --- libraries/fbx/src/GLTFSerializer.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 4de937b547..9fb00d3145 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1520,7 +1520,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& const auto& templateShapePerPrim = templateShapePerPrimPerGLTFMesh[node.mesh]; int primCount = (int)gltfMesh.primitives.size(); for (int primIndex = 0; primIndex < primCount; ++primIndex) { - const auto& primitive = gltfMesh.primitives[primIndex]; const auto& templateShape = templateShapePerPrim[primIndex]; hfmModel.shapes.push_back(templateShape); auto& hfmShape = hfmModel.shapes.back(); From 108d331d86923ffd1783ad5bd3a2ede8366c16f2 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Mon, 14 Oct 2019 17:45:07 -0700 Subject: [PATCH 056/121] more --- libraries/fbx/src/FBXSerializer.cpp | 2 +- libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp | 2 +- libraries/render-utils/src/Model.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 58a9ed2570..f15443aeb5 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1653,7 +1653,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const int oldIndex = fbxCluster.indices[i]; uint32_t newIndex = (uint32_t)extracted.newIndices.value(oldIndex); deformer.indices.push_back(newIndex); - deformer.indices.push_back((float)fbxCluster.weights[i]); + deformer.weights.push_back((float)fbxCluster.weights[i]); } } diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp index 8c27968de9..7fefe614d4 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp @@ -95,7 +95,7 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask -- The number of indices and weights for a deformer had different sizes and have been trimmed to match"); } // Record cluster sizes - const size_t numVertClusters = (reweightedDeformers.weightsPerVertex ? hfmMesh.clusterIndices.size() / reweightedDeformers.weightsPerVertex : 0); + const size_t numVertClusters = (reweightedDeformers.weightsPerVertex ? 
reweightedDeformers.indices.size() / reweightedDeformers.weightsPerVertex : 0); const size_t clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize(); const size_t clusterWeightsSize = numVertClusters * clusterWeightElement.getSize(); diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index 27e8725572..56dadf0537 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -257,7 +257,7 @@ void Model::updateRenderItems() { Transform renderTransform = modelTransform; // if (meshState.clusterMatrices.size() <= 1) { - renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); + renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); // } data.updateTransform(renderTransform); From f049ab7887af6c221abd7f6675a25c701dc761d8 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Tue, 15 Oct 2019 18:12:09 -0700 Subject: [PATCH 057/121] Understanding that a mesh can have just one Cluster and we need the bindingMatrix to be applied correctly, intentionally differentiating the transform used for bound evaluation from the one used for render in the case of a skinned mesh because the clusterMatrices contain the extra offset from rig to model --- libraries/fbx/src/FBXSerializer.cpp | 4 ++-- .../render-utils/src/CauterizedModel.cpp | 23 +++++++++++-------- .../render-utils/src/MeshPartPayload.cpp | 5 ++++ libraries/render-utils/src/MeshPartPayload.h | 1 + libraries/render-utils/src/Model.cpp | 3 ++- 5 files changed, 24 insertions(+), 12 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index f15443aeb5..8d2d1336a9 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1592,7 +1592,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } // whether we're skinned depends on how many clusters are attached - if (clusterIDs.size() > 1) { + if (clusterIDs.size() > 0) { hfm::DynamicTransform dynamicTransform; auto& clusters = dynamicTransform.clusters; std::vector deformers; @@ -1670,7 +1670,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const shape.dynamicTransform = dynamicTransformID; } } else { - // this is a single-joint mesh + // this is a no cluster mesh HFMJoint& joint = hfmModel.joints[rootJointIndex]; // Apply geometric offset, if present, by transforming the vertices directly diff --git a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index 3e7c694768..7d94cd61a5 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -319,7 +319,8 @@ void CauterizedModel::updateRenderItems() { const auto& meshState = self->getMeshState(deformerIndex); const auto& cauterizedMeshState = self->getCauterizeMeshState(deformerIndex); - transaction.updateItem(itemID, [modelTransform, shapeState, meshState, useDualQuaternionSkinning, cauterizedMeshState, invalidatePayloadShapeKey, + transaction.updateItem(itemID, + [modelTransform, shapeState, meshState, useDualQuaternionSkinning, cauterizedMeshState, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags, enableCauterization](ModelMeshPartPayload& mmppData) { CauterizedMeshPartPayload& data = static_cast(mmppData); if (useDualQuaternionSkinning) { @@ -331,26 +332,30 @@ void CauterizedModel::updateRenderItems() { } Transform renderTransform = modelTransform; - // if 
(meshState.clusterMatrices.size() <= 2) { - renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); + // if (meshState.clusterMatrices.size() <= 2) { + // renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); // } data.updateTransform(renderTransform); data.updateTransformForCauterizedMesh(renderTransform); + data.updateTransformAndBound(modelTransform.worldTransform(shapeState._rootFromJointTransform)); data.setEnableCauterization(enableCauterization); data.updateKey(renderItemKeyGlobalFlags); data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning); }); } else { - transaction.updateItem(itemID, [modelTransform, shapeState, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags](ModelMeshPartPayload& data) { + transaction.updateItem(itemID, + [modelTransform, shapeState, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags, enableCauterization] + (ModelMeshPartPayload& mmppData) { + CauterizedMeshPartPayload& data = static_cast(mmppData); Transform renderTransform = modelTransform; - // if (meshState.clusterMatrices.size() <= 1) { - renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); - // } - data.updateTransform(renderTransform); - // data.setEnableCauterization(enableCauterization); + renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); + data.updateTransform(renderTransform); + data.updateTransformForCauterizedMesh(renderTransform); + + data.setEnableCauterization(enableCauterization); data.updateKey(renderItemKeyGlobalFlags); data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, false); }); diff --git a/libraries/render-utils/src/MeshPartPayload.cpp b/libraries/render-utils/src/MeshPartPayload.cpp index a242c94299..5dc39d8674 100644 --- a/libraries/render-utils/src/MeshPartPayload.cpp +++ b/libraries/render-utils/src/MeshPartPayload.cpp @@ -70,6 +70,11 @@ void MeshPartPayload::updateTransform(const Transform& transform) { _worldBound.transform(_worldFromLocalTransform); } +void MeshPartPayload::updateTransformAndBound(const Transform& transform) { + _worldBound = _localBound; + _worldBound.transform(transform); +} + void MeshPartPayload::addMaterial(graphics::MaterialLayer material) { _drawMaterials.push(material); } diff --git a/libraries/render-utils/src/MeshPartPayload.h b/libraries/render-utils/src/MeshPartPayload.h index 50e06c024c..5d351e90d4 100644 --- a/libraries/render-utils/src/MeshPartPayload.h +++ b/libraries/render-utils/src/MeshPartPayload.h @@ -39,6 +39,7 @@ public: virtual void notifyLocationChanged() {} void updateTransform(const Transform& transform); + void updateTransformAndBound(const Transform& transform ); // Render Item interface virtual render::ItemKey getKey() const; diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index 56dadf0537..1e258a8dd1 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -257,9 +257,10 @@ void Model::updateRenderItems() { Transform renderTransform = modelTransform; // if (meshState.clusterMatrices.size() <= 1) { - renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); + // renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); // } data.updateTransform(renderTransform); + data.updateTransformAndBound(modelTransform.worldTransform(shapeState._rootFromJointTransform)); data.setCauterized(cauterized); 
data.updateKey(renderItemKeyGlobalFlags); From eecaeb11551cb5f7661e829cd675aebd96004965 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Wed, 16 Oct 2019 04:02:05 -0700 Subject: [PATCH 058/121] Found the issue causing the incorrect skinning, removed unnecessary data structures in the newly added objects, and renamed Deformer to SkinCluster and DynamicTransform to SkinDeformer --- libraries/animation/src/AnimSkeleton.cpp | 4 +- libraries/fbx/src/FBXSerializer.cpp | 49 +++++++++-------- libraries/hfm/src/hfm/HFM.h | 27 +++++----- .../model-baker/src/model-baker/Baker.cpp | 14 ++--- .../src/model-baker/BuildGraphicsMeshTask.cpp | 22 ++++---- .../src/model-baker/BuildGraphicsMeshTask.h | 2 +- .../model-baker/CollectShapeVerticesTask.cpp | 20 +++---- .../model-baker/CollectShapeVerticesTask.h | 2 +- .../src/model-baker/ReweightDeformersTask.cpp | 52 +++++++++---------- .../src/model-baker/ReweightDeformersTask.h | 2 +- .../render-utils/src/CauterizedModel.cpp | 8 +-- libraries/render-utils/src/Model.cpp | 12 ++--- 12 files changed, 110 insertions(+), 104 deletions(-) diff --git a/libraries/animation/src/AnimSkeleton.cpp b/libraries/animation/src/AnimSkeleton.cpp index a68f5c869f..bae1fb5b69 100644 --- a/libraries/animation/src/AnimSkeleton.cpp +++ b/libraries/animation/src/AnimSkeleton.cpp @@ -30,8 +30,8 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) { // we make a copy of the inverseBindMatrices in order to prevent mutating the model bind pose // when we are dealing with a joint offset in the model - for (int i = 0; i < (int)hfmModel.dynamicTransforms.size(); i++) { - const auto& defor = hfmModel.dynamicTransforms[i]; + for (int i = 0; i < (int)hfmModel.skinDeformers.size(); i++) { + const auto& defor = hfmModel.skinDeformers[i]; std::vector dummyClustersList; for (int j = 0; j < defor.clusters.size(); j++) { diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 8d2d1336a9..ac01d6f31a 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1593,9 +1593,9 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // whether we're skinned depends on how many clusters are attached if (clusterIDs.size() > 0) { - hfm::DynamicTransform dynamicTransform; - auto& clusters = dynamicTransform.clusters; - std::vector deformers; + hfm::SkinDeformer skinDeformer; + auto& clusters = skinDeformer.clusters; + std::vector skinClusters; for (const auto& clusterID : clusterIDs) { HFMCluster hfmCluster; const Cluster& fbxCluster = fbxClusters[clusterID]; @@ -1639,35 +1639,40 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const clusters.push_back(cluster); // Skinned mesh instances have a dynamic transform - dynamicTransform.deformers.reserve(clusterIDs.size()); - clusters.reserve(clusterIDs.size()); + skinDeformer.skinClusterIndices.reserve(clusterIDs.size()); for (const auto& clusterID : clusterIDs) { const Cluster& fbxCluster = fbxClusters[clusterID]; - dynamicTransform.deformers.emplace_back(); - deformers.emplace_back(); - hfm::Deformer& deformer = deformers.back(); + skinDeformer.skinClusterIndices.emplace_back(); + skinClusters.emplace_back(); + hfm::SkinCluster& skinCluster = skinClusters.back(); size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size()); - deformer.indices.reserve(indexWeightPairs); - deformer.weights.reserve(indexWeightPairs); - for (int i = 0; i < (int)indexWeightPairs; i++) { - int oldIndex = 
fbxCluster.indices[i]; - uint32_t newIndex = (uint32_t)extracted.newIndices.value(oldIndex); - deformer.indices.push_back(newIndex); - deformer.weights.push_back((float)fbxCluster.weights[i]); + skinCluster.indices.reserve(indexWeightPairs); + skinCluster.weights.reserve(indexWeightPairs); + + for (int j = 0; j < fbxCluster.indices.size(); j++) { + int oldIndex = fbxCluster.indices.at(j); + float weight = fbxCluster.weights.at(j); + for (QMultiHash::const_iterator it = extracted.newIndices.constFind(oldIndex); + it != extracted.newIndices.end() && it.key() == oldIndex; it++) { + int newIndex = it.value(); + + skinCluster.indices.push_back(newIndex); + skinCluster.weights.push_back(weight); + } } } // Store this model's deformers, this dynamic transform's deformer IDs - uint32_t deformerMinID = (uint32_t)hfmModel.deformers.size(); - hfmModel.deformers.insert(hfmModel.deformers.end(), deformers.cbegin(), deformers.cend()); - dynamicTransform.deformers.resize(deformers.size()); - std::iota(dynamicTransform.deformers.begin(), dynamicTransform.deformers.end(), deformerMinID); + uint32_t deformerMinID = (uint32_t)hfmModel.skinClusters.size(); + hfmModel.skinClusters.insert(hfmModel.skinClusters.end(), skinClusters.cbegin(), skinClusters.cend()); + skinDeformer.skinClusterIndices.resize(skinClusters.size()); + std::iota(skinDeformer.skinClusterIndices.begin(), skinDeformer.skinClusterIndices.end(), deformerMinID); // Store the model's dynamic transform, and put its ID in the shapes - hfmModel.dynamicTransforms.push_back(dynamicTransform); - uint32_t dynamicTransformID = (uint32_t)(hfmModel.dynamicTransforms.size() - 1); + hfmModel.skinDeformers.push_back(skinDeformer); + uint32_t skinDeformerID = (uint32_t)(hfmModel.skinDeformers.size() - 1); for (hfm::Shape& shape : partShapes) { - shape.dynamicTransform = dynamicTransformID; + shape.skinDeformer = skinDeformerID; } } else { // this is a no cluster mesh diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index ba7e90bd92..15ed876d94 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -242,17 +242,20 @@ public: QVector colors; QVector texCoords; QVector texCoords1; - QVector clusterIndices; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::deformers, hfm::Deformer) - QVector clusterWeights; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::deformers, hfm::Deformer) - QVector originalIndices; QVector clusters; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::clusters) - Extents meshExtents; // DEPRECATED (see hfm::Shape::transformedExtents) glm::mat4 modelTransform; // DEPRECATED (see hfm::Joint::globalTransform, hfm::Shape::transform, hfm::Model::joints) + // Skinning cluster attributes + QVector clusterIndices; + QVector clusterWeights; + + // Blendshape attributes QVector blendshapes; + + QVector originalIndices; // Original indices of the vertices unsigned int meshIndex; // the order the meshes appeared in the object file graphics::MeshPointer _mesh; @@ -294,18 +297,16 @@ public: }; // Formerly contained in hfm::Mesh -class Deformer { +class SkinCluster { public: std::vector indices; std::vector weights; }; -class DynamicTransform { +class SkinDeformer { public: - std::vector deformers; - std::vector clusters; // affect the deformer of the same index - std::vector blendshapes; - // There are also the meshExtents and modelTransform, which for now are left in hfm::Mesh + std::vector skinClusterIndices; + std::vector clusters; }; // The 
lightweight model part description. @@ -317,7 +318,7 @@ public: uint32_t joint { UNDEFINED_KEY }; // The hfm::Joint associated with this shape, containing transform information // TODO: Have all serializers calculate hfm::Shape::transformedExtents in world space where they previously calculated hfm::Mesh::meshExtents. Change all code that uses hfm::Mesh::meshExtents to use this instead. Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after transform information is applied, while not taking into account rigging/skinning - uint32_t dynamicTransform { UNDEFINED_KEY }; + uint32_t skinDeformer { UNDEFINED_KEY }; }; /// The runtime model format. @@ -334,9 +335,9 @@ public: std::vector meshes; std::vector materials; - std::vector deformers; - std::vector dynamicTransforms; + std::vector skinDeformers; + std::vector skinClusters; std::vector joints; QHash jointIndices; ///< 1-based, so as to more easily detect missing indices diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index ccb5e1816f..0b23c39aeb 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -29,7 +29,7 @@ namespace baker { class GetModelPartsTask { public: using Input = hfm::Model::Pointer; - using Output = VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector, std::vector, std::vector, std::vector>; + using Output = VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector, std::vector, std::vector, std::vector>; using JobModel = Job::ModelIO; void run(const BakeContextPointer& context, const Input& input, Output& output) { @@ -44,8 +44,8 @@ namespace baker { } output.edit4() = hfmModelIn->joints; output.edit5() = hfmModelIn->shapes; - output.edit6() = hfmModelIn->dynamicTransforms; - output.edit7() = hfmModelIn->deformers; + output.edit6() = hfmModelIn->skinDeformers; + output.edit7() = hfmModelIn->skinClusters; } }; @@ -143,7 +143,7 @@ namespace baker { const auto blendshapesPerMeshIn = modelPartsIn.getN(3); const auto jointsIn = modelPartsIn.getN(4); const auto shapesIn = modelPartsIn.getN(5); - const auto dynamicTransformsIn = modelPartsIn.getN(6); + const auto skinDeformersIn = modelPartsIn.getN(6); const auto deformersIn = modelPartsIn.getN(7); // Calculate normals and tangents for meshes and blendshapes if they do not exist @@ -158,14 +158,14 @@ namespace baker { // Skinning weight calculations // NOTE: Due to limitations in the current graphics::MeshPointer representation, the output list of ReweightedDeformers is per-mesh. An element is empty if there are no deformers for the mesh of the same index. - const auto reweightDeformersInputs = ReweightDeformersTask::Input(meshesIn, shapesIn, dynamicTransformsIn, deformersIn).asVarying(); + const auto reweightDeformersInputs = ReweightDeformersTask::Input(meshesIn, shapesIn, skinDeformersIn, deformersIn).asVarying(); const auto reweightedDeformers = model.addJob("ReweightDeformers", reweightDeformersInputs); // Shape vertices are included/rejected based on skinning weight, and thus must use the reweighted deformers. 
- const auto collectShapeVerticesInputs = CollectShapeVerticesTask::Input(meshesIn, shapesIn, jointsIn, dynamicTransformsIn, reweightedDeformers).asVarying(); + const auto collectShapeVerticesInputs = CollectShapeVerticesTask::Input(meshesIn, shapesIn, jointsIn, skinDeformersIn, reweightedDeformers).asVarying(); const auto shapeVerticesPerJoint = model.addJob("CollectShapeVertices", collectShapeVerticesInputs); // Build the graphics::MeshPointer for each hfm::Mesh - const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, dynamicTransformsIn, reweightedDeformers).asVarying(); + const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, skinDeformersIn, reweightedDeformers).asVarying(); const auto graphicsMeshes = model.addJob("BuildGraphicsMesh", buildGraphicsMeshInputs); // Prepare joint information diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp index 7fefe614d4..88546e0975 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp @@ -381,16 +381,16 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const const auto& normalsPerMesh = input.get3(); const auto& tangentsPerMesh = input.get4(); const auto& shapes = input.get5(); - const auto& dynamicTransforms = input.get6(); + const auto& skinDeformers = input.get6(); const auto& reweightedDeformersPerMesh = input.get7(); - // Currently, there is only (at most) one dynamicTransform per mesh - // An undefined shape.dynamicTransform has the value hfm::UNDEFINED_KEY - std::vector dynamicTransformPerMesh; - dynamicTransformPerMesh.resize(meshes.size(), hfm::UNDEFINED_KEY); + // Currently, there is only (at most) one skinDeformer per mesh + // An undefined shape.skinDeformer has the value hfm::UNDEFINED_KEY + std::vector skinDeformerPerMesh; + skinDeformerPerMesh.resize(meshes.size(), hfm::UNDEFINED_KEY); for (const auto& shape : shapes) { - uint32_t dynamicTransformIndex = shape.dynamicTransform; - dynamicTransformPerMesh[shape.mesh] = dynamicTransformIndex; + uint32_t skinDeformerIndex = shape.skinDeformer; + skinDeformerPerMesh[shape.mesh] = skinDeformerIndex; } auto& graphicsMeshes = output; @@ -403,10 +403,10 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const uint16_t numDeformerControllers = 0; if (reweightedDeformers.weightsPerVertex != 0) { - uint32_t dynamicTransformIndex = dynamicTransformPerMesh[i]; - if (dynamicTransformIndex != hfm::UNDEFINED_KEY) { - const hfm::DynamicTransform& dynamicTransform = dynamicTransforms[dynamicTransformIndex]; - numDeformerControllers = (uint16_t)dynamicTransform.deformers.size(); + uint32_t skinDeformerIndex = skinDeformerPerMesh[i]; + if (skinDeformerIndex != hfm::UNDEFINED_KEY) { + const hfm::SkinDeformer& skinDeformer = skinDeformers[skinDeformerIndex]; + numDeformerControllers = (uint16_t)skinDeformer.skinClusterIndices.size(); } } diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h index 1bb9b9be0c..b60f6f7a43 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h @@ -20,7 +20,7 @@ class BuildGraphicsMeshTask 
{ public: - using Input = baker::VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh, std::vector, std::vector, std::vector>; + using Input = baker::VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh, std::vector, std::vector, std::vector>; using Output = std::vector; using JobModel = baker::Job::ModelIO; diff --git a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp index 36c2aa04a6..e597bbf507 100644 --- a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp +++ b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp @@ -13,15 +13,15 @@ #include -// Used to track and avoid duplicate shape vertices, as multiple shapes can have the same mesh and dynamicTransform +// Used to track and avoid duplicate shape vertices, as multiple shapes can have the same mesh and skinDeformer class VertexSource { public: uint32_t mesh; - uint32_t dynamicTransform; + uint32_t skinDeformer; bool operator==(const VertexSource& other) const { return mesh == other.mesh && - dynamicTransform == other.dynamicTransform; + skinDeformer == other.skinDeformer; } }; @@ -29,7 +29,7 @@ void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, con const auto& meshes = input.get0(); const auto& shapes = input.get1(); const auto& joints = input.get2(); - const auto& dynamicTransforms = input.get3(); + const auto& skinDeformers = input.get3(); const auto& reweightedDeformers = input.get4(); auto& shapeVerticesPerJoint = output; @@ -38,18 +38,18 @@ void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, con vertexSourcesPerJoint.resize(joints.size()); for (size_t i = 0; i < shapes.size(); ++i) { const auto& shape = shapes[i]; - const uint32_t dynamicTransformKey = shape.dynamicTransform; - if (dynamicTransformKey == hfm::UNDEFINED_KEY) { + const uint32_t skinDeformerKey = shape.skinDeformer; + if (skinDeformerKey == hfm::UNDEFINED_KEY) { continue; } VertexSource vertexSource; vertexSource.mesh = shape.mesh; - vertexSource.dynamicTransform = dynamicTransformKey; + vertexSource.skinDeformer = skinDeformerKey; - const auto& dynamicTransform = dynamicTransforms[dynamicTransformKey]; - for (size_t j = 0; j < dynamicTransform.clusters.size(); ++j) { - const auto& cluster = dynamicTransform.clusters[j]; + const auto& skinDeformer = skinDeformers[skinDeformerKey]; + for (size_t j = 0; j < skinDeformer.clusters.size(); ++j) { + const auto& cluster = skinDeformer.clusters[j]; const uint32_t jointIndex = cluster.jointIndex; auto& vertexSources = vertexSourcesPerJoint[jointIndex]; diff --git a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h index 3111dcadc1..f14c440f2f 100644 --- a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h +++ b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h @@ -19,7 +19,7 @@ class CollectShapeVerticesTask { public: - using Input = baker::VaryingSet5, std::vector, std::vector, std::vector, std::vector>; + using Input = baker::VaryingSet5, std::vector, std::vector, std::vector, std::vector>; using Output = std::vector; using JobModel = baker::Job::ModelIO; diff --git a/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp b/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp index 097833e110..f210a5dd6f 100644 --- 
a/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp +++ b/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp @@ -11,30 +11,30 @@ #include "ReweightDeformersTask.h" -baker::ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const std::vector deformers, const uint16_t weightsPerVertex) { +baker::ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const std::vector skinClusters, const uint16_t weightsPerVertex) { baker::ReweightedDeformers reweightedDeformers; - if (deformers.size() == 0) { + if (skinClusters.size() == 0) { return reweightedDeformers; } size_t numClusterIndices = numMeshVertices * weightsPerVertex; reweightedDeformers.weightsPerVertex = weightsPerVertex; // TODO: Consider having a rootCluster property in the DynamicTransform rather than appending the root to the end of the cluster list. - reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(deformers.size() - 1)); + reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(skinClusters.size() - 1)); reweightedDeformers.weights.resize(numClusterIndices, 0); std::vector weightAccumulators; weightAccumulators.resize(numClusterIndices, 0.0f); - for (uint16_t i = 0; i < (uint16_t)deformers.size(); ++i) { - const hfm::Deformer& deformer = *deformers[i]; + for (uint16_t i = 0; i < (uint16_t)skinClusters.size(); ++i) { + const hfm::SkinCluster& skinCluster = *skinClusters[i]; - if (deformer.indices.size() != deformer.weights.size()) { + if (skinCluster.indices.size() != skinCluster.weights.size()) { reweightedDeformers.trimmedToMatch = true; } - size_t numIndicesOrWeights = std::min(deformer.indices.size(), deformer.weights.size()); + size_t numIndicesOrWeights = std::min(skinCluster.indices.size(), skinCluster.weights.size()); for (size_t j = 0; j < numIndicesOrWeights; ++j) { - uint32_t index = deformer.indices[j]; - float weight = deformer.weights[j]; + uint32_t index = skinCluster.indices[j]; + float weight = skinCluster.weights[j]; // look for an unused slot in the weights vector uint32_t weightIndex = index * weightsPerVertex; @@ -90,34 +90,34 @@ void ReweightDeformersTask::run(const baker::BakeContextPointer& context, const const auto& meshes = input.get0(); const auto& shapes = input.get1(); - const auto& dynamicTransforms = input.get2(); - const auto& deformers = input.get3(); + const auto& skinDeformers = input.get2(); + const auto& skinClusters = input.get3(); auto& reweightedDeformers = output; - // Currently, there is only (at most) one dynamicTransform per mesh - // An undefined shape.dynamicTransform has the value hfm::UNDEFINED_KEY - std::vector dynamicTransformPerMesh; - dynamicTransformPerMesh.resize(meshes.size(), hfm::UNDEFINED_KEY); + // Currently, there is only (at most) one skinDeformer per mesh + // An undefined shape.skinDeformer has the value hfm::UNDEFINED_KEY + std::vector skinDeformerPerMesh; + skinDeformerPerMesh.resize(meshes.size(), hfm::UNDEFINED_KEY); for (const auto& shape : shapes) { - uint32_t dynamicTransformIndex = shape.dynamicTransform; - dynamicTransformPerMesh[shape.mesh] = dynamicTransformIndex; + uint32_t skinDeformerIndex = shape.skinDeformer; + skinDeformerPerMesh[shape.mesh] = skinDeformerIndex; } reweightedDeformers.reserve(meshes.size()); for (size_t i = 0; i < meshes.size(); ++i) { const auto& mesh = meshes[i]; - uint32_t dynamicTransformIndex = dynamicTransformPerMesh[i]; + uint32_t skinDeformerIndex = skinDeformerPerMesh[i]; - const hfm::DynamicTransform* dynamicTransform = nullptr; - std::vector 
meshDeformers; - if (dynamicTransformIndex != hfm::UNDEFINED_KEY) { - dynamicTransform = &dynamicTransforms[dynamicTransformIndex]; - for (const auto& deformerIndex : dynamicTransform->deformers) { - const auto& deformer = deformers[deformerIndex]; - meshDeformers.push_back(&deformer); + const hfm::SkinDeformer* skinDeformer = nullptr; + std::vector meshSkinClusters; + if (skinDeformerIndex != hfm::UNDEFINED_KEY) { + skinDeformer = &skinDeformers[skinDeformerIndex]; + for (const auto& skinClusterIndex : skinDeformer->skinClusterIndices) { + const auto& skinCluster = skinClusters[skinClusterIndex]; + meshSkinClusters.push_back(&skinCluster); } } - reweightedDeformers.push_back(getReweightedDeformers((size_t)mesh.vertices.size(), meshDeformers, NUM_WEIGHTS_PER_VERTEX)); + reweightedDeformers.push_back(getReweightedDeformers((size_t)mesh.vertices.size(), meshSkinClusters, NUM_WEIGHTS_PER_VERTEX)); } } diff --git a/libraries/model-baker/src/model-baker/ReweightDeformersTask.h b/libraries/model-baker/src/model-baker/ReweightDeformersTask.h index 98befa8000..c40ad4c1b4 100644 --- a/libraries/model-baker/src/model-baker/ReweightDeformersTask.h +++ b/libraries/model-baker/src/model-baker/ReweightDeformersTask.h @@ -19,7 +19,7 @@ class ReweightDeformersTask { public: - using Input = baker::VaryingSet4, std::vector, std::vector, std::vector>; + using Input = baker::VaryingSet4, std::vector, std::vector, std::vector>; using Output = std::vector; using JobModel = baker::Job::ModelIO; diff --git a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index 7d94cd61a5..cbd608e092 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -33,7 +33,7 @@ bool CauterizedModel::updateGeometry() { if (_isCauterized && needsFullUpdate) { assert(_cauterizeMeshStates.empty()); /* const HFMModel& hfmModel = getHFMModel(); - const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + const auto& hfmDynamicTransforms = hfmModel.skinDeformers; for (int i = 0; i < hfmDynamicTransforms.size(); i++) { const auto& dynT = hfmDynamicTransforms[i]; MeshState state; @@ -47,7 +47,7 @@ bool CauterizedModel::updateGeometry() { }*/ const HFMModel& hfmModel = getHFMModel(); - const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + const auto& hfmDynamicTransforms = hfmModel.skinDeformers; int i = 0; /* for (const auto& mesh: hfmModel.meshes) { MeshState state; @@ -113,7 +113,7 @@ void CauterizedModel::createRenderItemSet() { auto material = getNetworkModel()->getShapeMaterial(shapeID); _modelMeshMaterialNames.push_back(material ? 
material->getName() : ""); - _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)shape.mesh, shape.dynamicTransform }); + _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)shape.mesh, shape.skinDeformer }); } /* int shapeID = 0; @@ -153,7 +153,7 @@ void CauterizedModel::updateClusterMatrices() { const HFMModel& hfmModel = getHFMModel(); - const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + const auto& hfmDynamicTransforms = hfmModel.skinDeformers; for (int i = 0; i < (int)_meshStates.size(); i++) { MeshState& state = _meshStates[i]; const auto& deformer = hfmDynamicTransforms[i]; diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index 1e258a8dd1..d3766fa65c 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -336,7 +336,7 @@ bool Model::updateGeometry() { updateShapeStatesFromRig(); const HFMModel& hfmModel = getHFMModel(); - const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + const auto& hfmSkinDeformers = hfmModel.skinDeformers; int i = 0; /* for (const auto& mesh: hfmModel.meshes) { MeshState state; @@ -346,8 +346,8 @@ bool Model::updateGeometry() { i++; } */ - for (int i = 0; i < hfmDynamicTransforms.size(); i++) { - const auto& dynT = hfmDynamicTransforms[i]; + for (int i = 0; i < hfmSkinDeformers.size(); i++) { + const auto& dynT = hfmSkinDeformers[i]; MeshState state; state.clusterDualQuaternions.resize(dynT.clusters.size()); state.clusterMatrices.resize(dynT.clusters.size()); @@ -1427,10 +1427,10 @@ void Model::updateClusterMatrices() { _needsUpdateClusterMatrices = false; const HFMModel& hfmModel = getHFMModel(); - const auto& hfmDynamicTransforms = hfmModel.dynamicTransforms; + const auto& hfmSkinDeformers = hfmModel.skinDeformers; for (int i = 0; i < (int) _meshStates.size(); i++) { MeshState& state = _meshStates[i]; - const auto& deformer = hfmDynamicTransforms[i]; + const auto& deformer = hfmSkinDeformers[i]; int meshIndex = i; int clusterIndex = 0; @@ -1545,7 +1545,7 @@ void Model::createRenderItemSet() { auto material = getNetworkModel()->getShapeMaterial(shapeID); _modelMeshMaterialNames.push_back(material ? 
material->getName() : ""); - _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)shape.mesh, shape.dynamicTransform }); + _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)shape.mesh, shape.skinDeformer }); } /* uint32_t numMeshes = (uint32_t)meshes.size(); From 0a9c38964153e005ee99fcadbaefeb0338365798 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Wed, 16 Oct 2019 08:06:35 -0700 Subject: [PATCH 059/121] last issue --- libraries/render-utils/src/MeshPartPayload.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/render-utils/src/MeshPartPayload.cpp b/libraries/render-utils/src/MeshPartPayload.cpp index 5dc39d8674..3f25d2ef80 100644 --- a/libraries/render-utils/src/MeshPartPayload.cpp +++ b/libraries/render-utils/src/MeshPartPayload.cpp @@ -223,7 +223,7 @@ ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, in renderTransform = transform.worldTransform(shapeState._rootFromJointTransform); updateTransform(renderTransform); - _deformerIndex = shape.dynamicTransform; + _deformerIndex = shape.skinDeformer; initCache(model); From acbdd2b3b57d37986a732929a411e44d5024501d Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 16 Oct 2019 17:04:19 -0700 Subject: [PATCH 060/121] Fix not building due to removed shape.transform --- libraries/fbx/src/GLTFSerializer.cpp | 2 +- libraries/hfm/src/hfm/HFMModelMath.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 9fb00d3145..db272a534c 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1524,7 +1524,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& hfmModel.shapes.push_back(templateShape); auto& hfmShape = hfmModel.shapes.back(); // Everything else is already defined (mesh, meshPart, material), so just define the new transform - hfmShape.transform = nodeIndex; + hfmShape.joint = nodeIndex; } } diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index e6ba042e9c..de308297c4 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -40,7 +40,7 @@ void calculateExtentsForShape(hfm::Shape& shape, const std::vector& m const auto& mesh = meshes[shape.mesh]; const auto& meshPart = mesh.parts[shape.meshPart]; - glm::mat4 globalTransform = joints[shape.transform].globalTransform; + glm::mat4 globalTransform = joints[shape.joint].globalTransform; forEachIndex(meshPart, [&](int32_t idx){ if (mesh.vertices.size() <= idx) { return; From 9159258b503f7970b0fa05dea0053c1a61eeefde Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Wed, 16 Oct 2019 17:42:36 -0700 Subject: [PATCH 061/121] clean ups --- .../render-utils/src/CauterizedModel.cpp | 145 +++--------------- libraries/render-utils/src/Model.cpp | 69 +-------- 2 files changed, 33 insertions(+), 181 deletions(-) diff --git a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index cbd608e092..717d3cada7 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -32,31 +32,9 @@ bool CauterizedModel::updateGeometry() { bool needsFullUpdate = Model::updateGeometry(); if (_isCauterized && needsFullUpdate) { assert(_cauterizeMeshStates.empty()); - /* const HFMModel& hfmModel = getHFMModel(); - const auto& hfmDynamicTransforms = hfmModel.skinDeformers; - for (int i = 0; i < 
hfmDynamicTransforms.size(); i++) { - const auto& dynT = hfmDynamicTransforms[i]; - MeshState state; - if (_useDualQuaternionSkinning) { - state.clusterDualQuaternions.resize(dynT.clusters.size()); - } else { - state.clusterMatrices.resize(dynT.clusters.size()); - } - _cauterizeMeshStates.append(state); - _meshStates.push_back(state); - }*/ const HFMModel& hfmModel = getHFMModel(); const auto& hfmDynamicTransforms = hfmModel.skinDeformers; - int i = 0; - /* for (const auto& mesh: hfmModel.meshes) { - MeshState state; - state.clusterDualQuaternions.resize(mesh.clusters.size()); - state.clusterMatrices.resize(mesh.clusters.size()); - _meshStates.push_back(state); - i++; - } - */ for (int i = 0; i < hfmDynamicTransforms.size(); i++) { const auto& dynT = hfmDynamicTransforms[i]; MeshState state; @@ -64,17 +42,6 @@ bool CauterizedModel::updateGeometry() { state.clusterMatrices.resize(dynT.clusters.size()); _cauterizeMeshStates.push_back(state); } - - /* foreach (const HFMMesh& mesh, hfmModel.meshes) { - Model::MeshState state; - if (_useDualQuaternionSkinning) { - state.clusterDualQuaternions.resize(mesh.clusters.size()); - _cauterizeMeshStates.append(state); - } else { - state.clusterMatrices.resize(mesh.clusters.size()); - _cauterizeMeshStates.append(state); - } - }*/ } return needsFullUpdate; } @@ -103,7 +70,6 @@ void CauterizedModel::createRenderItemSet() { Transform::mult(transform, transform, offset); // Run through all of the meshes, and place them into their segregated, but unsorted buckets - // Run through all of the meshes, and place them into their segregated, but unsorted buckets int shapeID = 0; const auto& shapes = _renderGeometry->getHFMModel().shapes; for (shapeID; shapeID < shapes.size(); shapeID++) { @@ -115,26 +81,6 @@ void CauterizedModel::createRenderItemSet() { _modelMeshMaterialNames.push_back(material ? material->getName() : ""); _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)shape.mesh, shape.skinDeformer }); } - -/* int shapeID = 0; - uint32_t numMeshes = (uint32_t)meshes.size(); - for (uint32_t i = 0; i < numMeshes; i++) { - const auto& mesh = meshes.at(i); - if (!mesh) { - continue; - } - - // Create the render payloads - int numParts = (int)mesh->getNumParts(); - for (int partIndex = 0; partIndex < numParts; partIndex++) { - auto ptr = std::make_shared(shared_from_this(), i, partIndex, shapeID, transform); - _modelMeshRenderItems << std::static_pointer_cast(ptr); - auto material = getNetworkModel()->getShapeMaterial(shapeID); - _modelMeshMaterialNames.push_back(material ? 
material->getName() : ""); - _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)i }); - shapeID++; - } - }*/ } else { Model::createRenderItemSet(); } @@ -153,17 +99,13 @@ void CauterizedModel::updateClusterMatrices() { const HFMModel& hfmModel = getHFMModel(); - const auto& hfmDynamicTransforms = hfmModel.skinDeformers; - for (int i = 0; i < (int)_meshStates.size(); i++) { - MeshState& state = _meshStates[i]; - const auto& deformer = hfmDynamicTransforms[i]; + const auto& hfmSkinDeformers = hfmModel.skinDeformers; + for (int meshIndex = 0; meshIndex < (int)_meshStates.size(); meshIndex++) { + MeshState& state = _meshStates[meshIndex]; + const auto& deformer = hfmSkinDeformers[meshIndex]; - int meshIndex = i; - int clusterIndex = 0; - - for (int d = 0; d < deformer.clusters.size(); d++) { - const auto& cluster = deformer.clusters[d]; - clusterIndex = d; + for (int clusterIndex = 0; clusterIndex < deformer.clusters.size(); clusterIndex++) { + const auto& cluster = deformer.clusters[clusterIndex]; const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex); @@ -172,40 +114,14 @@ void CauterizedModel::updateClusterMatrices() { Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans()); Transform clusterTransform; Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform); - state.clusterDualQuaternions[d] = Model::TransformDualQuaternion(clusterTransform); - } - else { - auto jointMatrix = _rig.getJointTransform(cluster.jointIndex); - glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[d]); - } - - } - } -/* - const HFMModel& hfmModel = getHFMModel(); - for (int i = 0; i < (int)_meshStates.size(); i++) { - Model::MeshState& state = _meshStates[i]; - const HFMMesh& mesh = hfmModel.meshes.at(i); - int meshIndex = i; - - for (int j = 0; j < mesh.clusters.size(); j++) { - const HFMCluster& cluster = mesh.clusters.at(j); - int clusterIndex = j; - - if (_useDualQuaternionSkinning) { - auto jointPose = _rig.getJointPose(cluster.jointIndex); - Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans()); - Transform clusterTransform; - Transform::mult(clusterTransform, jointTransform, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindTransform); - state.clusterDualQuaternions[j] = Model::TransformDualQuaternion(clusterTransform); - state.clusterDualQuaternions[j].setCauterizationParameters(0.0f, jointPose.trans()); + state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform); } else { auto jointMatrix = _rig.getJointTransform(cluster.jointIndex); - glm_mat4u_mul(jointMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, state.clusterMatrices[j]); + glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]); } } } -*/ + // as an optimization, don't build cautrizedClusterMatrices if the boneSet is empty. 
if (!_cauterizeBoneSet.empty()) { @@ -219,32 +135,32 @@ void CauterizedModel::updateClusterMatrices() { glm::vec4(0.0f, 0.0f, 0.0f, 1.0f)); auto cauterizeMatrix = _rig.getJointTransform(_rig.indexOfJoint("Neck")) * zeroScale; - for (int i = 0; i < _cauterizeMeshStates.size(); i++) { - Model::MeshState& state = _cauterizeMeshStates[i]; - const HFMMesh& mesh = hfmModel.meshes.at(i); - int meshIndex = i; + for (int meshIndex = 0; meshIndex < _cauterizeMeshStates.size(); meshIndex++) { + Model::MeshState& state = _cauterizeMeshStates[meshIndex]; + const auto& deformer = hfmSkinDeformers[meshIndex]; - for (int j = 0; j < mesh.clusters.size(); j++) { - const HFMCluster& cluster = mesh.clusters.at(j); - int clusterIndex = j; + for (int clusterIndex = 0; clusterIndex < deformer.clusters.size(); clusterIndex++) { + const auto& cluster = deformer.clusters[clusterIndex]; + + const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex); if (_useDualQuaternionSkinning) { if (_cauterizeBoneSet.find(cluster.jointIndex) == _cauterizeBoneSet.end()) { // not cauterized so just copy the value from the non-cauterized version. - state.clusterDualQuaternions[j] = _meshStates[i].clusterDualQuaternions[j]; + state.clusterDualQuaternions[clusterIndex] = _meshStates[meshIndex].clusterDualQuaternions[clusterIndex]; } else { Transform jointTransform(cauterizePose.rot(), cauterizePose.scale(), cauterizePose.trans()); Transform clusterTransform; - Transform::mult(clusterTransform, jointTransform, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindTransform); - state.clusterDualQuaternions[j] = Model::TransformDualQuaternion(clusterTransform); - state.clusterDualQuaternions[j].setCauterizationParameters(1.0f, cauterizePose.trans()); + Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform); + state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform); + state.clusterDualQuaternions[clusterIndex].setCauterizationParameters(1.0f, cauterizePose.trans()); } } else { if (_cauterizeBoneSet.find(cluster.jointIndex) == _cauterizeBoneSet.end()) { // not cauterized so just copy the value from the non-cauterized version. 
- state.clusterMatrices[j] = _meshStates[i].clusterMatrices[j]; + state.clusterMatrices[clusterIndex] = _meshStates[meshIndex].clusterMatrices[clusterIndex]; } else { - glm_mat4u_mul(cauterizeMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, state.clusterMatrices[j]); + glm_mat4u_mul(cauterizeMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]); } } } @@ -302,18 +218,9 @@ void CauterizedModel::updateRenderItems() { auto deformerIndex = self->_modelMeshRenderItemShapes[i].deformerIndex; bool isDeformed = (deformerIndex != hfm::UNDEFINED_KEY); - - // auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex; - // auto deformerIndex = self->_modelMeshRenderItemShapes[i].meshIndex; - - // const auto& shapeState = self->getShapeState(i); - - bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex); bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning(); - - if (isDeformed) { const auto& meshState = self->getMeshState(deformerIndex); @@ -324,12 +231,10 @@ void CauterizedModel::updateRenderItems() { primitiveMode, renderItemKeyGlobalFlags, enableCauterization](ModelMeshPartPayload& mmppData) { CauterizedMeshPartPayload& data = static_cast(mmppData); if (useDualQuaternionSkinning) { - data.updateClusterBuffer(meshState.clusterDualQuaternions, - cauterizedMeshState.clusterDualQuaternions); + data.updateClusterBuffer(meshState.clusterDualQuaternions, cauterizedMeshState.clusterDualQuaternions); } else { - data.updateClusterBuffer(meshState.clusterMatrices, - cauterizedMeshState.clusterMatrices); - } + data.updateClusterBuffer(meshState.clusterMatrices, cauterizedMeshState.clusterMatrices); + } Transform renderTransform = modelTransform; // if (meshState.clusterMatrices.size() <= 2) { diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index d3766fa65c..6df3c76a67 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -270,9 +270,7 @@ void Model::updateRenderItems() { transaction.updateItem(itemID, [modelTransform, shapeState, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags](ModelMeshPartPayload& data) { Transform renderTransform = modelTransform; - // if (meshState.clusterMatrices.size() <= 1) { renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); - // } data.updateTransform(renderTransform); data.updateKey(renderItemKeyGlobalFlags); @@ -337,15 +335,6 @@ bool Model::updateGeometry() { const HFMModel& hfmModel = getHFMModel(); const auto& hfmSkinDeformers = hfmModel.skinDeformers; - int i = 0; - /* for (const auto& mesh: hfmModel.meshes) { - MeshState state; - state.clusterDualQuaternions.resize(mesh.clusters.size()); - state.clusterMatrices.resize(mesh.clusters.size()); - _meshStates.push_back(state); - i++; - } - */ for (int i = 0; i < hfmSkinDeformers.size(); i++) { const auto& dynT = hfmSkinDeformers[i]; MeshState state; @@ -1428,16 +1417,12 @@ void Model::updateClusterMatrices() { _needsUpdateClusterMatrices = false; const HFMModel& hfmModel = getHFMModel(); const auto& hfmSkinDeformers = hfmModel.skinDeformers; - for (int i = 0; i < (int) _meshStates.size(); i++) { - MeshState& state = _meshStates[i]; - const auto& deformer = hfmSkinDeformers[i]; + for (int meshIndex = 0; meshIndex < (int) _meshStates.size(); meshIndex++) { + MeshState& state = _meshStates[meshIndex]; + const auto& deformer = hfmSkinDeformers[meshIndex]; - int meshIndex = i; - 
int clusterIndex = 0; - - for (int d = 0; d < deformer.clusters.size(); d++) { - const auto& cluster = deformer.clusters[d]; - clusterIndex = d; + for (int clusterIndex = 0; clusterIndex < deformer.clusters.size(); clusterIndex++) { + const auto& cluster = deformer.clusters[clusterIndex]; const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex); @@ -1446,32 +1431,12 @@ void Model::updateClusterMatrices() { Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans()); Transform clusterTransform; Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform); - state.clusterDualQuaternions[d] = Model::TransformDualQuaternion(clusterTransform); - } - else { - auto jointMatrix = _rig.getJointTransform(cluster.jointIndex); - glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[d]); - } - - } -/* - int meshIndex = i; - const HFMMesh& mesh = hfmModel.meshes.at(i); - for (int j = 0; j < mesh.clusters.size(); j++) { - const HFMCluster& cluster = mesh.clusters.at(j); - int clusterIndex = j; - - if (_useDualQuaternionSkinning) { - auto jointPose = _rig.getJointPose(cluster.jointIndex); - Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans()); - Transform clusterTransform; - Transform::mult(clusterTransform, jointTransform, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindTransform); - state.clusterDualQuaternions[j] = Model::TransformDualQuaternion(clusterTransform); + state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform); } else { auto jointMatrix = _rig.getJointTransform(cluster.jointIndex); - glm_mat4u_mul(jointMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, state.clusterMatrices[j]); + glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]); } - }*/ + } } // post the blender if we're not currently waiting for one to finish @@ -1547,24 +1512,6 @@ void Model::createRenderItemSet() { _modelMeshMaterialNames.push_back(material ? material->getName() : ""); _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)shape.mesh, shape.skinDeformer }); } -/* - uint32_t numMeshes = (uint32_t)meshes.size(); - for (uint32_t i = 0; i < numMeshes; i++) { - const auto& mesh = meshes.at(i); - if (!mesh) { - continue; - } - - // Create the render payloads - int numParts = (int)mesh->getNumParts(); - for (int partIndex = 0; partIndex < numParts; partIndex++) { - _modelMeshRenderItems << std::make_shared(shared_from_this(), i, partIndex, shapeID, transform); - auto material = getNetworkModel()->getShapeMaterial(shapeID); - _modelMeshMaterialNames.push_back(material ? 
material->getName() : ""); - _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)i }); - shapeID++; - } - }*/ } bool Model::isRenderable() const { From 55ed7d9a5312c4c2163528411d0e801b419b962e Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Thu, 17 Oct 2019 14:53:32 -0700 Subject: [PATCH 062/121] Add support for shape generation for obj serializer --- libraries/fbx/src/OBJSerializer.cpp | 43 ++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index e1fc85ca2a..0c795d3a94 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -678,14 +678,17 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V hfmModel.meshExtents.reset(); hfmModel.meshes.push_back(HFMMesh()); + std::vector materialNamePerShape; try { // call parseOBJGroup as long as it's returning true. Each successful call will // add a new meshPart to the model's single mesh. while (parseOBJGroup(tokenizer, mapping, hfmModel, scaleGuess, combineParts)) {} - HFMMesh& mesh = hfmModel.meshes[0]; - mesh.meshIndex = 0; + uint32_t meshIndex = 0; + HFMMesh& mesh = hfmModel.meshes[meshIndex]; + mesh.meshIndex = meshIndex; + uint32_t jointIndex = 0; hfmModel.joints.resize(1); hfmModel.joints[0].parentIndex = -1; hfmModel.joints[0].distanceToParent = 0; @@ -697,14 +700,6 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V hfmModel.jointIndices["x"] = 1; - HFMCluster cluster; - cluster.jointIndex = 0; - cluster.inverseBindMatrix = glm::mat4(1, 0, 0, 0, - 0, 1, 0, 0, - 0, 0, 1, 0, - 0, 0, 0, 1); - mesh.clusters.append(cluster); - QMap materialMeshIdMap; std::vector hfmMeshParts; for (uint32_t i = 0, meshPartCount = 0; i < (uint32_t)mesh.parts.size(); i++, meshPartCount++) { @@ -718,12 +713,13 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V // Create a new HFMMesh for this material mapping. materialMeshIdMap.insert(face.materialName, materialMeshIdMap.count()); + uint32_t partIndex = (int)hfmMeshParts.size(); hfmMeshParts.push_back(HFMMeshPart()); HFMMeshPart& meshPartNew = hfmMeshParts.back(); meshPartNew.quadIndices = QVector(meshPart.quadIndices); // Copy over quad indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway]. meshPartNew.quadTrianglesIndices = QVector(meshPart.quadTrianglesIndices); // Copy over quad triangulated indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway]. meshPartNew.triangleIndices = QVector(meshPart.triangleIndices); // Copy over triangle indices. - + // Do some of the material logic (which previously lived below) now. // All the faces in the same group will have the same name and material. 
QString groupMaterialName = face.materialName;
@@ -745,8 +741,15 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
 needsMaterialLibrary = groupMaterialName != SMART_DEFAULT_MATERIAL_NAME;
 }
 materials[groupMaterialName] = material;
- meshPartNew.materialID = groupMaterialName;
+ materialNamePerShape.push_back(groupMaterialName);
 }
+
+
+ hfm::Shape shape;
+ shape.mesh = meshIndex;
+ shape.joint = jointIndex;
+ shape.meshPart = partIndex;
+ hfmModel.shapes.push_back(shape);
 }
 }
 }
@@ -829,12 +832,15 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
 mesh.meshExtents.addPoint(vertex);
 hfmModel.meshExtents.addPoint(vertex);
 }
-
+ // hfmDebugDump(hfmModel);
 } catch(const std::exception& e) {
 qCDebug(modelformat) << "OBJSerializer fail: " << e.what();
 }
+ // At this point, the hfmModel joints, meshes, parts and shapes have been defined,
+ // but no materials have been assigned yet
+
 QString queryPart = _url.query();
 bool suppressMaterialsHack = queryPart.contains("hifiusemat"); // If this appears in query string, don't fetch mtl even if used.
 OBJMaterial& preDefinedMaterial = materials[SMART_DEFAULT_MATERIAL_NAME];
@@ -886,6 +892,8 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
 }
 }
+ // As we are populating the material list in the hfmModel, let's also create the reverse map (from materialName to index)
+ QMap materialNameToIndex;
 foreach (QString materialID, materials.keys()) {
 OBJMaterial& objMaterial = materials[materialID];
 if (!objMaterial.used) {
 continue;
 }
@@ -898,6 +906,7 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
 objMaterial.shininess,
 objMaterial.opacity);
 HFMMaterial& hfmMaterial = hfmModel.materials.back();
+ materialNameToIndex[materialID] = hfmModel.materials.size();
 hfmMaterial.name = materialID;
 hfmMaterial.materialID = materialID;
@@ -997,6 +1006,14 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
 modelMaterial->setOpacity(hfmMaterial.opacity);
 }
+ // Go over the shapes once more to assign the material index correctly
+ for (int i = 0; i < hfmModel.shapes.size(); ++i) {
+ auto foundMaterialIndex = materialNameToIndex.find(materialNamePerShape[i]);
+ if (foundMaterialIndex != materialNameToIndex.end()) {
+ hfmModel.shapes[i].material = foundMaterialIndex.value();
+ }
+ }
+
 return hfmModelPtr;
 }

From bd57cc7b1499c0541fa057f649a6fec057503ce4 Mon Sep 17 00:00:00 2001
From: Sam Gateau
Date: Thu, 17 Oct 2019 15:56:41 -0700
Subject: [PATCH 063/121] Fixing bad indexing

---
 libraries/fbx/src/OBJSerializer.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp
index 0c795d3a94..8384a3e4b8 100644
--- a/libraries/fbx/src/OBJSerializer.cpp
+++ b/libraries/fbx/src/OBJSerializer.cpp
@@ -899,6 +899,10 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
 if (!objMaterial.used) {
 continue;
 }
+ qCDebug(modelformat) << "OBJSerializer Material Name:" << materialID;
+
+ // capture the name to index map
+ materialNameToIndex[materialID] = hfmModel.materials.size();
 hfmModel.materials.emplace_back(objMaterial.diffuseColor,
 objMaterial.specularColor,
@@ -906,7 +910,6 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
 objMaterial.shininess,
 objMaterial.opacity);
 HFMMaterial& hfmMaterial = hfmModel.materials.back();
- materialNameToIndex[materialID] = hfmModel.materials.size();
 hfmMaterial.name =
materialID;
 hfmMaterial.materialID = materialID;

From c89e682b77279d416ab4ce13939b05ac50a38089 Mon Sep 17 00:00:00 2001
From: Sam Gateau
Date: Thu, 17 Oct 2019 16:24:21 -0700
Subject: [PATCH 064/121] address warning

---
 libraries/fbx/src/OBJSerializer.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp
index 8384a3e4b8..4361a71c8c 100644
--- a/libraries/fbx/src/OBJSerializer.cpp
+++ b/libraries/fbx/src/OBJSerializer.cpp
@@ -899,10 +899,9 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
 if (!objMaterial.used) {
 continue;
 }
- qCDebug(modelformat) << "OBJSerializer Material Name:" << materialID;
 // capture the name to index map
- materialNameToIndex[materialID] = hfmModel.materials.size();
+ materialNameToIndex[materialID] = (uint32_t) hfmModel.materials.size();
 hfmModel.materials.emplace_back(objMaterial.diffuseColor,
 objMaterial.specularColor,

From 695b9cdba64e20f739050da8afd76e98fb2fd874 Mon Sep 17 00:00:00 2001
From: Sam Gateau
Date: Fri, 18 Oct 2019 03:31:06 -0700
Subject: [PATCH 065/121] Cleaning up leftover comments and typos, simplifying the code that updates matrices in the model classes, removing renderItemsIdMap because it is not needed, and chasing the problem of the physics objects transform

---
 libraries/animation/src/AnimSkeleton.cpp | 30 -----
 libraries/animation/src/AnimSkeleton.h | 4 +-
 .../src/RenderableModelEntityItem.cpp | 4 +-
 libraries/hfm/src/hfm/HFM.h | 1 +
 .../render-utils/src/CauterizedModel.cpp | 87 ++++++--------
 libraries/render-utils/src/CauterizedModel.h | 2 +-
 libraries/render-utils/src/Model.cpp | 107 ++++++++----------
 libraries/render-utils/src/Model.h | 28 +++--
 .../render-utils/src/SoftAttachmentModel.cpp | 38 +++----
 tools/vhacd-util/src/VHACDUtil.cpp | 2 +-
 10 files changed, 125 insertions(+), 178 deletions(-)

diff --git a/libraries/animation/src/AnimSkeleton.cpp b/libraries/animation/src/AnimSkeleton.cpp
index bae1fb5b69..9a27ba766a 100644
--- a/libraries/animation/src/AnimSkeleton.cpp
+++ b/libraries/animation/src/AnimSkeleton.cpp
@@ -35,7 +35,6 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) {
 std::vector dummyClustersList;
 for (int j = 0; j < defor.clusters.size(); j++) {
- std::vector bindMatrices;
 // cast into a non-const reference, so we can mutate the FBXCluster
 HFMCluster& cluster = const_cast(defor.clusters.at(j));
@@ -55,35 +54,6 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) {
 }
 _clusterBindMatrixOriginalValues.push_back(dummyClustersList);
 }
-
-
-/*
- for (int i = 0; i < (int)hfmModel.meshes.size(); i++) {
- const HFMMesh& mesh = hfmModel.meshes.at(i);
- std::vector dummyClustersList;
-
- for (int j = 0; j < mesh.clusters.size(); j++) {
- std::vector bindMatrices;
- // cast into a non-const reference, so we can mutate the FBXCluster
- HFMCluster& cluster = const_cast(mesh.clusters.at(j));
-
- HFMCluster localCluster;
- localCluster.jointIndex = cluster.jointIndex;
- localCluster.inverseBindMatrix = cluster.inverseBindMatrix;
- localCluster.inverseBindTransform.evalFromRawMatrix(localCluster.inverseBindMatrix);
-
- // if we have a joint offset in the fst file then multiply its inverse by the
- // model cluster inverse bind matrix
- if (hfmModel.jointRotationOffsets.contains(cluster.jointIndex)) {
- AnimPose localOffset(hfmModel.jointRotationOffsets[cluster.jointIndex], glm::vec3());
- localCluster.inverseBindMatrix = (glm::mat4)localOffset.inverse() * cluster.inverseBindMatrix;
-
localCluster.inverseBindTransform.evalFromRawMatrix(localCluster.inverseBindMatrix); - } - dummyClustersList.push_back(localCluster); - } - _clusterBindMatrixOriginalValues.push_back(dummyClustersList); - } -*/ } AnimSkeleton::AnimSkeleton(const std::vector& joints, const QMap jointOffsets) { diff --git a/libraries/animation/src/AnimSkeleton.h b/libraries/animation/src/AnimSkeleton.h index 526959df9a..1477efb223 100644 --- a/libraries/animation/src/AnimSkeleton.h +++ b/libraries/animation/src/AnimSkeleton.h @@ -68,9 +68,7 @@ public: void dump(const AnimPoseVec& poses) const; std::vector lookUpJointIndices(const std::vector& jointNames) const; - const HFMCluster getClusterBindMatricesOriginalValues(const int meshIndex, const int clusterIndex) const { return _clusterBindMatrixOriginalValues[meshIndex][clusterIndex]; } - - // const HFMCluster getClusterBindMatricesOriginalValues(const int meshIndex, const int clusterIndex) const { return _clusterBindMatrixOriginalValues[meshIndex][clusterIndex]; } + const HFMCluster getClusterBindMatricesOriginalValues(const int skinDeformerIndex, const int clusterIndex) const { return _clusterBindMatrixOriginalValues[skinDeformerIndex][clusterIndex]; } protected: void buildSkeletonFromJoints(const std::vector& joints, const QMap jointOffsets); diff --git a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp index e75b28f9ed..d7da3879f6 100644 --- a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp @@ -479,8 +479,8 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { glm::mat4 invRegistraionOffset = glm::translate(dimensions * (getRegistrationPoint() - ENTITY_ITEM_DEFAULT_REGISTRATION_POINT)); for (uint32_t i = 0; i < numHFMMeshes; i++) { const HFMMesh& mesh = hfmModel.meshes.at(i); - if (mesh.clusters.size() > 0) { - const HFMCluster& cluster = mesh.clusters.at(0); + if (i < hfmModel.skinDeformers.size() && hfmModel.skinDeformers[i].clusters.size() > 0) { + const HFMCluster& cluster = hfmModel.skinDeformers[i].clusters.at(0); auto jointMatrix = model->getRig().getJointTransform(cluster.jointIndex); // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later localTransforms.push_back(invRegistraionOffset * jointMatrix * cluster.inverseBindMatrix); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 15ed876d94..28dc8128e6 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -387,6 +387,7 @@ typedef hfm::Texture HFMTexture; typedef hfm::MeshPart HFMMeshPart; typedef hfm::Material HFMMaterial; typedef hfm::Mesh HFMMesh; +typedef hfm::SkinDeformer HFMSkinDeformer; typedef hfm::AnimationFrame HFMAnimationFrame; typedef hfm::Light HFMLight; typedef hfm::Model HFMModel; diff --git a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index 717d3cada7..2576b16354 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -33,14 +33,10 @@ bool CauterizedModel::updateGeometry() { if (_isCauterized && needsFullUpdate) { assert(_cauterizeMeshStates.empty()); - const HFMModel& hfmModel = getHFMModel(); - const auto& hfmDynamicTransforms = hfmModel.skinDeformers; - for (int i = 0; i < hfmDynamicTransforms.size(); i++) { - const auto& dynT = hfmDynamicTransforms[i]; - MeshState state; - 
state.clusterDualQuaternions.resize(dynT.clusters.size());
- state.clusterMatrices.resize(dynT.clusters.size());
- _cauterizeMeshStates.push_back(state);
+ // initialize the cauterizedDeformerStates as a copy of the standard deformerStates
+ _cauterizeMeshStates.resize(_meshStates.size());
+ for (int i = 0; i < _meshStates.size(); ++i) {
+ _cauterizeMeshStates[i] = _meshStates[i];
+ }
 }
 return needsFullUpdate;
 }
@@ -57,7 +53,6 @@ void CauterizedModel::createRenderItemSet() {
 _modelMeshRenderItems.clear();
 _modelMeshMaterialNames.clear();
- _modelMeshRenderItemShapes.clear();
 Transform transform;
 transform.setTranslation(_translation);
@@ -79,7 +74,6 @@ void CauterizedModel::createRenderItemSet() {
 auto material = getNetworkModel()->getShapeMaterial(shapeID);
 _modelMeshMaterialNames.push_back(material ? material->getName() : "");
- _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)shape.mesh, shape.skinDeformer });
 }
 } else {
 Model::createRenderItemSet();
@@ -97,26 +91,20 @@ void CauterizedModel::updateClusterMatrices() {
 _needsUpdateClusterMatrices = false;
-
- const HFMModel& hfmModel = getHFMModel();
- const auto& hfmSkinDeformers = hfmModel.skinDeformers;
- for (int meshIndex = 0; meshIndex < (int)_meshStates.size(); meshIndex++) {
- MeshState& state = _meshStates[meshIndex];
- const auto& deformer = hfmSkinDeformers[meshIndex];
-
- for (int clusterIndex = 0; clusterIndex < deformer.clusters.size(); clusterIndex++) {
- const auto& cluster = deformer.clusters[clusterIndex];
-
- const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex);
+ for (int skinDeformerIndex = 0; skinDeformerIndex < (int)_meshStates.size(); skinDeformerIndex++) {
+ MeshState& state = _meshStates[skinDeformerIndex];
+ auto numClusters = state.getNumClusters();
+ for (uint32_t clusterIndex = 0; clusterIndex < numClusters; clusterIndex++) {
+ const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(skinDeformerIndex, clusterIndex);
 if (_useDualQuaternionSkinning) {
- auto jointPose = _rig.getJointPose(cluster.jointIndex);
+ auto jointPose = _rig.getJointPose(cbmov.jointIndex);
 Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans());
 Transform clusterTransform;
 Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform);
 state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform);
 } else {
- auto jointMatrix = _rig.getJointTransform(cluster.jointIndex);
+ auto jointMatrix = _rig.getJointTransform(cbmov.jointIndex);
 glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]);
 }
 }
@@ -127,6 +115,7 @@ void CauterizedModel::updateClusterMatrices() {
 AnimPose cauterizePose = _rig.getJointPose(_rig.indexOfJoint("Neck"));
 cauterizePose.scale() = glm::vec3(0.0001f, 0.0001f, 0.0001f);
+ Transform cauterizedDQTransform(cauterizePose.rot(), cauterizePose.scale(), cauterizePose.trans());
 static const glm::mat4 zeroScale(
 glm::vec4(0.0001f, 0.0f, 0.0f, 0.0f),
@@ -135,30 +124,27 @@ void CauterizedModel::updateClusterMatrices() {
 glm::vec4(0.0f, 0.0f, 0.0f, 1.0f));
 auto cauterizeMatrix = _rig.getJointTransform(_rig.indexOfJoint("Neck")) * zeroScale;
- for (int meshIndex = 0; meshIndex < _cauterizeMeshStates.size(); meshIndex++) {
- Model::MeshState& state = _cauterizeMeshStates[meshIndex];
- const auto& deformer = hfmSkinDeformers[meshIndex];
+ for (int skinDeformerIndex = 0; skinDeformerIndex < _cauterizeMeshStates.size(); skinDeformerIndex++) {
+
Model::MeshState& nonCauterizedState = _meshStates[skinDeformerIndex];
+ Model::MeshState& state = _cauterizeMeshStates[skinDeformerIndex];
- for (int clusterIndex = 0; clusterIndex < deformer.clusters.size(); clusterIndex++) {
- const auto& cluster = deformer.clusters[clusterIndex];
-
- const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex);
-
- if (_useDualQuaternionSkinning) {
- if (_cauterizeBoneSet.find(cluster.jointIndex) == _cauterizeBoneSet.end()) {
- // not cauterized so just copy the value from the non-cauterized version.
- state.clusterDualQuaternions[clusterIndex] = _meshStates[meshIndex].clusterDualQuaternions[clusterIndex];
- } else {
- Transform jointTransform(cauterizePose.rot(), cauterizePose.scale(), cauterizePose.trans());
+ // Just reset the cauterized state with a wholesale copy of the normal state
+ if (_useDualQuaternionSkinning) {
+ state.clusterDualQuaternions = nonCauterizedState.clusterDualQuaternions;
+ } else {
+ state.clusterMatrices = nonCauterizedState.clusterMatrices;
+ }
+
+ // And only cauterize the affected joints
+ auto numClusters = state.getNumClusters();
+ for (uint32_t clusterIndex = 0; clusterIndex < numClusters; clusterIndex++) {
+ const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(skinDeformerIndex, clusterIndex);
+ if (_cauterizeBoneSet.find(cbmov.jointIndex) != _cauterizeBoneSet.end()) {
+ if (_useDualQuaternionSkinning) {
 Transform clusterTransform;
- Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform);
+ Transform::mult(clusterTransform, cauterizedDQTransform, cbmov.inverseBindTransform);
 state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform);
 state.clusterDualQuaternions[clusterIndex].setCauterizationParameters(1.0f, cauterizePose.trans());
- }
- } else {
- if (_cauterizeBoneSet.find(cluster.jointIndex) == _cauterizeBoneSet.end()) {
- // not cauterized so just copy the value from the non-cauterized version.
- state.clusterMatrices[clusterIndex] = _meshStates[meshIndex].clusterMatrices[clusterIndex]; } else { glm_mat4u_mul(cauterizeMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]); } @@ -169,7 +155,7 @@ void CauterizedModel::updateClusterMatrices() { // post the blender if we're not currently waiting for one to finish auto modelBlender = DependencyManager::get(); - if (modelBlender->shouldComputeBlendshapes() && hfmModel.hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) { + if (modelBlender->shouldComputeBlendshapes() && getHFMModel().hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) { _blendedBlendshapeCoefficients = _blendshapeCoefficients; modelBlender->noteRequiresBlend(getThisPointer()); } @@ -209,22 +195,19 @@ void CauterizedModel::updateRenderItems() { render::Transaction transaction; for (int i = 0; i < (int)self->_modelMeshRenderItemIDs.size(); i++) { - auto itemID = self->_modelMeshRenderItemIDs[i]; - auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex; const auto& shapeState = self->getShapeState(i); - auto deformerIndex = self->_modelMeshRenderItemShapes[i].deformerIndex; - bool isDeformed = (deformerIndex != hfm::UNDEFINED_KEY); + auto skinDeformerIndex = shapeState._skinDeformerIndex; - bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex); + bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(shapeState._meshIndex); bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning(); - if (isDeformed) { + if (skinDeformerIndex != hfm::UNDEFINED_KEY) { - const auto& meshState = self->getMeshState(deformerIndex); - const auto& cauterizedMeshState = self->getCauterizeMeshState(deformerIndex); + const auto& meshState = self->getMeshState(skinDeformerIndex); + const auto& cauterizedMeshState = self->getCauterizeMeshState(skinDeformerIndex); transaction.updateItem(itemID, [modelTransform, shapeState, meshState, useDualQuaternionSkinning, cauterizedMeshState, invalidatePayloadShapeKey, diff --git a/libraries/render-utils/src/CauterizedModel.h b/libraries/render-utils/src/CauterizedModel.h index 36a96fb006..7d241d7ac6 100644 --- a/libraries/render-utils/src/CauterizedModel.h +++ b/libraries/render-utils/src/CauterizedModel.h @@ -40,7 +40,7 @@ public: protected: std::unordered_set _cauterizeBoneSet; - QVector _cauterizeMeshStates; + std::vector _cauterizeMeshStates; bool _isCauterized { false }; bool _enableCauterization { false }; }; diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index 6df3c76a67..39ae7e6a8f 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -151,7 +151,7 @@ void Model::setOffset(const glm::vec3& offset) { } void Model::calculateTextureInfo() { - if (!_hasCalculatedTextureInfo && isLoaded() && getNetworkModel()->areTexturesLoaded() && !_modelMeshRenderItemsMap.isEmpty()) { + if (!_hasCalculatedTextureInfo && isLoaded() && getNetworkModel()->areTexturesLoaded() && !_modelMeshRenderItemIDs.empty()) { size_t textureSize = 0; int textureCount = 0; bool allTexturesLoaded = true; @@ -228,25 +228,18 @@ void Model::updateRenderItems() { render::Transaction transaction; for (int i = 0; i < (int) self->_modelMeshRenderItemIDs.size(); i++) { - auto itemID = self->_modelMeshRenderItemIDs[i]; - auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex; const auto& shapeState = self->getShapeState(i); - auto deformerIndex = 
self->_modelMeshRenderItemShapes[i].deformerIndex; - bool isDeformed = (deformerIndex != hfm::UNDEFINED_KEY); + auto skinDeformerIndex = shapeState._skinDeformerIndex; - bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex); + bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(shapeState._meshIndex); - - if (isDeformed) { - - const auto& meshState = self->getMeshState(deformerIndex); - // MeshState meshState; + if (skinDeformerIndex != hfm::UNDEFINED_KEY) { + const auto& meshState = self->getMeshState(skinDeformerIndex); bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning(); - transaction.updateItem(itemID, [modelTransform, shapeState, meshState, useDualQuaternionSkinning, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags, cauterized](ModelMeshPartPayload& data) { if (useDualQuaternionSkinning) { @@ -303,15 +296,11 @@ void Model::reset() { } void Model::updateShapeStatesFromRig() { - const HFMModel& hfmModel = getHFMModel(); - // TODO: should all Models have a valid _rig? { // Shapes state: - const auto& shapes = hfmModel.shapes; - _shapeStates.resize(shapes.size()); - for (int s = 0; s < shapes.size(); ++s) { - uint32_t jointId = shapes[s].joint; + for (auto& shape : _shapeStates) { + uint32_t jointId = shape._jointIndex; if (jointId < (uint32_t) _rig.getJointStateCount()) { - _shapeStates[s]._rootFromJointTransform = _rig.getJointTransform(jointId); + shape._rootFromJointTransform = _rig.getJointTransform(jointId); } } } @@ -331,9 +320,19 @@ bool Model::updateGeometry() { initJointStates(); assert(_meshStates.empty()); + const HFMModel& hfmModel = getHFMModel(); + + const auto& shapes = hfmModel.shapes; + _shapeStates.resize(shapes.size()); + for (int s = 0; s < shapes.size(); ++s) { + auto& shapeState = _shapeStates[s]; + shapeState._jointIndex = shapes[s].joint; + shapeState._meshIndex = shapes[s].mesh; + shapeState._meshPartIndex = shapes[s].meshPart; + shapeState._skinDeformerIndex = shapes[s].skinDeformer; + } updateShapeStatesFromRig(); - const HFMModel& hfmModel = getHFMModel(); const auto& hfmSkinDeformers = hfmModel.skinDeformers; for (int i = 0; i < hfmSkinDeformers.size(); i++) { const auto& dynT = hfmSkinDeformers[i]; @@ -740,9 +739,9 @@ bool Model::replaceScriptableModelMeshPart(scriptable::ScriptableModelBasePointe render::Transaction transaction; for (int i = 0; i < (int) _modelMeshRenderItemIDs.size(); i++) { auto itemID = _modelMeshRenderItemIDs[i]; - auto shape = _modelMeshRenderItemShapes[i]; + auto& shape = _shapeStates[i]; // TODO: check to see if .partIndex matches too - if (shape.meshIndex == meshIndex) { + if (shape._meshIndex == meshIndex) { transaction.updateItem(itemID, [=](ModelMeshPartPayload& data) { data.updateMeshPart(mesh, partIndex); }); @@ -904,8 +903,8 @@ void Model::updateRenderItemsKey(const render::ScenePointer& scene) { } auto renderItemsKey = _renderItemKeyGlobalFlags; render::Transaction transaction; - foreach(auto item, _modelMeshRenderItemsMap.keys()) { - transaction.updateItem(item, [renderItemsKey](ModelMeshPartPayload& data) { + for(auto itemID: _modelMeshRenderItemIDs) { + transaction.updateItem(itemID, [renderItemsKey](ModelMeshPartPayload& data) { data.updateKey(renderItemsKey); }); } @@ -975,8 +974,8 @@ void Model::setCauterized(bool cauterized, const render::ScenePointer& scene) { return; } render::Transaction transaction; - foreach (auto item, _modelMeshRenderItemsMap.keys()) { - transaction.updateItem(item, [cauterized](ModelMeshPartPayload& data) { + for 
(auto itemID : _modelMeshRenderItemIDs) { + transaction.updateItem(itemID, [cauterized](ModelMeshPartPayload& data) { data.setCauterized(cauterized); }); } @@ -1003,26 +1002,25 @@ bool Model::addToScene(const render::ScenePointer& scene, bool somethingAdded = false; - if (_modelMeshRenderItemsMap.empty()) { + if (_modelMeshRenderItemIDs.empty()) { bool hasTransparent = false; size_t verticesCount = 0; foreach(auto renderItem, _modelMeshRenderItems) { auto item = scene->allocateID(); auto renderPayload = std::make_shared(renderItem); - if (_modelMeshRenderItemsMap.empty() && statusGetters.size()) { + if (_modelMeshRenderItemIDs.empty() && statusGetters.size()) { renderPayload->addStatusGetters(statusGetters); } transaction.resetItem(item, renderPayload); hasTransparent = hasTransparent || renderItem.get()->getShapeKey().isTranslucent(); verticesCount += renderItem.get()->getVerticesCount(); - _modelMeshRenderItemsMap.insert(item, renderPayload); _modelMeshRenderItemIDs.emplace_back(item); } - somethingAdded = !_modelMeshRenderItemsMap.empty(); + somethingAdded = !_modelMeshRenderItemIDs.empty(); _renderInfoVertexCount = verticesCount; - _renderInfoDrawCalls = _modelMeshRenderItemsMap.count(); + _renderInfoDrawCalls = (uint32_t) _modelMeshRenderItemIDs.size(); _renderInfoHasTransparent = hasTransparent; } @@ -1037,14 +1035,12 @@ bool Model::addToScene(const render::ScenePointer& scene, } void Model::removeFromScene(const render::ScenePointer& scene, render::Transaction& transaction) { - foreach (auto item, _modelMeshRenderItemsMap.keys()) { - transaction.removeItem(item); + for (auto itemID: _modelMeshRenderItemIDs) { + transaction.removeItem(itemID); } _modelMeshRenderItemIDs.clear(); - _modelMeshRenderItemsMap.clear(); _modelMeshRenderItems.clear(); _modelMeshMaterialNames.clear(); - _modelMeshRenderItemShapes.clear(); _priorityMap.clear(); _addedToScene = false; @@ -1415,25 +1411,22 @@ void Model::updateClusterMatrices() { updateShapeStatesFromRig(); _needsUpdateClusterMatrices = false; - const HFMModel& hfmModel = getHFMModel(); - const auto& hfmSkinDeformers = hfmModel.skinDeformers; - for (int meshIndex = 0; meshIndex < (int) _meshStates.size(); meshIndex++) { - MeshState& state = _meshStates[meshIndex]; - const auto& deformer = hfmSkinDeformers[meshIndex]; - for (int clusterIndex = 0; clusterIndex < deformer.clusters.size(); clusterIndex++) { - const auto& cluster = deformer.clusters[clusterIndex]; - - const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex); + for (int skinDeformerIndex = 0; skinDeformerIndex < (int)_meshStates.size(); skinDeformerIndex++) { + MeshState& state = _meshStates[skinDeformerIndex]; + auto numClusters = state.getNumClusters(); + for (uint32_t clusterIndex = 0; clusterIndex < numClusters; clusterIndex++) { + const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(skinDeformerIndex, clusterIndex); if (_useDualQuaternionSkinning) { - auto jointPose = _rig.getJointPose(cluster.jointIndex); + auto jointPose = _rig.getJointPose(cbmov.jointIndex); Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans()); Transform clusterTransform; Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform); state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform); - } else { - auto jointMatrix = _rig.getJointTransform(cluster.jointIndex); + } + else { + auto jointMatrix = _rig.getJointTransform(cbmov.jointIndex); 
glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]); } } @@ -1441,7 +1434,7 @@ void Model::updateClusterMatrices() { // post the blender if we're not currently waiting for one to finish auto modelBlender = DependencyManager::get(); - if (modelBlender->shouldComputeBlendshapes() && hfmModel.hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) { + if (modelBlender->shouldComputeBlendshapes() && getHFMModel().hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) { _blendedBlendshapeCoefficients = _blendshapeCoefficients; modelBlender->noteRequiresBlend(getThisPointer()); } @@ -1490,7 +1483,6 @@ void Model::createRenderItemSet() { _modelMeshRenderItems.clear(); _modelMeshMaterialNames.clear(); - _modelMeshRenderItemShapes.clear(); Transform transform; transform.setTranslation(_translation); @@ -1510,12 +1502,11 @@ void Model::createRenderItemSet() { auto material = getNetworkModel()->getShapeMaterial(shapeID); _modelMeshMaterialNames.push_back(material ? material->getName() : ""); - _modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)shape.mesh, shape.skinDeformer }); } } bool Model::isRenderable() const { - return (!_shapeStates.empty() /* && !_meshStates.empty()*/) || (isLoaded() && _renderGeometry->getMeshes().empty()); + return (!_shapeStates.empty()) || (isLoaded() && _renderGeometry->getMeshes().empty()); } std::set Model::getMeshIDsFromMaterialID(QString parentMaterialName) { @@ -1571,11 +1562,11 @@ void Model::applyMaterialMapping() { PrimitiveMode primitiveMode = getPrimitiveMode(); bool useDualQuaternionSkinning = _useDualQuaternionSkinning; auto modelMeshRenderItemIDs = _modelMeshRenderItemIDs; - auto modelMeshRenderItemShapes = _modelMeshRenderItemShapes; + auto shapeStates = _shapeStates; std::unordered_map shouldInvalidatePayloadShapeKeyMap; - for (auto& shape : _modelMeshRenderItemShapes) { - shouldInvalidatePayloadShapeKeyMap[shape.meshIndex] = shouldInvalidatePayloadShapeKey(shape.meshIndex); + for (auto& shape : _shapeStates) { + shouldInvalidatePayloadShapeKeyMap[shape._meshIndex] = shouldInvalidatePayloadShapeKey(shape._meshIndex); } auto& materialMapping = getMaterialMapping(); @@ -1598,7 +1589,7 @@ void Model::applyMaterialMapping() { std::weak_ptr weakSelf = shared_from_this(); auto materialLoaded = [networkMaterialResource, shapeIDs, priorityMapPerResource, renderItemsKey, primitiveMode, useDualQuaternionSkinning, - modelMeshRenderItemIDs, modelMeshRenderItemShapes, shouldInvalidatePayloadShapeKeyMap, weakSelf]() { + modelMeshRenderItemIDs, shapeStates, shouldInvalidatePayloadShapeKeyMap, weakSelf]() { std::shared_ptr self = weakSelf.lock(); if (!self || networkMaterialResource->isFailed() || networkMaterialResource->parsedMaterials.names.size() == 0) { return; @@ -1624,7 +1615,7 @@ void Model::applyMaterialMapping() { for (auto shapeID : shapeIDs) { if (shapeID < modelMeshRenderItemIDs.size()) { auto itemID = modelMeshRenderItemIDs[shapeID]; - auto meshIndex = modelMeshRenderItemShapes[shapeID].meshIndex; + auto meshIndex = shapeStates[shapeID]._meshIndex; bool invalidatePayloadShapeKey = shouldInvalidatePayloadShapeKeyMap.at(meshIndex); graphics::MaterialLayer material = graphics::MaterialLayer(networkMaterial, priorityMapPerResource.at(shapeID)); { @@ -1662,7 +1653,7 @@ void Model::addMaterial(graphics::MaterialLayer material, const std::string& par for (auto shapeID : shapeIDs) { if (shapeID < _modelMeshRenderItemIDs.size()) { auto itemID = 
_modelMeshRenderItemIDs[shapeID]; - auto meshIndex = _modelMeshRenderItemShapes[shapeID].meshIndex; + auto meshIndex = _shapeStates[shapeID]._meshIndex; bool invalidatePayloadShapeKey = shouldInvalidatePayloadShapeKey(meshIndex); transaction.updateItem(itemID, [material, renderItemsKey, invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning](ModelMeshPartPayload& data) { @@ -1684,7 +1675,7 @@ void Model::removeMaterial(graphics::MaterialPointer material, const std::string auto itemID = _modelMeshRenderItemIDs[shapeID]; auto renderItemsKey = _renderItemKeyGlobalFlags; PrimitiveMode primitiveMode = getPrimitiveMode(); - auto meshIndex = _modelMeshRenderItemShapes[shapeID].meshIndex; + auto meshIndex = _shapeStates[shapeID]._meshIndex; bool invalidatePayloadShapeKey = shouldInvalidatePayloadShapeKey(meshIndex); bool useDualQuaternionSkinning = _useDualQuaternionSkinning; transaction.updateItem(itemID, [material, renderItemsKey, diff --git a/libraries/render-utils/src/Model.h b/libraries/render-utils/src/Model.h index 09fb9b581e..0c04aca70c 100644 --- a/libraries/render-utils/src/Model.h +++ b/libraries/render-utils/src/Model.h @@ -297,6 +297,16 @@ public: int getRenderInfoDrawCalls() const { return _renderInfoDrawCalls; } bool getRenderInfoHasTransparent() const { return _renderInfoHasTransparent; } + class ShapeState { + public: + glm::mat4 _rootFromJointTransform; + uint32_t _jointIndex{ hfm::UNDEFINED_KEY }; + uint32_t _meshIndex{ hfm::UNDEFINED_KEY }; + uint32_t _meshPartIndex{ hfm::UNDEFINED_KEY }; + uint32_t _skinDeformerIndex{ hfm::UNDEFINED_KEY }; + }; + const ShapeState& getShapeState(int index) { return _shapeStates.at(index); } + class TransformDualQuaternion { public: TransformDualQuaternion() {} @@ -339,18 +349,13 @@ public: public: std::vector clusterDualQuaternions; std::vector clusterMatrices; - }; + uint32_t getNumClusters() const { return (uint32_t) std::max(clusterMatrices.size(), clusterMatrices.size()); } + }; const MeshState& getMeshState(int index) { return _meshStates.at(index); } - class ShapeState { - public: - glm::mat4 _rootFromJointTransform; - }; - const ShapeState& getShapeState(int index) { return _shapeStates.at(index); } - uint32_t getGeometryCounter() const { return _deleteGeometryCounter; } - const QMap& getRenderItems() const { return _modelMeshRenderItemsMap; } + BlendShapeOperator getModelBlendshapeOperator() const { return _modelBlendshapeOperator; } void renderDebugMeshBoxes(gpu::Batch& batch, bool forward); @@ -425,10 +430,12 @@ protected: bool _snappedToRegistrationPoint; /// are we currently snapped to a registration point glm::vec3 _registrationPoint = glm::vec3(0.5f); /// the point in model space our center is snapped to - std::vector _meshStates; + std::vector _shapeStates; void updateShapeStatesFromRig(); + std::vector _meshStates; + virtual void initJointStates(); void setScaleInternal(const glm::vec3& scale); @@ -471,10 +478,7 @@ protected: static AbstractViewStateInterface* _viewState; QVector> _modelMeshRenderItems; - QMap _modelMeshRenderItemsMap; render::ItemIDs _modelMeshRenderItemIDs; - using ShapeInfo = struct { int meshIndex; uint32_t deformerIndex{ hfm::UNDEFINED_KEY }; }; - std::vector _modelMeshRenderItemShapes; std::vector _modelMeshMaterialNames; bool _addedToScene { false }; // has been added to scene diff --git a/libraries/render-utils/src/SoftAttachmentModel.cpp b/libraries/render-utils/src/SoftAttachmentModel.cpp index 186f9e682a..1b8d1e7b69 100644 --- a/libraries/render-utils/src/SoftAttachmentModel.cpp +++ 
b/libraries/render-utils/src/SoftAttachmentModel.cpp @@ -41,37 +41,37 @@ void SoftAttachmentModel::updateClusterMatrices() { _needsUpdateClusterMatrices = false; - const HFMModel& hfmModel = getHFMModel(); - for (int i = 0; i < (int) _meshStates.size(); i++) { - MeshState& state = _meshStates[i]; - const HFMMesh& mesh = hfmModel.meshes.at(i); - int meshIndex = i; - for (int j = 0; j < mesh.clusters.size(); j++) { - const HFMCluster& cluster = mesh.clusters.at(j); + for (int skinDeformerIndex = 0; skinDeformerIndex < (int)_meshStates.size(); skinDeformerIndex++) { + MeshState& state = _meshStates[skinDeformerIndex]; + auto numClusters = state.getNumClusters(); + for (uint32_t clusterIndex = 0; clusterIndex < numClusters; clusterIndex++) { + const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(skinDeformerIndex, clusterIndex); - int clusterIndex = j; // TODO: cache these look-ups as an optimization - int jointIndexOverride = getJointIndexOverride(cluster.jointIndex); - glm::mat4 jointMatrix; + int jointIndexOverride = getJointIndexOverride(cbmov.jointIndex); + auto rig = &_rigOverride; if (jointIndexOverride >= 0 && jointIndexOverride < _rigOverride.getJointStateCount()) { - jointMatrix = _rigOverride.getJointTransform(jointIndexOverride); - } else { - jointMatrix = _rig.getJointTransform(cluster.jointIndex); + rig = &_rig; } + if (_useDualQuaternionSkinning) { - glm::mat4 m; - glm_mat4u_mul(jointMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, m); - state.clusterDualQuaternions[j] = Model::TransformDualQuaternion(m); - } else { - glm_mat4u_mul(jointMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, state.clusterMatrices[j]); + auto jointPose = rig->getJointPose(cbmov.jointIndex); + Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans()); + Transform clusterTransform; + Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform); + state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform); + } + else { + auto jointMatrix = rig->getJointTransform(cbmov.jointIndex); + glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]); } } } // post the blender if we're not currently waiting for one to finish auto modelBlender = DependencyManager::get(); - if (modelBlender->shouldComputeBlendshapes() && hfmModel.hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) { + if (modelBlender->shouldComputeBlendshapes() && getHFMModel().hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) { _blendedBlendshapeCoefficients = _blendshapeCoefficients; modelBlender->noteRequiresBlend(getThisPointer()); } diff --git a/tools/vhacd-util/src/VHACDUtil.cpp b/tools/vhacd-util/src/VHACDUtil.cpp index 3410d35e6a..f0eb94a1cf 100644 --- a/tools/vhacd-util/src/VHACDUtil.cpp +++ b/tools/vhacd-util/src/VHACDUtil.cpp @@ -348,7 +348,7 @@ bool vhacd::VHACDUtil::computeVHACD(HFMModel& hfmModel, if (_verbose) { qDebug() << "mesh" << meshIndex << ": " - << " parts =" << mesh.parts.size() << " clusters =" << mesh.clusters.size() + << " parts =" << mesh.parts.size() << " vertices =" << numVertices; } ++meshIndex; From be9931bcabda950563299f7d8a7def87859a9746 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 16 Oct 2019 15:50:31 -0700 Subject: [PATCH 066/121] Move skinning reweighting code out of model prep to hfm 
library; use in FBXSerializer. --- libraries/fbx/src/FBXSerializer.cpp | 21 +-- libraries/fbx/src/GLTFSerializer.cpp | 2 +- libraries/fbx/src/OBJSerializer.cpp | 4 +- libraries/hfm/src/hfm/HFM.h | 9 +- libraries/hfm/src/hfm/HFMModelMath.cpp | 75 +++++++++++ libraries/hfm/src/hfm/HFMModelMath.h | 10 ++ .../model-baker/src/model-baker/Baker.cpp | 19 +-- .../src/model-baker/BuildGraphicsMeshTask.cpp | 31 ++--- .../src/model-baker/BuildGraphicsMeshTask.h | 2 +- .../model-baker/CollectShapeVerticesTask.cpp | 12 +- .../model-baker/CollectShapeVerticesTask.h | 2 +- .../src/model-baker/ReweightDeformersTask.cpp | 123 ------------------ .../src/model-baker/ReweightDeformersTask.h | 29 ----- 13 files changed, 131 insertions(+), 208 deletions(-) delete mode 100644 libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp delete mode 100644 libraries/model-baker/src/model-baker/ReweightDeformersTask.h diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index d507a3ae06..4b3311c95a 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -20,6 +20,7 @@ #include #include +#include // TOOL: Uncomment the following line to enable the filtering of all the unkwnon fields of a node so we can break point easily while loading a model with problems... //#define DEBUG_FBXSERIALIZER @@ -1593,7 +1594,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const if (clusterIDs.size() > 0) { hfm::SkinDeformer skinDeformer; auto& clusters = skinDeformer.clusters; - std::vector skinClusters; for (const auto& clusterID : clusterIDs) { HFMCluster hfmCluster; const Cluster& fbxCluster = fbxClusters[clusterID]; @@ -1638,7 +1638,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const cluster.jointIndex = transformIndex; clusters.push_back(cluster); - // Skinned mesh instances have a dynamic transform + std::vector skinClusters; + // Skinned mesh instances have an hfm::SkinDeformer skinDeformer.skinClusterIndices.reserve(clusterIDs.size()); for (const auto& clusterID : clusterIDs) { const Cluster& fbxCluster = fbxClusters[clusterID]; @@ -1661,12 +1662,16 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } } - - // Store this model's deformers, this dynamic transform's deformer IDs - uint32_t deformerMinID = (uint32_t)hfmModel.skinClusters.size(); - hfmModel.skinClusters.insert(hfmModel.skinClusters.end(), skinClusters.cbegin(), skinClusters.cend()); - skinDeformer.skinClusterIndices.resize(skinClusters.size()); - std::iota(skinDeformer.skinClusterIndices.begin(), skinDeformer.skinClusterIndices.end(), deformerMinID); + // It seems odd that this mesh-related code should be inside of the for loop for instanced model IDs. + // However, in practice, skinned FBX models appear to not be instanced, as the skinning includes both the weights and joints. 
+ { + hfm::ReweightedDeformers reweightedDeformers = hfm::getReweightedDeformers(mesh.vertices.size(), skinClusters); + if (reweightedDeformers.trimmedToMatch) { + qDebug(modelformat) << "FBXSerializer -- The number of indices and weights for a skinning deformer had different sizes and have been trimmed to match"; + } + mesh.clusterIndices = std::move(reweightedDeformers.indices); + mesh.clusterWeights = std::move(reweightedDeformers.weights); + } // Store the model's dynamic transform, and put its ID in the shapes hfmModel.skinDeformers.push_back(skinDeformer); diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index db272a534c..da48c3d2e3 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1401,7 +1401,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& int numVertices = mesh.vertices.size() - prevMeshVerticesCount; // Append new cluster indices and weights for this mesh part - int prevMeshClusterWeightCount = mesh.clusterWeights.count(); + size_t prevMeshClusterWeightCount = mesh.clusterWeights.size(); for (int i = 0; i < numVertices * WEIGHTS_PER_VERTEX; ++i) { mesh.clusterIndices.push_back(mesh.clusters.size() - 1); mesh.clusterWeights.push_back(0); diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index e1fc85ca2a..d3bde02e70 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -1018,8 +1018,8 @@ void hfmDebugDump(const HFMModel& hfmModel) { qCDebug(modelformat) << " colors.count() =" << mesh.colors.count(); qCDebug(modelformat) << " texCoords.count() =" << mesh.texCoords.count(); qCDebug(modelformat) << " texCoords1.count() =" << mesh.texCoords1.count(); - qCDebug(modelformat) << " clusterIndices.count() =" << mesh.clusterIndices.count(); - qCDebug(modelformat) << " clusterWeights.count() =" << mesh.clusterWeights.count(); + qCDebug(modelformat) << " clusterIndices.size() =" << mesh.clusterIndices.size(); + qCDebug(modelformat) << " clusterWeights.size() =" << mesh.clusterWeights.size(); qCDebug(modelformat) << " meshExtents =" << mesh.meshExtents; qCDebug(modelformat) << " modelTransform =" << mesh.modelTransform; qCDebug(modelformat) << " parts.count() =" << mesh.parts.size(); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 15ed876d94..b2d8147ac6 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -248,8 +248,8 @@ public: glm::mat4 modelTransform; // DEPRECATED (see hfm::Joint::globalTransform, hfm::Shape::transform, hfm::Model::joints) // Skinning cluster attributes - QVector clusterIndices; - QVector clusterWeights; + std::vector clusterIndices; + std::vector clusterWeights; // Blendshape attributes QVector blendshapes; @@ -296,7 +296,7 @@ public: bool shouldInitCollisions() const { return _collisionsConfig.size() > 0; } }; -// Formerly contained in hfm::Mesh +// A different skinning representation, used by FBXSerializer. We convert this to our graphics-optimized runtime representation contained within the mesh. 
class SkinCluster { public: std::vector indices; @@ -305,7 +305,7 @@ public: class SkinDeformer { public: - std::vector skinClusterIndices; + std::vector skinClusterIndices; // DEPRECATED (see hfm::Mesh.clusterIndices, hfm::Mesh.clusterWeights) std::vector clusters; }; @@ -337,7 +337,6 @@ public: std::vector materials; std::vector skinDeformers; - std::vector skinClusters; std::vector joints; QHash jointIndices; ///< 1-based, so as to more easily detect missing indices diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index de308297c4..09083ab4cc 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -11,6 +11,9 @@ #include "HFMModelMath.h" +#include +#include "ModelFormatLogging.h" + namespace hfm { void forEachIndex(const hfm::MeshPart& meshPart, std::function func) { @@ -63,4 +66,76 @@ void calculateExtentsForModel(Extents& modelExtents, const std::vector skinClusters, const uint16_t weightsPerVertex) { + ReweightedDeformers reweightedDeformers; + if (skinClusters.size() == 0) { + return reweightedDeformers; + } + + size_t numClusterIndices = numMeshVertices * weightsPerVertex; + reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(skinClusters.size() - 1)); + reweightedDeformers.weights.resize(numClusterIndices, 0); + + std::vector weightAccumulators; + weightAccumulators.resize(numClusterIndices, 0.0f); + for (uint16_t i = 0; i < (uint16_t)skinClusters.size(); ++i) { + const hfm::SkinCluster& skinCluster = skinClusters[i]; + + if (skinCluster.indices.size() != skinCluster.weights.size()) { + reweightedDeformers.trimmedToMatch = true; + } + size_t numIndicesOrWeights = std::min(skinCluster.indices.size(), skinCluster.weights.size()); + for (size_t j = 0; j < numIndicesOrWeights; ++j) { + uint32_t index = skinCluster.indices[j]; + float weight = skinCluster.weights[j]; + + // look for an unused slot in the weights vector + uint32_t weightIndex = index * weightsPerVertex; + uint32_t lowestIndex = -1; + float lowestWeight = FLT_MAX; + uint16_t k = 0; + for (; k < weightsPerVertex; k++) { + if (weightAccumulators[weightIndex + k] == 0.0f) { + reweightedDeformers.indices[weightIndex + k] = i; + weightAccumulators[weightIndex + k] = weight; + break; + } + if (weightAccumulators[weightIndex + k] < lowestWeight) { + lowestIndex = k; + lowestWeight = weightAccumulators[weightIndex + k]; + } + } + if (k == weightsPerVertex && weight > lowestWeight) { + // no space for an additional weight; we must replace the lowest + weightAccumulators[weightIndex + lowestIndex] = weight; + reweightedDeformers.indices[weightIndex + lowestIndex] = i; + } + } + } + + // now that we've accumulated the most relevant weights for each vertex + // normalize and compress to 16-bits + for (size_t i = 0; i < numMeshVertices; ++i) { + size_t j = i * weightsPerVertex; + + // normalize weights into uint16_t + float totalWeight = 0.0f; + for (size_t k = j; k < j + weightsPerVertex; ++k) { + totalWeight += weightAccumulators[k]; + } + + const float ALMOST_HALF = 0.499f; + if (totalWeight > 0.0f) { + float weightScalingFactor = (float)(UINT16_MAX) / totalWeight; + for (size_t k = j; k < j + weightsPerVertex; ++k) { + reweightedDeformers.weights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF); + } + } else { + reweightedDeformers.weights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); + } + } + + return reweightedDeformers; +} + }; diff --git a/libraries/hfm/src/hfm/HFMModelMath.h 
b/libraries/hfm/src/hfm/HFMModelMath.h index d1e3c09763..9420c96f08 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.h +++ b/libraries/hfm/src/hfm/HFMModelMath.h @@ -25,6 +25,16 @@ void calculateExtentsForShape(hfm::Shape& shape, const std::vector& m void calculateExtentsForModel(Extents& modelExtents, const std::vector& shapes); +const uint16_t NUM_SKINNING_WEIGHTS_PER_VERTEX = 4; + +class ReweightedDeformers { +public: + std::vector indices; + std::vector weights; + bool trimmedToMatch { false }; +}; + +ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const std::vector skinClusters, const uint16_t weightsPerVertex = NUM_SKINNING_WEIGHTS_PER_VERTEX); }; #endif // #define hifi_hfm_ModelMath_h diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index 5522ebc9f5..c63495c169 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -13,7 +13,6 @@ #include "BakerTypes.h" #include "ModelMath.h" -#include "ReweightDeformersTask.h" #include "CollectShapeVerticesTask.h" #include "BuildGraphicsMeshTask.h" #include "CalculateMeshNormalsTask.h" @@ -30,7 +29,7 @@ namespace baker { class GetModelPartsTask { public: using Input = hfm::Model::Pointer; - using Output = VaryingSet9, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector, std::vector, std::vector, std::vector, Extents>; + using Output = VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector, std::vector, std::vector, Extents>; using JobModel = Job::ModelIO; void run(const BakeContextPointer& context, const Input& input, Output& output) { @@ -46,8 +45,7 @@ namespace baker { output.edit4() = hfmModelIn->joints; output.edit5() = hfmModelIn->shapes; output.edit6() = hfmModelIn->skinDeformers; - output.edit7() = hfmModelIn->skinClusters; - output.edit8() = hfmModelIn->meshExtents; + output.edit7() = hfmModelIn->meshExtents; } }; @@ -148,8 +146,7 @@ namespace baker { const auto jointsIn = modelPartsIn.getN(4); const auto shapesIn = modelPartsIn.getN(5); const auto skinDeformersIn = modelPartsIn.getN(6); - const auto skinClustersIn = modelPartsIn.getN(7); - const auto modelExtentsIn = modelPartsIn.getN(8); + const auto modelExtentsIn = modelPartsIn.getN(7); // Calculate normals and tangents for meshes and blendshapes if they do not exist // Note: Normals are never calculated here for OBJ models. OBJ files optionally define normals on a per-face basis, so for consistency normals are calculated beforehand in OBJSerializer. @@ -161,16 +158,12 @@ namespace baker { const auto calculateBlendshapeTangentsInputs = CalculateBlendshapeTangentsTask::Input(normalsPerBlendshapePerMesh, blendshapesPerMeshIn, meshesIn).asVarying(); const auto tangentsPerBlendshapePerMesh = model.addJob("CalculateBlendshapeTangents", calculateBlendshapeTangentsInputs); - // Skinning weight calculations - // NOTE: Due to limitations in the current graphics::MeshPointer representation, the output list of ReweightedDeformers is per-mesh. An element is empty if there are no deformers for the mesh of the same index. - const auto reweightDeformersInputs = ReweightDeformersTask::Input(meshesIn, shapesIn, skinDeformersIn, skinClustersIn).asVarying(); - const auto reweightedDeformers = model.addJob("ReweightDeformers", reweightDeformersInputs); - // Shape vertices are included/rejected based on skinning weight, and thus must use the reweighted deformers. 
- const auto collectShapeVerticesInputs = CollectShapeVerticesTask::Input(meshesIn, shapesIn, jointsIn, skinDeformersIn, reweightedDeformers).asVarying(); + // Calculate shape vertices. These rely on the weight-normalized clusterIndices/clusterWeights in the mesh, and are used later for computing the joint kdops + const auto collectShapeVerticesInputs = CollectShapeVerticesTask::Input(meshesIn, shapesIn, jointsIn, skinDeformersIn).asVarying(); const auto shapeVerticesPerJoint = model.addJob("CollectShapeVertices", collectShapeVerticesInputs); // Build the graphics::MeshPointer for each hfm::Mesh - const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, skinDeformersIn, reweightedDeformers).asVarying(); + const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, skinDeformersIn).asVarying(); const auto graphicsMeshes = model.addJob("BuildGraphicsMesh", buildGraphicsMeshInputs); // Prepare joint information diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp index 88546e0975..6af0f9edf7 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp @@ -15,6 +15,7 @@ #include #include "ModelBakerLogging.h" +#include #include "ModelMath.h" using vec2h = glm::tvec2; @@ -27,7 +28,7 @@ glm::vec3 normalizeDirForPacking(const glm::vec3& dir) { return dir; } -void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, const baker::MeshNormals& meshNormals, const baker::MeshTangents& meshTangentsIn, uint16_t numDeformerControllers, const baker::ReweightedDeformers reweightedDeformers) { +void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, const baker::MeshNormals& meshNormals, const baker::MeshTangents& meshTangentsIn, uint16_t numDeformerControllers) { auto graphicsMesh = std::make_shared(); // Fill tangents with a dummy value to force tangents to be present if there are normals @@ -90,12 +91,8 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics // 4 Weights are normalized 16bits const auto clusterWeightElement = gpu::Element(gpu::VEC4, gpu::NUINT16, gpu::XYZW); - // Cluster indices and weights must be the same sizes - if (reweightedDeformers.trimmedToMatch) { - HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask -- The number of indices and weights for a deformer had different sizes and have been trimmed to match"); - } // Record cluster sizes - const size_t numVertClusters = (reweightedDeformers.weightsPerVertex ? reweightedDeformers.indices.size() / reweightedDeformers.weightsPerVertex : 0); + const size_t numVertClusters = hfmMesh.clusterIndices.size() / hfm::NUM_SKINNING_WEIGHTS_PER_VERTEX; const size_t clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize(); const size_t clusterWeightsSize = numVertClusters * clusterWeightElement.getSize(); @@ -186,20 +183,20 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics if (clusterIndicesSize > 0) { if (numDeformerControllers < (uint16_t)UINT8_MAX) { // yay! 
we can fit the clusterIndices within 8-bits - int32_t numIndices = (int32_t)reweightedDeformers.indices.size(); + int32_t numIndices = (int32_t)hfmMesh.clusterIndices.size(); std::vector packedDeformerIndices; packedDeformerIndices.resize(numIndices); for (int32_t i = 0; i < numIndices; ++i) { assert(hfmMesh.clusterIndices[i] <= UINT8_MAX); - packedDeformerIndices[i] = (uint8_t)(reweightedDeformers.indices[i]); + packedDeformerIndices[i] = (uint8_t)(hfmMesh.clusterIndices[i]); } vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) packedDeformerIndices.data()); } else { - vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) reweightedDeformers.indices.data()); + vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) hfmMesh.clusterIndices.data()); } } if (clusterWeightsSize > 0) { - vertBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (const gpu::Byte*) reweightedDeformers.weights.data()); + vertBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (const gpu::Byte*) hfmMesh.clusterWeights.data()); } @@ -382,7 +379,6 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const const auto& tangentsPerMesh = input.get4(); const auto& shapes = input.get5(); const auto& skinDeformers = input.get6(); - const auto& reweightedDeformersPerMesh = input.get7(); // Currently, there is only (at most) one skinDeformer per mesh // An undefined shape.skinDeformer has the value hfm::UNDEFINED_KEY @@ -399,19 +395,16 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const for (int i = 0; i < n; i++) { graphicsMeshes.emplace_back(); auto& graphicsMesh = graphicsMeshes[i]; - const auto& reweightedDeformers = reweightedDeformersPerMesh[i]; uint16_t numDeformerControllers = 0; - if (reweightedDeformers.weightsPerVertex != 0) { - uint32_t skinDeformerIndex = skinDeformerPerMesh[i]; - if (skinDeformerIndex != hfm::UNDEFINED_KEY) { - const hfm::SkinDeformer& skinDeformer = skinDeformers[skinDeformerIndex]; - numDeformerControllers = (uint16_t)skinDeformer.skinClusterIndices.size(); - } + uint32_t skinDeformerIndex = skinDeformerPerMesh[i]; + if (skinDeformerIndex != hfm::UNDEFINED_KEY) { + const hfm::SkinDeformer& skinDeformer = skinDeformers[skinDeformerIndex]; + numDeformerControllers = (uint16_t)skinDeformer.clusters.size(); } // Try to create the graphics::Mesh - buildGraphicsMesh(meshes[i], graphicsMesh, baker::safeGet(normalsPerMesh, i), baker::safeGet(tangentsPerMesh, i), numDeformerControllers, reweightedDeformers); + buildGraphicsMesh(meshes[i], graphicsMesh, baker::safeGet(normalsPerMesh, i), baker::safeGet(tangentsPerMesh, i), numDeformerControllers); // Choose a name for the mesh if (graphicsMesh) { diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h index b60f6f7a43..34128eabe8 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.h @@ -20,7 +20,7 @@ class BuildGraphicsMeshTask { public: - using Input = baker::VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh, std::vector, std::vector, std::vector>; + using Input = baker::VaryingSet7, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh, std::vector, std::vector>; using Output = std::vector; using JobModel = baker::Job::ModelIO; diff 
--git a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp index e597bbf507..5ede25a42c 100644 --- a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp +++ b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp @@ -13,6 +13,8 @@ #include +#include + // Used to track and avoid duplicate shape vertices, as multiple shapes can have the same mesh and skinDeformer class VertexSource { public: @@ -30,7 +32,6 @@ void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, con const auto& shapes = input.get1(); const auto& joints = input.get2(); const auto& skinDeformers = input.get3(); - const auto& reweightedDeformers = input.get4(); auto& shapeVerticesPerJoint = output; shapeVerticesPerJoint.resize(joints.size()); @@ -59,10 +60,9 @@ void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, con const auto& mesh = meshes[shape.mesh]; const auto& vertices = mesh.vertices; - const auto& reweightedDeformer = reweightedDeformers[shape.mesh]; const glm::mat4 meshToJoint = cluster.inverseBindMatrix; - const uint16_t weightsPerVertex = reweightedDeformer.weightsPerVertex; + const uint16_t weightsPerVertex = hfm::NUM_SKINNING_WEIGHTS_PER_VERTEX; if (weightsPerVertex == 0) { for (int vertexIndex = 0; vertexIndex < (int)vertices.size(); ++vertexIndex) { const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertices[vertexIndex]); @@ -71,9 +71,9 @@ void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, con } else { for (int vertexIndex = 0; vertexIndex < (int)vertices.size(); ++vertexIndex) { for (uint16_t weightIndex = 0; weightIndex < weightsPerVertex; ++weightIndex) { - const size_t index = vertexIndex*4 + weightIndex; - const uint16_t clusterIndex = reweightedDeformer.indices[index]; - const uint16_t clusterWeight = reweightedDeformer.weights[index]; + const size_t index = vertexIndex*weightsPerVertex + weightIndex; + const uint16_t clusterIndex = mesh.clusterIndices[index]; + const uint16_t clusterWeight = mesh.clusterWeights[index]; // Remember vertices associated with this joint with at least 1/4 weight const uint16_t EXPANSION_WEIGHT_THRESHOLD = std::numeric_limits::max() / 4; if (clusterIndex != j || clusterWeight < EXPANSION_WEIGHT_THRESHOLD) { diff --git a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h index f14c440f2f..a665004d6b 100644 --- a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h +++ b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.h @@ -19,7 +19,7 @@ class CollectShapeVerticesTask { public: - using Input = baker::VaryingSet5, std::vector, std::vector, std::vector, std::vector>; + using Input = baker::VaryingSet4, std::vector, std::vector, std::vector>; using Output = std::vector; using JobModel = baker::Job::ModelIO; diff --git a/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp b/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp deleted file mode 100644 index f210a5dd6f..0000000000 --- a/libraries/model-baker/src/model-baker/ReweightDeformersTask.cpp +++ /dev/null @@ -1,123 +0,0 @@ -// -// ReweightDeformersTask.h -// model-baker/src/model-baker -// -// Created by Sabrina Shanman on 2019/09/26. -// Copyright 2019 High Fidelity, Inc. -// -// Distributed under the Apache License, Version 2.0. 
-// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html -// - -#include "ReweightDeformersTask.h" - -baker::ReweightedDeformers getReweightedDeformers(size_t numMeshVertices, const std::vector skinClusters, const uint16_t weightsPerVertex) { - baker::ReweightedDeformers reweightedDeformers; - if (skinClusters.size() == 0) { - return reweightedDeformers; - } - - size_t numClusterIndices = numMeshVertices * weightsPerVertex; - reweightedDeformers.weightsPerVertex = weightsPerVertex; - // TODO: Consider having a rootCluster property in the DynamicTransform rather than appending the root to the end of the cluster list. - reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(skinClusters.size() - 1)); - reweightedDeformers.weights.resize(numClusterIndices, 0); - - std::vector weightAccumulators; - weightAccumulators.resize(numClusterIndices, 0.0f); - for (uint16_t i = 0; i < (uint16_t)skinClusters.size(); ++i) { - const hfm::SkinCluster& skinCluster = *skinClusters[i]; - - if (skinCluster.indices.size() != skinCluster.weights.size()) { - reweightedDeformers.trimmedToMatch = true; - } - size_t numIndicesOrWeights = std::min(skinCluster.indices.size(), skinCluster.weights.size()); - for (size_t j = 0; j < numIndicesOrWeights; ++j) { - uint32_t index = skinCluster.indices[j]; - float weight = skinCluster.weights[j]; - - // look for an unused slot in the weights vector - uint32_t weightIndex = index * weightsPerVertex; - uint32_t lowestIndex = -1; - float lowestWeight = FLT_MAX; - uint16_t k = 0; - for (; k < weightsPerVertex; k++) { - if (weightAccumulators[weightIndex + k] == 0.0f) { - reweightedDeformers.indices[weightIndex + k] = i; - weightAccumulators[weightIndex + k] = weight; - break; - } - if (weightAccumulators[weightIndex + k] < lowestWeight) { - lowestIndex = k; - lowestWeight = weightAccumulators[weightIndex + k]; - } - } - if (k == weightsPerVertex && weight > lowestWeight) { - // no space for an additional weight; we must replace the lowest - weightAccumulators[weightIndex + lowestIndex] = weight; - reweightedDeformers.indices[weightIndex + lowestIndex] = i; - } - } - } - - // now that we've accumulated the most relevant weights for each vertex - // normalize and compress to 16-bits - for (size_t i = 0; i < numMeshVertices; ++i) { - size_t j = i * weightsPerVertex; - - // normalize weights into uint16_t - float totalWeight = 0.0f; - for (size_t k = j; k < j + weightsPerVertex; ++k) { - totalWeight += weightAccumulators[k]; - } - - const float ALMOST_HALF = 0.499f; - if (totalWeight > 0.0f) { - float weightScalingFactor = (float)(UINT16_MAX) / totalWeight; - for (size_t k = j; k < j + weightsPerVertex; ++k) { - reweightedDeformers.weights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF); - } - } else { - reweightedDeformers.weights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); - } - } - - return reweightedDeformers; -} - -void ReweightDeformersTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) { - const uint16_t NUM_WEIGHTS_PER_VERTEX { 4 }; - - const auto& meshes = input.get0(); - const auto& shapes = input.get1(); - const auto& skinDeformers = input.get2(); - const auto& skinClusters = input.get3(); - auto& reweightedDeformers = output; - - // Currently, there is only (at most) one skinDeformer per mesh - // An undefined shape.skinDeformer has the value hfm::UNDEFINED_KEY - std::vector skinDeformerPerMesh; - skinDeformerPerMesh.resize(meshes.size(), hfm::UNDEFINED_KEY); - 
for (const auto& shape : shapes) { - uint32_t skinDeformerIndex = shape.skinDeformer; - skinDeformerPerMesh[shape.mesh] = skinDeformerIndex; - } - - reweightedDeformers.reserve(meshes.size()); - for (size_t i = 0; i < meshes.size(); ++i) { - const auto& mesh = meshes[i]; - uint32_t skinDeformerIndex = skinDeformerPerMesh[i]; - - const hfm::SkinDeformer* skinDeformer = nullptr; - std::vector meshSkinClusters; - if (skinDeformerIndex != hfm::UNDEFINED_KEY) { - skinDeformer = &skinDeformers[skinDeformerIndex]; - for (const auto& skinClusterIndex : skinDeformer->skinClusterIndices) { - const auto& skinCluster = skinClusters[skinClusterIndex]; - meshSkinClusters.push_back(&skinCluster); - } - } - - reweightedDeformers.push_back(getReweightedDeformers((size_t)mesh.vertices.size(), meshSkinClusters, NUM_WEIGHTS_PER_VERTEX)); - } -} diff --git a/libraries/model-baker/src/model-baker/ReweightDeformersTask.h b/libraries/model-baker/src/model-baker/ReweightDeformersTask.h deleted file mode 100644 index c40ad4c1b4..0000000000 --- a/libraries/model-baker/src/model-baker/ReweightDeformersTask.h +++ /dev/null @@ -1,29 +0,0 @@ -// -// ReweightDeformersTask.h -// model-baker/src/model-baker -// -// Created by Sabrina Shanman on 2019/09/26. -// Copyright 2019 High Fidelity, Inc. -// -// Distributed under the Apache License, Version 2.0. -// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html -// - -#ifndef hifi_ReweightDeformersTask_h -#define hifi_ReweightDeformersTask_h - -#include - -#include "Engine.h" -#include "BakerTypes.h" - -class ReweightDeformersTask { -public: - using Input = baker::VaryingSet4, std::vector, std::vector, std::vector>; - using Output = std::vector; - using JobModel = baker::Job::ModelIO; - - void run(const baker::BakeContextPointer& context, const Input& input, Output& output); -}; - -#endif // hifi_ReweightDeformersTask_h From ad6720240fe31f888f6b11a46e5e2c3c68923dd8 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 18 Oct 2019 11:12:08 -0700 Subject: [PATCH 067/121] Introduce hfm::Mesh.clusterWeightsPerVertex --- libraries/fbx/src/FBXSerializer.cpp | 1 + libraries/hfm/src/hfm/HFM.h | 1 + libraries/hfm/src/hfm/HFMModelMath.cpp | 1 + libraries/hfm/src/hfm/HFMModelMath.h | 7 ++++--- .../model-baker/src/model-baker/BuildGraphicsMeshTask.cpp | 2 +- .../src/model-baker/CollectShapeVerticesTask.cpp | 2 +- 6 files changed, 9 insertions(+), 5 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 4b3311c95a..2c03c3c3ae 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1671,6 +1671,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } mesh.clusterIndices = std::move(reweightedDeformers.indices); mesh.clusterWeights = std::move(reweightedDeformers.weights); + mesh.clusterWeightsPerVertex = reweightedDeformers.weightsPerVertex; } // Store the model's dynamic transform, and put its ID in the shapes diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index b2d8147ac6..3d7f33383d 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -250,6 +250,7 @@ public: // Skinning cluster attributes std::vector clusterIndices; std::vector clusterWeights; + uint16_t clusterWeightsPerVertex { 0 }; // Blendshape attributes QVector blendshapes; diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index 09083ab4cc..93687b08b0 100644 --- 
a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -75,6 +75,7 @@ ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const s size_t numClusterIndices = numMeshVertices * weightsPerVertex; reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(skinClusters.size() - 1)); reweightedDeformers.weights.resize(numClusterIndices, 0); + reweightedDeformers.weightsPerVertex = weightsPerVertex; std::vector weightAccumulators; weightAccumulators.resize(numClusterIndices, 0.0f); diff --git a/libraries/hfm/src/hfm/HFMModelMath.h b/libraries/hfm/src/hfm/HFMModelMath.h index 9420c96f08..b80adad3d0 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.h +++ b/libraries/hfm/src/hfm/HFMModelMath.h @@ -25,16 +25,17 @@ void calculateExtentsForShape(hfm::Shape& shape, const std::vector& m void calculateExtentsForModel(Extents& modelExtents, const std::vector& shapes); -const uint16_t NUM_SKINNING_WEIGHTS_PER_VERTEX = 4; - class ReweightedDeformers { public: std::vector indices; std::vector weights; + uint16_t weightsPerVertex { 0 }; bool trimmedToMatch { false }; }; -ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const std::vector skinClusters, const uint16_t weightsPerVertex = NUM_SKINNING_WEIGHTS_PER_VERTEX); +const uint16_t DEFAULT_SKINNING_WEIGHTS_PER_VERTEX = 4; + +ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const std::vector skinClusters, const uint16_t weightsPerVertex = DEFAULT_SKINNING_WEIGHTS_PER_VERTEX); }; #endif // #define hifi_hfm_ModelMath_h diff --git a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp index 6af0f9edf7..66429ed2c4 100644 --- a/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildGraphicsMeshTask.cpp @@ -92,7 +92,7 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics const auto clusterWeightElement = gpu::Element(gpu::VEC4, gpu::NUINT16, gpu::XYZW); // Record cluster sizes - const size_t numVertClusters = hfmMesh.clusterIndices.size() / hfm::NUM_SKINNING_WEIGHTS_PER_VERTEX; + const size_t numVertClusters = hfmMesh.clusterWeightsPerVertex == 0 ? 
0 : hfmMesh.clusterIndices.size() / hfmMesh.clusterWeightsPerVertex; const size_t clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize(); const size_t clusterWeightsSize = numVertClusters * clusterWeightElement.getSize(); diff --git a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp index 5ede25a42c..13bc75ced9 100644 --- a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp +++ b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp @@ -62,7 +62,7 @@ void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, con const auto& vertices = mesh.vertices; const glm::mat4 meshToJoint = cluster.inverseBindMatrix; - const uint16_t weightsPerVertex = hfm::NUM_SKINNING_WEIGHTS_PER_VERTEX; + const uint16_t weightsPerVertex = mesh.clusterWeightsPerVertex; if (weightsPerVertex == 0) { for (int vertexIndex = 0; vertexIndex < (int)vertices.size(); ++vertexIndex) { const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertices[vertexIndex]); From a95a4b5aa25dfbc88389bff32a9ddf84a6d6ded7 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Fri, 18 Oct 2019 11:43:25 -0700 Subject: [PATCH 068/121] Address warnings and comments from review --- libraries/animation/src/AnimSkeleton.cpp | 2 +- libraries/fbx/src/OBJSerializer.cpp | 2 +- .../src/model-networking/ModelCache.cpp | 4 ++-- libraries/render-utils/src/CauterizedModel.cpp | 8 +++----- libraries/render-utils/src/MeshPartPayload.cpp | 3 --- libraries/render-utils/src/MeshPartPayload.h | 2 +- libraries/render-utils/src/Model.cpp | 13 +++++-------- 7 files changed, 13 insertions(+), 21 deletions(-) diff --git a/libraries/animation/src/AnimSkeleton.cpp b/libraries/animation/src/AnimSkeleton.cpp index 9a27ba766a..3afa7b0e3e 100644 --- a/libraries/animation/src/AnimSkeleton.cpp +++ b/libraries/animation/src/AnimSkeleton.cpp @@ -34,7 +34,7 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) { const auto& defor = hfmModel.skinDeformers[i]; std::vector dummyClustersList; - for (int j = 0; j < defor.clusters.size(); j++) { + for (int j = 0; j < (uint32_t) defor.clusters.size(); j++) { // cast into a non-const reference, so we can mutate the FBXCluster HFMCluster& cluster = const_cast(defor.clusters.at(j)); diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index 47c88168b4..57fcf79aac 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -1009,7 +1009,7 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V } // GO over the shapes once more to assign hte material index correctly - for (int i = 0; i < hfmModel.shapes.size(); ++i) { + for (int i = 0; i < (uint32_t) hfmModel.shapes.size(); ++i) { auto foundMaterialIndex = materialNameToIndex.find(materialNamePerShape[i]); if (foundMaterialIndex != materialNameToIndex.end()) { hfmModel.shapes[i].material = foundMaterialIndex.value(); diff --git a/libraries/model-networking/src/model-networking/ModelCache.cpp b/libraries/model-networking/src/model-networking/ModelCache.cpp index 8b7db5957b..bb911c6914 100644 --- a/libraries/model-networking/src/model-networking/ModelCache.cpp +++ b/libraries/model-networking/src/model-networking/ModelCache.cpp @@ -491,8 +491,8 @@ bool NetworkModel::areTexturesLoaded() const { } const std::shared_ptr NetworkModel::getShapeMaterial(int shapeID) const { - auto materialID = getHFMModel().shapes[shapeID].material; - if ((materialID >= 
0) && (materialID < (int)_materials.size())) { + uint32_t materialID = getHFMModel().shapes[shapeID].material; + if (materialID < (uint32_t)_materials.size()) { return _materials[materialID]; } return nullptr; diff --git a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index 2576b16354..ca26b9739c 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -35,7 +35,7 @@ bool CauterizedModel::updateGeometry() { // initialize the cauterizedDeforemrStates as a copy of the standard deformerStates _cauterizeMeshStates.resize(_meshStates.size()); - for (int i = 0; i < _meshStates.size(); ++i) { + for (int i = 0; i < (int) _meshStates.size(); ++i) { _cauterizeMeshStates[i] = _meshStates[i]; } } @@ -45,8 +45,6 @@ bool CauterizedModel::updateGeometry() { void CauterizedModel::createRenderItemSet() { if (_isCauterized) { assert(isLoaded()); - const auto& meshes = _renderGeometry->getMeshes(); - // We should not have any existing renderItems if we enter this section of code Q_ASSERT(_modelMeshRenderItems.isEmpty()); @@ -67,7 +65,7 @@ void CauterizedModel::createRenderItemSet() { // Run through all of the meshes, and place them into their segregated, but unsorted buckets int shapeID = 0; const auto& shapes = _renderGeometry->getHFMModel().shapes; - for (shapeID; shapeID < shapes.size(); shapeID++) { + for (shapeID; shapeID < (int) shapes.size(); shapeID++) { const auto& shape = shapes[shapeID]; _modelMeshRenderItems << std::make_shared(shared_from_this(), shape.mesh, shape.meshPart, shapeID, transform); @@ -124,7 +122,7 @@ void CauterizedModel::updateClusterMatrices() { glm::vec4(0.0f, 0.0f, 0.0f, 1.0f)); auto cauterizeMatrix = _rig.getJointTransform(_rig.indexOfJoint("Neck")) * zeroScale; - for (int skinDeformerIndex = 0; skinDeformerIndex < _cauterizeMeshStates.size(); skinDeformerIndex++) { + for (int skinDeformerIndex = 0; skinDeformerIndex < (int) _cauterizeMeshStates.size(); skinDeformerIndex++) { Model::MeshState& nonCauterizedState = _meshStates[skinDeformerIndex]; Model::MeshState& state = _cauterizeMeshStates[skinDeformerIndex]; diff --git a/libraries/render-utils/src/MeshPartPayload.cpp b/libraries/render-utils/src/MeshPartPayload.cpp index 3f25d2ef80..6e3fe6ebec 100644 --- a/libraries/render-utils/src/MeshPartPayload.cpp +++ b/libraries/render-utils/src/MeshPartPayload.cpp @@ -210,11 +210,8 @@ ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, in assert(shape.mesh == meshIndex); assert(shape.meshPart == partIndex); - bool useDualQuaternionSkinning = model->getUseDualQuaternionSkinning(); - auto& modelMesh = model->getNetworkModel()->getMeshes().at(_meshIndex); _meshNumVertices = (int)modelMesh->getNumVertices(); - // const Model::MeshState& state = model->getMeshState(_meshIndex); updateMeshPart(modelMesh, partIndex); diff --git a/libraries/render-utils/src/MeshPartPayload.h b/libraries/render-utils/src/MeshPartPayload.h index 5d351e90d4..b207bd9403 100644 --- a/libraries/render-utils/src/MeshPartPayload.h +++ b/libraries/render-utils/src/MeshPartPayload.h @@ -118,7 +118,7 @@ public: int _meshIndex; int _shapeID; - int _deformerIndex; + uint32_t _deformerIndex; bool _isSkinned{ false }; bool _isBlendShaped { false }; diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index 39ae7e6a8f..20568635f0 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -324,7 +324,7 @@ bool 
Model::updateGeometry() { const auto& shapes = hfmModel.shapes; _shapeStates.resize(shapes.size()); - for (int s = 0; s < shapes.size(); ++s) { + for (uint32_t s = 0; s < (uint32_t) shapes.size(); ++s) { auto& shapeState = _shapeStates[s]; shapeState._jointIndex = shapes[s].joint; shapeState._meshIndex = shapes[s].mesh; @@ -334,7 +334,7 @@ bool Model::updateGeometry() { updateShapeStatesFromRig(); const auto& hfmSkinDeformers = hfmModel.skinDeformers; - for (int i = 0; i < hfmSkinDeformers.size(); i++) { + for (uint32_t i = 0; i < (uint32_t) hfmSkinDeformers.size(); i++) { const auto& dynT = hfmSkinDeformers[i]; MeshState state; state.clusterDualQuaternions.resize(dynT.clusters.size()); @@ -741,7 +741,7 @@ bool Model::replaceScriptableModelMeshPart(scriptable::ScriptableModelBasePointe auto itemID = _modelMeshRenderItemIDs[i]; auto& shape = _shapeStates[i]; // TODO: check to see if .partIndex matches too - if (shape._meshIndex == meshIndex) { + if (shape._meshIndex == (uint32_t) meshIndex) { transaction.updateItem(itemID, [=](ModelMeshPartPayload& data) { data.updateMeshPart(mesh, partIndex); }); @@ -1424,8 +1424,7 @@ void Model::updateClusterMatrices() { Transform clusterTransform; Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform); state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform); - } - else { + } else { auto jointMatrix = _rig.getJointTransform(cbmov.jointIndex); glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]); } @@ -1476,7 +1475,6 @@ const render::ItemIDs& Model::fetchRenderItemIDs() const { void Model::createRenderItemSet() { assert(isLoaded()); - const auto& meshes = _renderGeometry->getMeshes(); // We should not have any existing renderItems if we enter this section of code Q_ASSERT(_modelMeshRenderItems.isEmpty()); @@ -1493,9 +1491,8 @@ void Model::createRenderItemSet() { offset.postTranslate(_offset); // Run through all of the meshes, and place them into their segregated, but unsorted buckets - int shapeID = 0; const auto& shapes = _renderGeometry->getHFMModel().shapes; - for (shapeID; shapeID < shapes.size(); shapeID++) { + for (uint32_t shapeID = 0; shapeID < shapes.size(); shapeID++) { const auto& shape = shapes[shapeID]; _modelMeshRenderItems << std::make_shared(shared_from_this(), shape.mesh, shape.meshPart, shapeID, transform); From ca164375f1b8d41ae5c41b21b500585602a82ffe Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 18 Oct 2019 11:56:43 -0700 Subject: [PATCH 069/121] New skinning for GLTF --- libraries/animation/src/AnimSkeleton.cpp | 8 +- libraries/fbx/src/GLTFSerializer.cpp | 176 ++++++++++++----------- libraries/fbx/src/GLTFSerializer.h | 2 +- 3 files changed, 97 insertions(+), 89 deletions(-) diff --git a/libraries/animation/src/AnimSkeleton.cpp b/libraries/animation/src/AnimSkeleton.cpp index bae1fb5b69..0a7881abd8 100644 --- a/libraries/animation/src/AnimSkeleton.cpp +++ b/libraries/animation/src/AnimSkeleton.cpp @@ -20,13 +20,7 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) { _geometryOffset = hfmModel.offset; - // convert to std::vector of joints - std::vector joints; - joints.reserve(hfmModel.joints.size()); - for (auto& joint : hfmModel.joints) { - joints.push_back(joint); - } - buildSkeletonFromJoints(joints, hfmModel.jointRotationOffsets); + buildSkeletonFromJoints(hfmModel.joints, hfmModel.jointRotationOffsets); // we make a copy of the inverseBindMatrices in order to prevent mutating the model bind pose // when we are dealing 
with a joint offset in the model diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index da48c3d2e3..ea31f74312 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1014,6 +1014,9 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& const auto& parentJoint = hfmModel.joints[(size_t)joint.parentIndex]; joint.transform = parentJoint.transform * joint.transform; joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; + } else { + joint.transform = hfmModel.offset * joint.transform; + joint.globalTransform = hfmModel.offset * joint.globalTransform; } joint.name = node.name; @@ -1034,6 +1037,9 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& std::vector globalBindTransforms; jointInverseBindTransforms.resize(numNodes); globalBindTransforms.resize(numNodes); + // Lookup between the GLTF mesh and the skin + std::vector gltfMeshToSkin; + gltfMeshToSkin.resize(_file.meshes.size(), -1); hfmModel.hasSkeletonJoints = !_file.skins.isEmpty(); if (hfmModel.hasSkeletonJoints) { @@ -1042,7 +1048,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& for (int jointIndex = 0; jointIndex < numNodes; ++jointIndex) { int nodeIndex = jointIndex; - auto joint = hfmModel.joints[jointIndex]; + auto& joint = hfmModel.joints[jointIndex]; for (int s = 0; s < _file.skins.size(); ++s) { const auto& skin = _file.skins[s]; @@ -1068,7 +1074,41 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * glm::inverse(jointInverseBindTransforms[jointIndex])); hfmModel.bindExtents.addPoint(bindTranslation); } - hfmModel.joints[jointIndex] = joint; + } + + std::vector skinToRootJoint; + skinToRootJoint.resize(_file.skins.size(), 0); + for (int jointIndex = 0; jointIndex < numNodes; ++jointIndex) { + const auto& node = _file.nodes[jointIndex]; + if (node.skin != -1) { + skinToRootJoint[node.skin] = jointIndex; + if (node.mesh != -1) { + gltfMeshToSkin[node.mesh] = node.skin; + } + } + } + + for (int skinIndex = 0; skinIndex < _file.skins.size(); ++skinIndex) { + const auto& skin = _file.skins[skinIndex]; + hfmModel.skinDeformers.emplace_back(); + auto& skinDeformer = hfmModel.skinDeformers.back(); + + // Add the nodes being referred to for skinning + for (int skinJointIndex : skin.joints) { + hfm::Cluster cluster; + cluster.jointIndex = skinJointIndex; + cluster.inverseBindMatrix = jointInverseBindTransforms[skinJointIndex]; + cluster.inverseBindTransform = Transform(cluster.inverseBindMatrix); + skinDeformer.clusters.push_back(cluster); + } + + // Always append a cluster referring to the root joint at the end + int rootJointIndex = skinToRootJoint[skinIndex]; + hfm::Cluster root; + root.jointIndex = rootJointIndex; + root.inverseBindMatrix = jointInverseBindTransforms[root.jointIndex]; + root.inverseBindTransform = Transform(root.inverseBindMatrix); + skinDeformer.clusters.push_back(root); } } @@ -1095,30 +1135,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& templateShapePerPrimPerGLTFMesh.emplace_back(); std::vector& templateShapePerPrim = templateShapePerPrimPerGLTFMesh.back(); - // TODO: Rewrite GLTF skinning definition - if (!hfmModel.hasSkeletonJoints) { - HFMCluster cluster; -#if 0 - cluster.jointIndex = nodeIndex; -#endif - cluster.inverseBindMatrix = glm::mat4(); - cluster.inverseBindTransform = 
Transform(cluster.inverseBindMatrix); - meshPtr->clusters.append(cluster); - } else { // skinned model - for (int j = 0; j < numNodes; ++j) { - HFMCluster cluster; - cluster.jointIndex = j; - cluster.inverseBindMatrix = jointInverseBindTransforms[j]; - cluster.inverseBindTransform = Transform(cluster.inverseBindMatrix); - meshPtr->clusters.append(cluster); - } - } - HFMCluster root; - root.jointIndex = 0; - root.inverseBindMatrix = jointInverseBindTransforms[root.jointIndex]; - root.inverseBindTransform = Transform(root.inverseBindMatrix); - meshPtr->clusters.append(root); - QSet primitiveAttributes; if (!gltfMesh.primitives.empty()) { for (const auto& attribute : gltfMesh.primitives[0].attributes.values.keys()) { @@ -1333,35 +1349,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } } - if (joints.size() == partVerticesCount * jointStride) { - for (int n = 0; n < joints.size(); n += jointStride) { - clusterJoints.push_back(joints[n]); - if (jointStride > 1) { - clusterJoints.push_back(joints[n + 1]); - if (jointStride > 2) { - clusterJoints.push_back(joints[n + 2]); - if (jointStride > 3) { - clusterJoints.push_back(joints[n + 3]); - } else { - clusterJoints.push_back(0); - } - } else { - clusterJoints.push_back(0); - clusterJoints.push_back(0); - } - } else { - clusterJoints.push_back(0); - clusterJoints.push_back(0); - clusterJoints.push_back(0); - } - } - } else if (primitiveAttributes.contains("JOINTS_0")) { - for (int i = 0; i < partVerticesCount; ++i) { - for (int j = 0; j < 4; ++j) { - clusterJoints.push_back(0); - } - } - } + const int WEIGHTS_PER_VERTEX = 4; if (weights.size() == partVerticesCount * weightStride) { for (int n = 0; n < weights.size(); n += weightStride) { @@ -1388,40 +1376,65 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } else if (primitiveAttributes.contains("WEIGHTS_0")) { for (int i = 0; i < partVerticesCount; ++i) { clusterWeights.push_back(1.0f); - for (int j = 0; j < 4; ++j) { + for (int j = 0; j < WEIGHTS_PER_VERTEX; ++j) { clusterWeights.push_back(0.0f); } } } - // Build weights (adapted from FBXSerializer.cpp) - if (hfmModel.hasSkeletonJoints) { - const int WEIGHTS_PER_VERTEX = 4; - const float ALMOST_HALF = 0.499f; - int numVertices = mesh.vertices.size() - prevMeshVerticesCount; - - // Append new cluster indices and weights for this mesh part - size_t prevMeshClusterWeightCount = mesh.clusterWeights.size(); - for (int i = 0; i < numVertices * WEIGHTS_PER_VERTEX; ++i) { - mesh.clusterIndices.push_back(mesh.clusters.size() - 1); - mesh.clusterWeights.push_back(0); + // Compress floating point weights to uint16_t for graphics runtime + // TODO: If the GLTF skinning weights are already in integer format, we should just copy the data + if (!clusterWeights.empty()) { + size_t numWeights = 4 * (mesh.vertices.size() - (uint32_t)prevMeshVerticesCount); + size_t newWeightsStart = mesh.clusterWeights.size(); + size_t newWeightsEnd = newWeightsStart + numWeights; + mesh.clusterWeights.reserve(newWeightsEnd); + for (int weightIndex = 0; weightIndex < clusterWeights.size(); ++weightIndex) { + // Per the GLTF specification + uint16_t weight = std::round(clusterWeights[weightIndex] * 65535.0); + mesh.clusterWeights.push_back(weight); } + } - // normalize and compress to 16-bits - for (int i = 0; i < numVertices; ++i) { - int j = i * WEIGHTS_PER_VERTEX; - - float totalWeight = 0.0f; - for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) { - totalWeight += clusterWeights[k]; - } - if (totalWeight > 0.0f) 
{ - float weightScalingFactor = (float)(UINT16_MAX) / totalWeight; - for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) { - mesh.clusterWeights[prevMeshClusterWeightCount + k] = (uint16_t)(weightScalingFactor * clusterWeights[k] + ALMOST_HALF); + if (joints.size() == partVerticesCount * jointStride) { + for (int n = 0; n < joints.size(); n += jointStride) { + mesh.clusterIndices.push_back(joints[n]); + if (jointStride > 1) { + mesh.clusterIndices.push_back(joints[n + 1]); + if (jointStride > 2) { + mesh.clusterIndices.push_back(joints[n + 2]); + if (jointStride > 3) { + mesh.clusterIndices.push_back(joints[n + 3]); + } else { + mesh.clusterIndices.push_back(0); + } + } else { + mesh.clusterIndices.push_back(0); + mesh.clusterIndices.push_back(0); } } else { - mesh.clusterWeights[prevMeshClusterWeightCount + j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); + mesh.clusterIndices.push_back(0); + mesh.clusterIndices.push_back(0); + mesh.clusterIndices.push_back(0); + } + } + } else if (primitiveAttributes.contains("JOINTS_0")) { + for (int i = 0; i < partVerticesCount; ++i) { + for (int j = 0; j < 4; ++j) { + mesh.clusterIndices.push_back(0); + } + } + } + + if (!mesh.clusterIndices.empty()) { + int skinIndex = gltfMeshToSkin[gltfMeshIndex]; + if (skinIndex != -1) { + const auto& deformer = hfmModel.skinDeformers[(size_t)skinIndex]; + std::vector oldToNew; + oldToNew.resize(_file.nodes.size()); + for (uint16_t clusterIndex = 0; clusterIndex < deformer.clusters.size() - 1; ++clusterIndex) { + const auto& cluster = deformer.clusters[clusterIndex]; + oldToNew[(size_t)cluster.jointIndex] = clusterIndex; } } } @@ -1523,8 +1536,9 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& const auto& templateShape = templateShapePerPrim[primIndex]; hfmModel.shapes.push_back(templateShape); auto& hfmShape = hfmModel.shapes.back(); - // Everything else is already defined (mesh, meshPart, material), so just define the new transform + // Everything else is already defined (mesh, meshPart, material), so just define the new transform and deformer if present hfmShape.joint = nodeIndex; + hfmShape.skinDeformer = node.skin != -1 ? 
node.skin : hfm::UNDEFINED_KEY; } } diff --git a/libraries/fbx/src/GLTFSerializer.h b/libraries/fbx/src/GLTFSerializer.h index 78dc9b9a37..edecde6985 100755 --- a/libraries/fbx/src/GLTFSerializer.h +++ b/libraries/fbx/src/GLTFSerializer.h @@ -46,7 +46,7 @@ struct GLTFNode { QVector scale; QVector matrix; glm::mat4 transform; - int skin; + int skin { -1 }; QVector skeletons; QString jointName; QMap defined; From d4970525053e3a60a5b10933c3edf55cf9397e2e Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 23 Oct 2019 17:13:34 -0700 Subject: [PATCH 070/121] Fix GLTF claiming to have no cluster weights when it is skinned --- libraries/fbx/src/GLTFSerializer.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index ea31f74312..021821befd 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1394,6 +1394,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& uint16_t weight = std::round(clusterWeights[weightIndex] * 65535.0); mesh.clusterWeights.push_back(weight); } + mesh.clusterWeightsPerVertex = WEIGHTS_PER_VERTEX; } if (joints.size() == partVerticesCount * jointStride) { From 1fe1321b6ca8b584e830ab7ec700b9312a3dca90 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 24 Oct 2019 10:53:46 -0700 Subject: [PATCH 071/121] Disable GLTF skinning --- libraries/fbx/src/GLTFSerializer.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 021821befd..456bad9346 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1543,6 +1543,24 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } } + // TODO: Fix skinning and remove this workaround which disables skinning + { + std::vector meshToRootJoint; + meshToRootJoint.resize(hfmModel.meshes.size(), -1); + std::vector meshToClusterSize; + meshToClusterSize.resize(hfmModel.meshes.size()); + for (auto& shape : hfmModel.shapes) { + shape.skinDeformer = hfm::UNDEFINED_KEY; + } + + for (auto& mesh : hfmModel.meshes) { + mesh.clusterWeights.clear(); + mesh.clusterIndices.clear(); + mesh.clusterWeightsPerVertex = 0; + } + + } + return true; } From 83229db45859c7a2a75b153c098f79400225fd98 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 24 Oct 2019 14:59:09 -0700 Subject: [PATCH 072/121] Fix GLTFSerializer not reading tangents --- libraries/fbx/src/GLTFSerializer.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 456bad9346..16d2962ecc 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1315,8 +1315,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& float tanW = tangentStride == 4 ? 
tangents[n + 3] : 1; mesh.tangents.push_back(glm::vec3(tanW * tangents[n], tangents[n + 1], tanW * tangents[n + 2])); } - } else if (primitiveAttributes.contains("TANGENT")) { - mesh.tangents.resize(mesh.tangents.size() + partVerticesCount); } if (texcoords.size() == partVerticesCount * TEX_COORD_STRIDE) { @@ -1874,7 +1872,6 @@ bool GLTFSerializer::addArrayFromAttribute(GLTFVertexAttribute::Value vertexAttr qWarning(modelformat) << "Invalid accessor type on glTF TANGENT data for model " << _url; return false; } - break; if (!addArrayFromAccessor(accessor, outarray)) { qWarning(modelformat) << "There was a problem reading glTF TANGENT data for model " << _url; From 4e0db5d6414e621c36ee3e4e8172dadb7dad51b6 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 24 Oct 2019 15:45:06 -0700 Subject: [PATCH 073/121] Fix build warnings --- libraries/animation/src/AnimSkeleton.cpp | 2 +- libraries/fbx/src/GLTFSerializer.cpp | 3 ++- libraries/fbx/src/OBJSerializer.cpp | 4 ++-- libraries/render-utils/src/CauterizedModel.cpp | 3 +-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/libraries/animation/src/AnimSkeleton.cpp b/libraries/animation/src/AnimSkeleton.cpp index 5074ac0776..b60fc42f89 100644 --- a/libraries/animation/src/AnimSkeleton.cpp +++ b/libraries/animation/src/AnimSkeleton.cpp @@ -28,7 +28,7 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) { const auto& defor = hfmModel.skinDeformers[i]; std::vector dummyClustersList; - for (int j = 0; j < (uint32_t) defor.clusters.size(); j++) { + for (uint32_t j = 0; j < (uint32_t)defor.clusters.size(); j++) { // cast into a non-const reference, so we can mutate the FBXCluster HFMCluster& cluster = const_cast(defor.clusters.at(j)); diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 16d2962ecc..115e7e0ca7 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1389,7 +1389,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& mesh.clusterWeights.reserve(newWeightsEnd); for (int weightIndex = 0; weightIndex < clusterWeights.size(); ++weightIndex) { // Per the GLTF specification - uint16_t weight = std::round(clusterWeights[weightIndex] * 65535.0); + uint16_t weight = std::round(clusterWeights[weightIndex] * 65535.0f); mesh.clusterWeights.push_back(weight); } mesh.clusterWeightsPerVertex = WEIGHTS_PER_VERTEX; @@ -1542,6 +1542,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& } // TODO: Fix skinning and remove this workaround which disables skinning + // TODO: Restore after testing { std::vector meshToRootJoint; meshToRootJoint.resize(hfmModel.meshes.size(), -1); diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index 57fcf79aac..445c259650 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -1008,8 +1008,8 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V modelMaterial->setOpacity(hfmMaterial.opacity); } - // GO over the shapes once more to assign hte material index correctly - for (int i = 0; i < (uint32_t) hfmModel.shapes.size(); ++i) { + // GO over the shapes once more to assign the material index correctly + for (uint32_t i = 0; i < (uint32_t)hfmModel.shapes.size(); ++i) { auto foundMaterialIndex = materialNameToIndex.find(materialNamePerShape[i]); if (foundMaterialIndex != materialNameToIndex.end()) { hfmModel.shapes[i].material = foundMaterialIndex.value(); diff --git 
a/libraries/render-utils/src/CauterizedModel.cpp b/libraries/render-utils/src/CauterizedModel.cpp index ca26b9739c..69710b2ed1 100644 --- a/libraries/render-utils/src/CauterizedModel.cpp +++ b/libraries/render-utils/src/CauterizedModel.cpp @@ -63,9 +63,8 @@ void CauterizedModel::createRenderItemSet() { Transform::mult(transform, transform, offset); // Run through all of the meshes, and place them into their segregated, but unsorted buckets - int shapeID = 0; const auto& shapes = _renderGeometry->getHFMModel().shapes; - for (shapeID; shapeID < (int) shapes.size(); shapeID++) { + for (int shapeID = 0; shapeID < (int) shapes.size(); shapeID++) { const auto& shape = shapes[shapeID]; _modelMeshRenderItems << std::make_shared(shared_from_this(), shape.mesh, shape.meshPart, shapeID, transform); From fa51ec3dafa766734e77e0a1aa7733fbe601e7c0 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 24 Oct 2019 18:02:19 -0700 Subject: [PATCH 074/121] (WIP) (has debug) Fix baked FBX materials --- libraries/fbx/src/FBXSerializer.cpp | 5 +++-- libraries/fbx/src/FBXSerializer_Mesh.cpp | 14 ++++---------- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 2c03c3c3ae..8c1204d783 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1562,7 +1562,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } // For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart if (!extracted.materialIDPerMeshPart.empty()) { - /* if (partShapes.size() == extracted.materialIDPerMeshPart.size()) { + // TODO: Verify this code works as intended by testing baked FBX models, then remove the verification/debug + if (partShapes.size() == extracted.materialIDPerMeshPart.size()) { for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) { hfm::Shape& shape = partShapes[i]; const std::string& materialID = extracted.materialIDPerMeshPart[i]; @@ -1576,7 +1577,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const qCDebug(modelformat) << "mesh.parts[" << p <<"] is " << mesh.parts[p].materialID; } qCDebug(modelformat) << "partShapes is not the same size as materialIDPerMeshPart ?"; - }*/ + } } // find the clusters with which the mesh is associated diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index 37f2c9ec1b..e9884f3087 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -369,11 +369,6 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me } } - if (dracoMeshNodeVersion >= 2) { - // Define the materialIDs now - data.extracted.materialIDPerMeshPart = dracoMaterialList; - } - // load the draco mesh from the FBX and create a draco::Mesh draco::Decoder decoder; draco::DecoderBuffer decodedBuffer; @@ -491,15 +486,14 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me // grab or setup the HFMMeshPart for the part this face belongs to int& partIndexPlusOne = materialTextureParts[materialTexture]; if (partIndexPlusOne == 0) { - data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1); + data.extracted.mesh.parts.emplace_back(); HFMMeshPart& part = data.extracted.mesh.parts.back(); // Figure out if this is the older way of defining the per-part material for baked FBX if (dracoMeshNodeVersion >= 2) { - // Define 
the materialID now - if (materialID < dracoMaterialList.size()) { - part.materialID = QString(dracoMaterialList[materialID].c_str()); - } + // Define the materialID for this mesh part index + uint16_t safeMaterialID = materialID < dracoMaterialList.size() ? materialID : 0; + data.extracted.materialIDPerMeshPart.push_back(dracoMaterialList[materialID].c_str()); } else { // Define the materialID later, based on the order of first appearance of the materials in the _connectionChildMap data.extracted.partMaterialTextures.append(materialTexture); From 77dcad21db62f70cd7d4932393981d9601eb881f Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 25 Oct 2019 09:14:12 -0700 Subject: [PATCH 075/121] Fix OBJSerializer crash with empty groupMaterialName --- libraries/fbx/src/OBJSerializer.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index 445c259650..6f3f67a3bf 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -741,8 +741,8 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V needsMaterialLibrary = groupMaterialName != SMART_DEFAULT_MATERIAL_NAME; } materials[groupMaterialName] = material; - materialNamePerShape.push_back(groupMaterialName); } + materialNamePerShape.push_back(groupMaterialName); hfm::Shape shape; @@ -1010,9 +1010,12 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V // GO over the shapes once more to assign the material index correctly for (uint32_t i = 0; i < (uint32_t)hfmModel.shapes.size(); ++i) { - auto foundMaterialIndex = materialNameToIndex.find(materialNamePerShape[i]); - if (foundMaterialIndex != materialNameToIndex.end()) { - hfmModel.shapes[i].material = foundMaterialIndex.value(); + const auto& materialName = materialNamePerShape[i]; + if (!materialName.isEmpty()) { + auto foundMaterialIndex = materialNameToIndex.find(materialName); + if (foundMaterialIndex != materialNameToIndex.end()) { + hfmModel.shapes[i].material = foundMaterialIndex.value(); + } } } From 3de61b4a183bea0c8fd3adb703a791d4d60f884b Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 28 Oct 2019 10:59:44 -0700 Subject: [PATCH 076/121] Properly calculate shape extents for FBX aside from geometricTransform/upAxis, speed up model extents calculation --- libraries/fbx/src/FBXSerializer.cpp | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 8c1204d783..39531261c0 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1492,15 +1492,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } const uint32_t transformIndex = (indexOfModelID == -1) ? 
0 : (uint32_t)indexOfModelID; - // accumulate local transforms - glm::mat4 globalTransform = hfmModel.joints[transformIndex].globalTransform; - // compute the mesh extents from the transformed vertices - for (const glm::vec3& vertex : mesh.vertices) { - glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f)); - hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex); - hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex); - } - // partShapes will be added to meshShapes at the very end std::vector partShapes { mesh.parts.size() }; for (uint32_t i = 0; i < (uint32_t)partShapes.size(); ++i) { @@ -1508,6 +1499,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const shape.mesh = meshIndex; shape.meshPart = i; shape.joint = transformIndex; + + hfm::calculateExtentsForShape(shape, hfmModel.meshes, hfmModel.joints); auto matName = mesh.parts[i].materialID; auto materialIt = materialNameToID.find(matName.toStdString()); @@ -1516,14 +1509,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } else { qCDebug(modelformat) << "Unknown material ? " << matName; } - - shape.transformedExtents.reset(); - // compute the shape extents from the transformed vertices - for (const glm::vec3& vertex : mesh.vertices) { - glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f)); - shape.transformedExtents.minimum = glm::min(shape.transformedExtents.minimum, transformedVertex); - shape.transformedExtents.maximum = glm::max(shape.transformedExtents.maximum, transformedVertex); - } } // For FBX_DRACO_MESH_VERSION < 2, or unbaked models, get materials from the partMaterialTextures @@ -1610,6 +1595,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const hfmCluster.jointIndex = (uint32_t)indexOfJointID; } + const glm::mat4 globalTransform = hfmModel.joints[transformIndex].globalTransform; hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * globalTransform; // slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and @@ -1718,6 +1704,9 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } + // TODO: The ordering of shape extent calculations is wrong. The entire mesh vertex set is transformed if there is a geometric offset, which would break instancing for FBX models with a geometricOffset. 
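For reference, the new per-shape helper computes extents roughly as sketched below, mirroring the hfm::calculateExtentsForShape body that appears later in this series (HFMModelMath.cpp); the model-level call on the next line is assumed simply to union each shape.transformedExtents into hfmModel.meshExtents:

    // Sketch only: per-shape extents, replacing the removed whole-mesh vertex loop above.
    // Every vertex referenced by the shape's mesh part is transformed by the joint
    // (transform node) the shape is attached to, then accumulated into the extents.
    Extents shapeExtents;
    shapeExtents.reset();
    const HFMMesh& mesh = meshes[shape.mesh];
    const glm::mat4 globalTransform = joints[shape.joint].globalTransform;
    forEachIndex(mesh.parts[shape.meshPart], [&](int32_t idx) {
        if (idx < (int32_t)mesh.vertices.size()) {
            const glm::vec3& vertex = mesh.vertices[idx];
            shapeExtents.addPoint(glm::vec3(globalTransform * glm::vec4(vertex, 1.0f)));
        }
    });
    shape.transformedExtents = shapeExtents;
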
+ hfm::calculateExtentsForModel(hfmModel.meshExtents, hfmModel.shapes); + if (applyUpAxisZRotation) { hfmModelPtr->meshExtents.transform(glm::mat4_cast(upAxisZRotation)); hfmModelPtr->bindExtents.transform(glm::mat4_cast(upAxisZRotation)); From 9fd76cac843c9e25d8cd5cacc0685433a3ef6767 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 28 Oct 2019 11:50:53 -0700 Subject: [PATCH 077/121] Fix FBX UpAxis transform not being applied to skinned models --- libraries/fbx/src/FBXSerializer.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 39531261c0..50580938e8 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1312,6 +1312,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.preTransform = fbxModel.preTransform; joint.preRotation = fbxModel.preRotation; joint.rotation = fbxModel.rotation; + glm::quat rotationWithoutUpZAxis = fbxModel.rotation; joint.postRotation = fbxModel.postRotation; joint.postTransform = fbxModel.postTransform; joint.rotationMin = fbxModel.rotationMin; @@ -1379,7 +1380,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // Now that we've initialized the joint, we can define the transform // modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate joint.localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; - joint.globalTransform = joint.localTransform; + if (applyUpAxisZRotation) { + joint.globalTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * rotationWithoutUpZAxis * joint.postRotation) * joint.postTransform; + } else { + joint.globalTransform = joint.localTransform; + } if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; // SG Change: i think this not correct and the [parent]*[local] is the correct answer here From 993aa06301e2f559a8b13b298ace72d691cfa1d5 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 28 Oct 2019 13:05:27 -0700 Subject: [PATCH 078/121] Let hfm prep calculate OBJ model extents --- libraries/fbx/src/OBJSerializer.cpp | 7 ------- 1 file changed, 7 deletions(-) diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index 6f3f67a3bf..62c81fe360 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -675,7 +675,6 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V _url = url; bool combineParts = mapping.value("combineParts").toBool(); - hfmModel.meshExtents.reset(); hfmModel.meshes.push_back(HFMMesh()); std::vector materialNamePerShape; @@ -826,12 +825,6 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V } } } - - mesh.meshExtents.reset(); - foreach(const glm::vec3& vertex, mesh.vertices) { - mesh.meshExtents.addPoint(vertex); - hfmModel.meshExtents.addPoint(vertex); - } // hfmDebugDump(hfmModel); } catch(const std::exception& e) { From d961c4a8927aa0c1c013ec3e5171ddbd643c8a72 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 28 Oct 2019 15:32:45 -0700 Subject: [PATCH 079/121] Use safeMaterialID in FBXSerializer_Mesh.cpp --- 
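This one-line change completes the bounds guard introduced in PATCH 074, where safeMaterialID was computed but the unclamped index was still used to look up the Draco material name. After this patch the idiom reads:

    // Guard against a malformed Draco mesh part whose material index is out of
    // range for dracoMaterialList; fall back to the first material.
    uint16_t safeMaterialID = materialID < dracoMaterialList.size() ? materialID : 0;
    data.extracted.materialIDPerMeshPart.push_back(dracoMaterialList[safeMaterialID].c_str());
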
libraries/fbx/src/FBXSerializer_Mesh.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index e9884f3087..51104ee74f 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -493,7 +493,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me if (dracoMeshNodeVersion >= 2) { // Define the materialID for this mesh part index uint16_t safeMaterialID = materialID < dracoMaterialList.size() ? materialID : 0; - data.extracted.materialIDPerMeshPart.push_back(dracoMaterialList[materialID].c_str()); + data.extracted.materialIDPerMeshPart.push_back(dracoMaterialList[safeMaterialID].c_str()); } else { // Define the materialID later, based on the order of first appearance of the materials in the _connectionChildMap data.extracted.partMaterialTextures.append(materialTexture); From f8017d28ff7d6e3dbb5be2ab85acd07691948709 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 29 Oct 2019 09:20:17 -0700 Subject: [PATCH 080/121] Separate cluster-related transform from joint.globalTransform --- libraries/fbx/src/FBXSerializer.cpp | 36 ++++++++++++++--------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 50580938e8..697cd090ca 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1302,6 +1302,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const bool needMixamoHack = hfmModel.applicationName == "mixamo.com"; + std::vector globalTransformForClusters; + globalTransformForClusters.reserve((size_t)modelIDs.size()); for (const QString& modelID : modelIDs) { const FBXModel& fbxModel = fbxModels[modelID]; HFMJoint joint; @@ -1312,7 +1314,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.preTransform = fbxModel.preTransform; joint.preRotation = fbxModel.preRotation; joint.rotation = fbxModel.rotation; - glm::quat rotationWithoutUpZAxis = fbxModel.rotation; joint.postRotation = fbxModel.postRotation; joint.postTransform = fbxModel.postTransform; joint.rotationMin = fbxModel.rotationMin; @@ -1324,6 +1325,19 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.geometricScaling = fbxModel.geometricScaling; joint.isSkeletonJoint = fbxModel.isLimbNode; hfmModel.hasSkeletonJoints = (hfmModel.hasSkeletonJoints || joint.isSkeletonJoint); + + glm::quat jointBindCombinedRotation = joint.preRotation * joint.rotation * joint.postRotation; + glm::mat4 globalTransformForCluster = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(jointBindCombinedRotation) * joint.postTransform; + if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { + const glm::mat4& parentGlobalTransformForCluster = globalTransformForClusters[joint.parentIndex]; + globalTransformForCluster = parentGlobalTransformForCluster * globalTransformForCluster; + } + if (joint.hasGeometricOffset) { + glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); + globalTransformForCluster = globalTransformForCluster * geometricOffset; + } + globalTransformForClusters.push_back(globalTransformForCluster); + if (applyUpAxisZRotation && joint.parentIndex == -1) { joint.rotation *= upAxisZRotation; joint.translation = 
upAxisZRotation * joint.translation; @@ -1380,11 +1394,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // Now that we've initialized the joint, we can define the transform // modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate joint.localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; - if (applyUpAxisZRotation) { - joint.globalTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * rotationWithoutUpZAxis * joint.postRotation) * joint.postTransform; - } else { - joint.globalTransform = joint.localTransform; - } + joint.globalTransform = joint.localTransform; if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; // SG Change: i think this not correct and the [parent]*[local] is the correct answer here @@ -1600,8 +1610,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const hfmCluster.jointIndex = (uint32_t)indexOfJointID; } - const glm::mat4 globalTransform = hfmModel.joints[transformIndex].globalTransform; - hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * globalTransform; + const glm::mat4& jointBindTransform = globalTransformForClusters[transformIndex]; + hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * jointBindTransform; // slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and // sometimes floating point fuzz can be introduced after the inverse. @@ -1712,16 +1722,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // TODO: The ordering of shape extent calculations is wrong. The entire mesh vertex set is transformed if there is a geometric offset, which would break instancing for FBX models with a geometricOffset. 
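For context on why the fix-ups below can go: after this patch the up-axis correction is baked into the root joints (and everything derived from joint.globalTransform), while the skinning bind pose comes from the separate globalTransformForClusters list, which is computed before that adjustment. The bind-matrix construction earlier in this diff reads:

    // Bind pose for a skin cluster, taken from the dedicated per-joint list instead
    // of joint.globalTransform, so later joint adjustments (up-axis correction and,
    // after PATCH 085, hfmModel.offset) do not end up in the bind matrices.
    const glm::mat4& jointBindTransform = globalTransformForClusters[transformIndex];
    hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * jointBindTransform;
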
hfm::calculateExtentsForModel(hfmModel.meshExtents, hfmModel.shapes); - if (applyUpAxisZRotation) { - hfmModelPtr->meshExtents.transform(glm::mat4_cast(upAxisZRotation)); - hfmModelPtr->bindExtents.transform(glm::mat4_cast(upAxisZRotation)); - for (auto& shape : hfmModelPtr->shapes) { - shape.transformedExtents.transform(glm::mat4_cast(upAxisZRotation)); - } - for (auto& joint : hfmModelPtr->joints) { - joint.globalTransform = joint.globalTransform * glm::mat4_cast(upAxisZRotation); - } - } return hfmModelPtr; } From a70f0f27918e315cb295bb2171dcce22445f875c Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 29 Oct 2019 10:47:27 -0700 Subject: [PATCH 081/121] (WIP, has debug) Fix FBXSerializer applying geometric transform inconsistently, let HFM prep calculate FBX extents --- libraries/fbx/src/FBXSerializer.cpp | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 697cd090ca..d3722b2d50 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1326,6 +1326,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.isSkeletonJoint = fbxModel.isLimbNode; hfmModel.hasSkeletonJoints = (hfmModel.hasSkeletonJoints || joint.isSkeletonJoint); + // First, calculate the FBX-specific transform used for inverse bind transform calculations + glm::quat jointBindCombinedRotation = joint.preRotation * joint.rotation * joint.postRotation; glm::mat4 globalTransformForCluster = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(jointBindCombinedRotation) * joint.postTransform; if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { @@ -1338,10 +1340,16 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } globalTransformForClusters.push_back(globalTransformForCluster); + // Then, calculate the transforms proper + if (applyUpAxisZRotation && joint.parentIndex == -1) { joint.rotation *= upAxisZRotation; joint.translation = upAxisZRotation * joint.translation; } + if (joint.hasGeometricOffset) { + glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); + joint.postTransform *= geometricOffset; + } glm::quat combinedRotation = joint.preRotation * joint.rotation * joint.postRotation; if (joint.parentIndex == -1) { joint.transform = hfmModel.offset * glm::translate(joint.translation) * joint.preTransform * @@ -1405,12 +1413,15 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // However, we must be careful when modifying the behavior of FBXSerializer. // So, we leave this here, as a breakpoint for debugging, or stub for implementation. // qCDebug(modelformat) << "Geometric offset encountered on non-leaf node. 
jointIndex: " << jointIndex << ", modelURL: " << url; - // glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); - // globalTransform = globalTransform * glm::inverse(geometricOffset); + // glm::mat4 parentGeometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); + // joint.preTransform = glm::inverse(parentGeometricOffset) * joint.preTransform; } } - if (joint.hasGeometricOffset) { + // TODO: Revert after testing + if (false) { + //if (joint.hasGeometricOffset) { glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); + joint.transform = joint.transform * geometricOffset; joint.globalTransform = joint.globalTransform * geometricOffset; } @@ -1514,8 +1525,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const shape.mesh = meshIndex; shape.meshPart = i; shape.joint = transformIndex; - - hfm::calculateExtentsForShape(shape, hfmModel.meshes, hfmModel.joints); auto matName = mesh.parts[i].materialID; auto materialIt = materialNameToID.find(matName.toStdString()); @@ -1687,12 +1696,12 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const HFMJoint& joint = hfmModel.joints[transformIndex]; // Apply geometric offset, if present, by transforming the vertices directly - if (joint.hasGeometricOffset) { + /*if (joint.hasGeometricOffset) { glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); for (int i = 0; i < mesh.vertices.size(); i++) { mesh.vertices[i] = transformPoint(geometricOffset, mesh.vertices[i]); } - } + }*/ } // Store the parts for this mesh (or instance of this mesh, as the case may be) @@ -1719,9 +1728,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } - // TODO: The ordering of shape extent calculations is wrong. The entire mesh vertex set is transformed if there is a geometric offset, which would break instancing for FBX models with a geometricOffset. - hfm::calculateExtentsForModel(hfmModel.meshExtents, hfmModel.shapes); - return hfmModelPtr; } From 8686dcac134bff46fe7afda8efee64af89750831 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 29 Oct 2019 11:25:20 -0700 Subject: [PATCH 082/121] Remove FBXSerializer debug --- libraries/fbx/src/FBXSerializer.cpp | 61 ++++------------------------- 1 file changed, 7 insertions(+), 54 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index d3722b2d50..d8d687f7ff 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1405,8 +1405,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.globalTransform = joint.localTransform; if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; - // SG Change: i think this not correct and the [parent]*[local] is the correct answer here - //joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; joint.globalTransform = parentJoint.globalTransform * joint.localTransform; if (parentJoint.hasGeometricOffset) { // Per the FBX standard, geometric offset should not propagate to children. 
@@ -1417,28 +1415,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // joint.preTransform = glm::inverse(parentGeometricOffset) * joint.preTransform; } } - // TODO: Revert after testing - if (false) { - //if (joint.hasGeometricOffset) { - glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); - joint.transform = joint.transform * geometricOffset; - joint.globalTransform = joint.globalTransform * geometricOffset; - } - - // TODO: Remove these lines, just here to make sure we are not breaking the transform computation - // QString modelID = fbxModels.contains(it.key()) ? it.key() : _connectionParentMap.value(it.key()); - glm::mat4 anotherModelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url); - auto col0 = (glm::epsilonNotEqual(anotherModelTransform[0], joint.globalTransform[0], 0.001f)); - auto col1 = (glm::epsilonNotEqual(anotherModelTransform[1], joint.globalTransform[1], 0.001f)); - auto col2 = (glm::epsilonNotEqual(anotherModelTransform[2], joint.globalTransform[2], 0.001f)); - auto col3 = (glm::epsilonNotEqual(anotherModelTransform[3], joint.globalTransform[3], 0.001f)); - if ( glm::any(col0) - || glm::any(col1) - || glm::any(col2) - || glm::any(col3)) { - anotherModelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url); - // joint.globalTransform = anotherModelTransform; - } hfmModel.joints.push_back(joint); } @@ -1504,12 +1480,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // meshShapes will be added to hfmModel at the very end std::vector meshShapes; meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size()); - if (instanceModelIDs.size() > 1) { - qCDebug(modelformat) << "Mesh " << meshID << " made of " << mesh.parts.size() << " parts is instanced " << instanceModelIDs.size() << " times!!!"; - } - if (mesh.parts.size() < 1) { - qCDebug(modelformat) << "Mesh " << meshID << " made of " << mesh.parts.size() << " parts !!!!! "; - } for (const QString& modelID : instanceModelIDs) { // The transform node has the same indexing order as the joints int indexOfModelID = modelIDs.indexOf(modelID); @@ -1530,8 +1500,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const auto materialIt = materialNameToID.find(matName.toStdString()); if (materialIt != materialNameToID.end()) { shape.material = materialIt->second; - } else { - qCDebug(modelformat) << "Unknown material ? 
" << matName; } } @@ -1571,21 +1539,14 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } // For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart if (!extracted.materialIDPerMeshPart.empty()) { - // TODO: Verify this code works as intended by testing baked FBX models, then remove the verification/debug - if (partShapes.size() == extracted.materialIDPerMeshPart.size()) { - for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) { - hfm::Shape& shape = partShapes[i]; - const std::string& materialID = extracted.materialIDPerMeshPart[i]; - auto materialIt = materialNameToID.find(materialID); - if (materialIt != materialNameToID.end()) { - shape.material = materialIt->second; - } + assert(partShapes.size() == extracted.materialIDPerMeshPart.size()); + for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) { + hfm::Shape& shape = partShapes[i]; + const std::string& materialID = extracted.materialIDPerMeshPart[i]; + auto materialIt = materialNameToID.find(materialID); + if (materialIt != materialNameToID.end()) { + shape.material = materialIt->second; } - } else { - for (int p = 0; p < mesh.parts.size(); p++) { - qCDebug(modelformat) << "mesh.parts[" << p <<"] is " << mesh.parts[p].materialID; - } - qCDebug(modelformat) << "partShapes is not the same size as materialIDPerMeshPart ?"; } } @@ -1694,14 +1655,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } else { // this is a no cluster mesh HFMJoint& joint = hfmModel.joints[transformIndex]; - - // Apply geometric offset, if present, by transforming the vertices directly - /*if (joint.hasGeometricOffset) { - glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); - for (int i = 0; i < mesh.vertices.size(); i++) { - mesh.vertices[i] = transformPoint(geometricOffset, mesh.vertices[i]); - } - }*/ } // Store the parts for this mesh (or instance of this mesh, as the case may be) From 27b30a964663702812a00ab8dbe42b6d9a00f7f2 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 29 Oct 2019 16:02:30 -0700 Subject: [PATCH 083/121] Move hfm debug dump to hfm::Model --- libraries/fbx/src/GLTFSerializer.cpp | 139 +--------------------- libraries/fbx/src/OBJSerializer.cpp | 74 ------------ libraries/fbx/src/OBJSerializer.h | 1 - libraries/hfm/src/hfm/HFM.cpp | 163 ++++++++++++++++++++++++++ libraries/hfm/src/hfm/HFM.h | 2 + libraries/hfm/src/hfm/HFMSerializer.h | 2 +- 6 files changed, 167 insertions(+), 214 deletions(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 115e7e0ca7..5bf1ea17ff 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -2073,144 +2073,7 @@ void GLTFSerializer::glTFDebugDump() { } void GLTFSerializer::hfmDebugDump(const HFMModel& hfmModel) { - qCDebug(modelformat) << "---------------- hfmModel ----------------"; - qCDebug(modelformat) << " hasSkeletonJoints =" << hfmModel.hasSkeletonJoints; - qCDebug(modelformat) << " offset =" << hfmModel.offset; - - qCDebug(modelformat) << " neckPivot = " << hfmModel.neckPivot; - - qCDebug(modelformat) << " bindExtents.size() = " << hfmModel.bindExtents.size(); - qCDebug(modelformat) << " meshExtents.size() = " << hfmModel.meshExtents.size(); - - qCDebug(modelformat) << " jointIndices.size() =" << hfmModel.jointIndices.size(); - qCDebug(modelformat) << " 
joints.count() =" << hfmModel.joints.size(); - qCDebug(modelformat) << "---------------- Meshes ----------------"; - qCDebug(modelformat) << " meshes.count() =" << hfmModel.meshes.size(); - qCDebug(modelformat) << " blendshapeChannelNames = " << hfmModel.blendshapeChannelNames; - for (const HFMMesh& mesh : hfmModel.meshes) { - qCDebug(modelformat) << "\n"; - qCDebug(modelformat) << " meshpointer =" << mesh._mesh.get(); - qCDebug(modelformat) << " meshindex =" << mesh.meshIndex; - qCDebug(modelformat) << " vertices.count() =" << mesh.vertices.size(); - qCDebug(modelformat) << " colors.count() =" << mesh.colors.count(); - qCDebug(modelformat) << " normals.count() =" << mesh.normals.size(); - qCDebug(modelformat) << " tangents.count() =" << mesh.tangents.size(); - qCDebug(modelformat) << " colors.count() =" << mesh.colors.count(); - qCDebug(modelformat) << " texCoords.count() =" << mesh.texCoords.count(); - qCDebug(modelformat) << " texCoords1.count() =" << mesh.texCoords1.count(); - //qCDebug(modelformat) << " clusterIndices.count() =" << mesh.clusterIndices.count(); - //qCDebug(modelformat) << " clusterWeights.count() =" << mesh.clusterWeights.count(); - //qCDebug(modelformat) << " modelTransform =" << mesh.modelTransform; - qCDebug(modelformat) << " parts.count() =" << mesh.parts.size(); - qCDebug(modelformat) << "---------------- Meshes (blendshapes)--------"; - for (HFMBlendshape bshape : mesh.blendshapes) { - qCDebug(modelformat) << "\n"; - qCDebug(modelformat) << " bshape.indices.count() =" << bshape.indices.count(); - qCDebug(modelformat) << " bshape.vertices.count() =" << bshape.vertices.count(); - qCDebug(modelformat) << " bshape.normals.count() =" << bshape.normals.count(); - qCDebug(modelformat) << "\n"; - } - qCDebug(modelformat) << "---------------- Meshes (meshparts)--------"; - for (HFMMeshPart meshPart : mesh.parts) { - qCDebug(modelformat) << "\n"; - qCDebug(modelformat) << " quadIndices.count() =" << meshPart.quadIndices.count(); - qCDebug(modelformat) << " triangleIndices.count() =" << meshPart.triangleIndices.count(); - //qCDebug(modelformat) << " materialID =" << meshPart.materialID; - qCDebug(modelformat) << "\n"; - } - qCDebug(modelformat) << "---------------- Meshes (clusters)--------"; - //qCDebug(modelformat) << " clusters.count() =" << mesh.clusters.count(); - //for(HFMCluster cluster : mesh.clusters) { - // qCDebug(modelformat) << "\n"; - // qCDebug(modelformat) << " jointIndex =" << cluster.jointIndex; - // qCDebug(modelformat) << " inverseBindMatrix =" << cluster.inverseBindMatrix; - // qCDebug(modelformat) << "\n"; - //} - //qCDebug(modelformat) << "\n"; - } - qCDebug(modelformat) << "---------------- AnimationFrames ----------------"; - for (HFMAnimationFrame anim : hfmModel.animationFrames) { - qCDebug(modelformat) << " anim.translations = " << anim.translations; - qCDebug(modelformat) << " anim.rotations = " << anim.rotations; - } - QList mitomona_keys = hfmModel.meshIndicesToModelNames.keys(); - for (int key : mitomona_keys) { - qCDebug(modelformat) << " meshIndicesToModelNames key =" << key - << " val =" << hfmModel.meshIndicesToModelNames[key]; - } - - qCDebug(modelformat) << "---------------- Materials ----------------"; - - for (HFMMaterial mat : hfmModel.materials) { - qCDebug(modelformat) << "\n"; - qCDebug(modelformat) << " mat.materialID =" << mat.materialID; - qCDebug(modelformat) << " diffuseColor =" << mat.diffuseColor; - qCDebug(modelformat) << " diffuseFactor =" << mat.diffuseFactor; - qCDebug(modelformat) << " specularColor =" << 
mat.specularColor; - qCDebug(modelformat) << " specularFactor =" << mat.specularFactor; - qCDebug(modelformat) << " emissiveColor =" << mat.emissiveColor; - qCDebug(modelformat) << " emissiveFactor =" << mat.emissiveFactor; - qCDebug(modelformat) << " shininess =" << mat.shininess; - qCDebug(modelformat) << " opacity =" << mat.opacity; - qCDebug(modelformat) << " metallic =" << mat.metallic; - qCDebug(modelformat) << " roughness =" << mat.roughness; - qCDebug(modelformat) << " emissiveIntensity =" << mat.emissiveIntensity; - qCDebug(modelformat) << " ambientFactor =" << mat.ambientFactor; - - qCDebug(modelformat) << " materialID =" << mat.materialID; - qCDebug(modelformat) << " name =" << mat.name; - qCDebug(modelformat) << " shadingModel =" << mat.shadingModel; - qCDebug(modelformat) << " _material =" << mat._material.get(); - - qCDebug(modelformat) << " normalTexture =" << mat.normalTexture.filename; - qCDebug(modelformat) << " albedoTexture =" << mat.albedoTexture.filename; - qCDebug(modelformat) << " opacityTexture =" << mat.opacityTexture.filename; - - qCDebug(modelformat) << " lightmapParams =" << mat.lightmapParams; - - qCDebug(modelformat) << " isPBSMaterial =" << mat.isPBSMaterial; - qCDebug(modelformat) << " useNormalMap =" << mat.useNormalMap; - qCDebug(modelformat) << " useAlbedoMap =" << mat.useAlbedoMap; - qCDebug(modelformat) << " useOpacityMap =" << mat.useOpacityMap; - qCDebug(modelformat) << " useRoughnessMap =" << mat.useRoughnessMap; - qCDebug(modelformat) << " useSpecularMap =" << mat.useSpecularMap; - qCDebug(modelformat) << " useMetallicMap =" << mat.useMetallicMap; - qCDebug(modelformat) << " useEmissiveMap =" << mat.useEmissiveMap; - qCDebug(modelformat) << " useOcclusionMap =" << mat.useOcclusionMap; - qCDebug(modelformat) << "\n"; - } - - qCDebug(modelformat) << "---------------- Joints ----------------"; - - foreach (HFMJoint joint, hfmModel.joints) { - qCDebug(modelformat) << "\n"; - qCDebug(modelformat) << " shapeInfo.avgPoint =" << joint.shapeInfo.avgPoint; - qCDebug(modelformat) << " shapeInfo.debugLines =" << joint.shapeInfo.debugLines; - qCDebug(modelformat) << " shapeInfo.dots =" << joint.shapeInfo.dots; - qCDebug(modelformat) << " shapeInfo.points =" << joint.shapeInfo.points; - - qCDebug(modelformat) << " parentIndex" << joint.parentIndex; - qCDebug(modelformat) << " distanceToParent" << joint.distanceToParent; - qCDebug(modelformat) << " translation" << joint.translation; - qCDebug(modelformat) << " preTransform" << joint.preTransform; - qCDebug(modelformat) << " preRotation" << joint.preRotation; - qCDebug(modelformat) << " rotation" << joint.rotation; - qCDebug(modelformat) << " postRotation" << joint.postRotation; - qCDebug(modelformat) << " postTransform" << joint.postTransform; - qCDebug(modelformat) << " transform" << joint.transform; - qCDebug(modelformat) << " rotationMin" << joint.rotationMin; - qCDebug(modelformat) << " rotationMax" << joint.rotationMax; - qCDebug(modelformat) << " inverseDefaultRotation" << joint.inverseDefaultRotation; - qCDebug(modelformat) << " inverseBindRotation" << joint.inverseBindRotation; - qCDebug(modelformat) << " bindTransform" << joint.bindTransform; - qCDebug(modelformat) << " name" << joint.name; - qCDebug(modelformat) << " isSkeletonJoint" << joint.isSkeletonJoint; - qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.hasGeometricOffset; - qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.geometricTranslation; - qCDebug(modelformat) << " bindTransformFoundInCluster" << 
joint.geometricRotation; - qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.geometricScaling; - qCDebug(modelformat) << "\n"; - } + hfmModel.debugDump(); qCDebug(modelformat) << "---------------- GLTF Model ----------------"; glTFDebugDump(); diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index 62c81fe360..31f92555f1 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -825,8 +825,6 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V } } } - - // hfmDebugDump(hfmModel); } catch(const std::exception& e) { qCDebug(modelformat) << "OBJSerializer fail: " << e.what(); } @@ -1014,75 +1012,3 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V return hfmModelPtr; } - -void hfmDebugDump(const HFMModel& hfmModel) { - qCDebug(modelformat) << "---------------- hfmModel ----------------"; - qCDebug(modelformat) << " hasSkeletonJoints =" << hfmModel.hasSkeletonJoints; - qCDebug(modelformat) << " offset =" << hfmModel.offset; - qCDebug(modelformat) << " meshes.count() =" << hfmModel.meshes.size(); - foreach (HFMMesh mesh, hfmModel.meshes) { - qCDebug(modelformat) << " vertices.count() =" << mesh.vertices.count(); - qCDebug(modelformat) << " colors.count() =" << mesh.colors.count(); - qCDebug(modelformat) << " normals.count() =" << mesh.normals.count(); - /*if (mesh.normals.count() == mesh.vertices.count()) { - for (int i = 0; i < mesh.normals.count(); i++) { - qCDebug(modelformat) << " " << mesh.vertices[ i ] << mesh.normals[ i ]; - } - }*/ - qCDebug(modelformat) << " tangents.count() =" << mesh.tangents.count(); - qCDebug(modelformat) << " colors.count() =" << mesh.colors.count(); - qCDebug(modelformat) << " texCoords.count() =" << mesh.texCoords.count(); - qCDebug(modelformat) << " texCoords1.count() =" << mesh.texCoords1.count(); - qCDebug(modelformat) << " clusterIndices.size() =" << mesh.clusterIndices.size(); - qCDebug(modelformat) << " clusterWeights.size() =" << mesh.clusterWeights.size(); - qCDebug(modelformat) << " meshExtents =" << mesh.meshExtents; - qCDebug(modelformat) << " modelTransform =" << mesh.modelTransform; - qCDebug(modelformat) << " parts.count() =" << mesh.parts.size(); - foreach (HFMMeshPart meshPart, mesh.parts) { - qCDebug(modelformat) << " quadIndices.count() =" << meshPart.quadIndices.count(); - qCDebug(modelformat) << " triangleIndices.count() =" << meshPart.triangleIndices.count(); - /* - qCDebug(modelformat) << " diffuseColor =" << meshPart.diffuseColor << "mat =" << meshPart._material->getDiffuse(); - qCDebug(modelformat) << " specularColor =" << meshPart.specularColor << "mat =" << meshPart._material->getMetallic(); - qCDebug(modelformat) << " emissiveColor =" << meshPart.emissiveColor << "mat =" << meshPart._material->getEmissive(); - qCDebug(modelformat) << " emissiveParams =" << meshPart.emissiveParams; - qCDebug(modelformat) << " gloss =" << meshPart.shininess << "mat =" << meshPart._material->getRoughness(); - qCDebug(modelformat) << " opacity =" << meshPart.opacity << "mat =" << meshPart._material->getOpacity(); - */ - qCDebug(modelformat) << " materialID =" << meshPart.materialID; - /* qCDebug(modelformat) << " diffuse texture =" << meshPart.diffuseTexture.filename; - qCDebug(modelformat) << " specular texture =" << meshPart.specularTexture.filename; - */ - } - qCDebug(modelformat) << " clusters.count() =" << mesh.clusters.count(); - foreach (HFMCluster cluster, mesh.clusters) { - qCDebug(modelformat) << " 
jointIndex =" << cluster.jointIndex; - qCDebug(modelformat) << " inverseBindMatrix =" << cluster.inverseBindMatrix; - } - } - - qCDebug(modelformat) << " jointIndices =" << hfmModel.jointIndices; - qCDebug(modelformat) << " joints.count() =" << hfmModel.joints.size(); - - foreach (HFMJoint joint, hfmModel.joints) { - - qCDebug(modelformat) << " parentIndex" << joint.parentIndex; - qCDebug(modelformat) << " distanceToParent" << joint.distanceToParent; - qCDebug(modelformat) << " translation" << joint.translation; - qCDebug(modelformat) << " preTransform" << joint.preTransform; - qCDebug(modelformat) << " preRotation" << joint.preRotation; - qCDebug(modelformat) << " rotation" << joint.rotation; - qCDebug(modelformat) << " postRotation" << joint.postRotation; - qCDebug(modelformat) << " postTransform" << joint.postTransform; - qCDebug(modelformat) << " transform" << joint.transform; - qCDebug(modelformat) << " rotationMin" << joint.rotationMin; - qCDebug(modelformat) << " rotationMax" << joint.rotationMax; - qCDebug(modelformat) << " inverseDefaultRotation" << joint.inverseDefaultRotation; - qCDebug(modelformat) << " inverseBindRotation" << joint.inverseBindRotation; - qCDebug(modelformat) << " bindTransform" << joint.bindTransform; - qCDebug(modelformat) << " name" << joint.name; - qCDebug(modelformat) << " isSkeletonJoint" << joint.isSkeletonJoint; - } - - qCDebug(modelformat) << "\n"; -} diff --git a/libraries/fbx/src/OBJSerializer.h b/libraries/fbx/src/OBJSerializer.h index 6fdd95e2c3..462d32a119 100644 --- a/libraries/fbx/src/OBJSerializer.h +++ b/libraries/fbx/src/OBJSerializer.h @@ -120,6 +120,5 @@ private: // What are these utilities doing here? One is used by fbx loading code in VHACD Utils, and the other a general debugging utility. void setMeshPartDefaults(HFMMeshPart& meshPart, QString materialID); -void hfmDebugDump(const HFMModel& hfmModel); #endif // hifi_OBJSerializer_h diff --git a/libraries/hfm/src/hfm/HFM.cpp b/libraries/hfm/src/hfm/HFM.cpp index f68af2b1ce..5d57ef2c98 100644 --- a/libraries/hfm/src/hfm/HFM.cpp +++ b/libraries/hfm/src/hfm/HFM.cpp @@ -208,3 +208,166 @@ void HFMModel::computeKdops() { } } } + +void hfm::Model::debugDump() const { + qCDebug(modelformat) << "---------------- hfmModel ----------------"; + qCDebug(modelformat) << " hasSkeletonJoints =" << hasSkeletonJoints; + qCDebug(modelformat) << " offset =" << offset; + + qCDebug(modelformat) << " neckPivot = " << neckPivot; + + qCDebug(modelformat) << " bindExtents.size() = " << bindExtents.size(); + qCDebug(modelformat) << " meshExtents.size() = " << meshExtents.size(); + + qCDebug(modelformat) << "---------------- Shapes ----------------"; + qCDebug(modelformat) << " shapes.size() =" << shapes.size(); + for (const hfm::Shape& shape : shapes) { + qCDebug(modelformat) << "\n"; + qCDebug(modelformat) << " mesh =" << shape.mesh; + qCDebug(modelformat) << " meshPart =" << shape.meshPart; + qCDebug(modelformat) << " material =" << shape.material; + qCDebug(modelformat) << " joint =" << shape.joint; + qCDebug(modelformat) << " transformedExtents =" << shape.transformedExtents; + qCDebug(modelformat) << " skinDeformer =" << shape.skinDeformer; + } + + qCDebug(modelformat) << " jointIndices.size() =" << jointIndices.size(); + qCDebug(modelformat) << " joints.size() =" << joints.size(); + qCDebug(modelformat) << "---------------- Meshes ----------------"; + qCDebug(modelformat) << " meshes.size() =" << meshes.size(); + qCDebug(modelformat) << " blendshapeChannelNames = " << blendshapeChannelNames; + for (const 
HFMMesh& mesh : meshes) { + qCDebug(modelformat) << "\n"; + qCDebug(modelformat) << " meshpointer =" << mesh._mesh.get(); + qCDebug(modelformat) << " meshindex =" << mesh.meshIndex; + qCDebug(modelformat) << " vertices.size() =" << mesh.vertices.size(); + qCDebug(modelformat) << " colors.size() =" << mesh.colors.size(); + qCDebug(modelformat) << " normals.size() =" << mesh.normals.size(); + qCDebug(modelformat) << " tangents.size() =" << mesh.tangents.size(); + qCDebug(modelformat) << " colors.size() =" << mesh.colors.size(); + qCDebug(modelformat) << " texCoords.size() =" << mesh.texCoords.size(); + qCDebug(modelformat) << " texCoords1.size() =" << mesh.texCoords1.size(); + qCDebug(modelformat) << " clusterIndices.size() =" << mesh.clusterIndices.size(); + qCDebug(modelformat) << " clusterWeights.size() =" << mesh.clusterWeights.size(); + qCDebug(modelformat) << " modelTransform =" << mesh.modelTransform; + qCDebug(modelformat) << " parts.size() =" << mesh.parts.size(); + qCDebug(modelformat) << "---------------- Meshes (blendshapes)--------"; + for (HFMBlendshape bshape : mesh.blendshapes) { + qCDebug(modelformat) << "\n"; + qCDebug(modelformat) << " bshape.indices.size() =" << bshape.indices.size(); + qCDebug(modelformat) << " bshape.vertices.size() =" << bshape.vertices.size(); + qCDebug(modelformat) << " bshape.normals.size() =" << bshape.normals.size(); + qCDebug(modelformat) << "\n"; + } + qCDebug(modelformat) << "---------------- Meshes (meshparts)--------"; + for (HFMMeshPart meshPart : mesh.parts) { + qCDebug(modelformat) << "\n"; + qCDebug(modelformat) << " quadIndices.size() =" << meshPart.quadIndices.size(); + qCDebug(modelformat) << " triangleIndices.size() =" << meshPart.triangleIndices.size(); + qCDebug(modelformat) << "\n"; + } + } + qCDebug(modelformat) << "---------------- AnimationFrames ----------------"; + for (HFMAnimationFrame anim : animationFrames) { + qCDebug(modelformat) << " anim.translations = " << anim.translations; + qCDebug(modelformat) << " anim.rotations = " << anim.rotations; + } + QList mitomona_keys = meshIndicesToModelNames.keys(); + for (int key : mitomona_keys) { + qCDebug(modelformat) << " meshIndicesToModelNames key =" << key + << " val =" << meshIndicesToModelNames[key]; + } + + qCDebug(modelformat) << "---------------- Materials ----------------"; + + for (HFMMaterial mat : materials) { + qCDebug(modelformat) << "\n"; + qCDebug(modelformat) << " mat.materialID =" << mat.materialID; + qCDebug(modelformat) << " diffuseColor =" << mat.diffuseColor; + qCDebug(modelformat) << " diffuseFactor =" << mat.diffuseFactor; + qCDebug(modelformat) << " specularColor =" << mat.specularColor; + qCDebug(modelformat) << " specularFactor =" << mat.specularFactor; + qCDebug(modelformat) << " emissiveColor =" << mat.emissiveColor; + qCDebug(modelformat) << " emissiveFactor =" << mat.emissiveFactor; + qCDebug(modelformat) << " shininess =" << mat.shininess; + qCDebug(modelformat) << " opacity =" << mat.opacity; + qCDebug(modelformat) << " metallic =" << mat.metallic; + qCDebug(modelformat) << " roughness =" << mat.roughness; + qCDebug(modelformat) << " emissiveIntensity =" << mat.emissiveIntensity; + qCDebug(modelformat) << " ambientFactor =" << mat.ambientFactor; + + qCDebug(modelformat) << " materialID =" << mat.materialID; + qCDebug(modelformat) << " name =" << mat.name; + qCDebug(modelformat) << " shadingModel =" << mat.shadingModel; + qCDebug(modelformat) << " _material =" << mat._material.get(); + + qCDebug(modelformat) << " normalTexture =" << 
mat.normalTexture.filename; + qCDebug(modelformat) << " albedoTexture =" << mat.albedoTexture.filename; + qCDebug(modelformat) << " opacityTexture =" << mat.opacityTexture.filename; + + qCDebug(modelformat) << " lightmapParams =" << mat.lightmapParams; + + qCDebug(modelformat) << " isPBSMaterial =" << mat.isPBSMaterial; + qCDebug(modelformat) << " useNormalMap =" << mat.useNormalMap; + qCDebug(modelformat) << " useAlbedoMap =" << mat.useAlbedoMap; + qCDebug(modelformat) << " useOpacityMap =" << mat.useOpacityMap; + qCDebug(modelformat) << " useRoughnessMap =" << mat.useRoughnessMap; + qCDebug(modelformat) << " useSpecularMap =" << mat.useSpecularMap; + qCDebug(modelformat) << " useMetallicMap =" << mat.useMetallicMap; + qCDebug(modelformat) << " useEmissiveMap =" << mat.useEmissiveMap; + qCDebug(modelformat) << " useOcclusionMap =" << mat.useOcclusionMap; + qCDebug(modelformat) << "\n"; + } + + qCDebug(modelformat) << "---------------- Joints ----------------"; + + for (const HFMJoint& joint : joints) { + qCDebug(modelformat) << "\n"; + qCDebug(modelformat) << " shapeInfo.avgPoint =" << joint.shapeInfo.avgPoint; + qCDebug(modelformat) << " shapeInfo.debugLines =" << joint.shapeInfo.debugLines; + qCDebug(modelformat) << " shapeInfo.dots =" << joint.shapeInfo.dots; + qCDebug(modelformat) << " shapeInfo.points =" << joint.shapeInfo.points; + + qCDebug(modelformat) << " ---"; + + qCDebug(modelformat) << " parentIndex" << joint.parentIndex; + qCDebug(modelformat) << " distanceToParent" << joint.distanceToParent; + qCDebug(modelformat) << " localTransform" << joint.localTransform; + qCDebug(modelformat) << " transform" << joint.transform; + qCDebug(modelformat) << " globalTransform" << joint.globalTransform; + + qCDebug(modelformat) << " ---"; + + qCDebug(modelformat) << " translation" << joint.translation; + qCDebug(modelformat) << " preTransform" << joint.preTransform; + qCDebug(modelformat) << " preRotation" << joint.preRotation; + qCDebug(modelformat) << " rotation" << joint.rotation; + qCDebug(modelformat) << " postRotation" << joint.postRotation; + qCDebug(modelformat) << " postTransform" << joint.postTransform; + qCDebug(modelformat) << " rotationMin" << joint.rotationMin; + qCDebug(modelformat) << " rotationMax" << joint.rotationMax; + qCDebug(modelformat) << " inverseDefaultRotation" << joint.inverseDefaultRotation; + qCDebug(modelformat) << " inverseBindRotation" << joint.inverseBindRotation; + qCDebug(modelformat) << " bindTransform" << joint.bindTransform; + qCDebug(modelformat) << " name" << joint.name; + qCDebug(modelformat) << " isSkeletonJoint" << joint.isSkeletonJoint; + qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.hasGeometricOffset; + qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.geometricTranslation; + qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.geometricRotation; + qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.geometricScaling; + qCDebug(modelformat) << "\n"; + } + + qCDebug(modelformat) << "------------- SkinDeformers ------------"; + qCDebug(modelformat) << " skinDeformers.size() =" << skinDeformers.size(); + for(const hfm::SkinDeformer& skinDeformer : skinDeformers) { + qCDebug(modelformat) << "------- SkinDeformers (Clusters) -------"; + for (const hfm::Cluster& cluster : skinDeformer.clusters) { + qCDebug(modelformat) << "\n"; + qCDebug(modelformat) << " jointIndex =" << cluster.jointIndex; + qCDebug(modelformat) << " inverseBindMatrix =" << cluster.inverseBindMatrix; + qCDebug(modelformat) << 
"\n"; + } + } + qCDebug(modelformat) << "\n"; +} diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index ec91bd7605..87263cb72f 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -375,6 +375,8 @@ public: QMap jointRotationOffsets; std::vector shapeVertices; FlowData flowData; + + void debugDump() const; }; }; diff --git a/libraries/hfm/src/hfm/HFMSerializer.h b/libraries/hfm/src/hfm/HFMSerializer.h index d0be588d60..f28ef9f9c3 100644 --- a/libraries/hfm/src/hfm/HFMSerializer.h +++ b/libraries/hfm/src/hfm/HFMSerializer.h @@ -1,5 +1,5 @@ // -// FBXSerializer.h +// HFMSerializer.h // libraries/hfm/src/hfm // // Created by Sabrina Shanman on 2018/11/07. From 28e3fd4bc4ad04c1d6ecef1bf249743ef80745b1 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 29 Oct 2019 15:28:23 -0700 Subject: [PATCH 084/121] Refactor joint transform definition --- libraries/fbx/src/FBXSerializer.cpp | 67 +++++++++++++++-------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index d8d687f7ff..bf79a012a0 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1310,6 +1310,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.parentIndex = fbxModel.parentIndex; uint32_t jointIndex = (uint32_t)hfmModel.joints.size(); + // Copy default joint parameters from model + joint.translation = fbxModel.translation; // these are usually in centimeters joint.preTransform = fbxModel.preTransform; joint.preRotation = fbxModel.preRotation; @@ -1326,21 +1328,26 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.isSkeletonJoint = fbxModel.isLimbNode; hfmModel.hasSkeletonJoints = (hfmModel.hasSkeletonJoints || joint.isSkeletonJoint); + joint.name = fbxModel.name; + + // With the basic joint information, we can start to calculate compound transform information + // modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate + // First, calculate the FBX-specific transform used for inverse bind transform calculations - glm::quat jointBindCombinedRotation = joint.preRotation * joint.rotation * joint.postRotation; - glm::mat4 globalTransformForCluster = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(jointBindCombinedRotation) * joint.postTransform; - if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { - const glm::mat4& parentGlobalTransformForCluster = globalTransformForClusters[joint.parentIndex]; + glm::quat jointBindCombinedRotation = fbxModel.preRotation * fbxModel.rotation * fbxModel.postRotation; + glm::mat4 globalTransformForCluster = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(jointBindCombinedRotation) * fbxModel.postTransform; + if (fbxModel.parentIndex != -1 && fbxModel.parentIndex < (int)jointIndex && !needMixamoHack) { + const glm::mat4& parentGlobalTransformForCluster = globalTransformForClusters[fbxModel.parentIndex]; globalTransformForCluster = parentGlobalTransformForCluster * globalTransformForCluster; } - if (joint.hasGeometricOffset) { - glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); + if (fbxModel.hasGeometricOffset) { + glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, 
fbxModel.geometricRotation, fbxModel.geometricTranslation); globalTransformForCluster = globalTransformForCluster * geometricOffset; } globalTransformForClusters.push_back(globalTransformForCluster); - // Then, calculate the transforms proper + // Make final adjustments to the static joint properties, and pre-calculate static transforms if (applyUpAxisZRotation && joint.parentIndex == -1) { joint.rotation *= upAxisZRotation; @@ -1350,26 +1357,38 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); joint.postTransform *= geometricOffset; } + glm::quat combinedRotation = joint.preRotation * joint.rotation * joint.postRotation; + joint.localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(combinedRotation) * joint.postTransform; + if (joint.parentIndex == -1) { - joint.transform = hfmModel.offset * glm::translate(joint.translation) * joint.preTransform * - glm::mat4_cast(combinedRotation) * joint.postTransform; + joint.transform = hfmModel.offset * joint.localTransform; + joint.globalTransform = joint.localTransform; joint.inverseDefaultRotation = glm::inverse(combinedRotation); joint.distanceToParent = 0.0f; - } else { const HFMJoint& parentJoint = hfmModel.joints.at(joint.parentIndex); - joint.transform = parentJoint.transform * glm::translate(joint.translation) * - joint.preTransform * glm::mat4_cast(combinedRotation) * joint.postTransform; + joint.transform = parentJoint.transform * joint.localTransform; + joint.globalTransform = parentJoint.globalTransform * joint.localTransform; joint.inverseDefaultRotation = glm::inverse(combinedRotation) * parentJoint.inverseDefaultRotation; - joint.distanceToParent = glm::distance(extractTranslation(parentJoint.transform), - extractTranslation(joint.transform)); + joint.distanceToParent = glm::distance(extractTranslation(parentJoint.transform), extractTranslation(joint.transform)); + + if (parentJoint.hasGeometricOffset) { + // Per the FBX standard, geometric offset should not propagate to children. + // However, we must be careful when modifying the behavior of FBXSerializer. + // So, we leave this here, as a breakpoint for debugging, or stub for implementation. + // qCDebug(modelformat) << "Geometric offset encountered on non-leaf node. 
jointIndex: " << jointIndex << ", modelURL: " << url; + // glm::mat4 parentGeometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); + // joint.preTransform = glm::inverse(parentGeometricOffset) * joint.preTransform; + } } joint.inverseBindRotation = joint.inverseDefaultRotation; - joint.name = fbxModel.name; joint.bindTransformFoundInCluster = false; + // Initialize animation information next + // And also get the joint poses from the first frame of the animation, if present + QString rotationID = localRotations.value(modelID); AnimationCurve xRotCurve = animationCurves.value(xComponents.value(rotationID)); AnimationCurve yRotCurve = animationCurves.value(yComponents.value(rotationID)); @@ -1396,24 +1415,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.translation = hfmModel.animationFrames[i].translations[jointIndex]; joint.rotation = hfmModel.animationFrames[i].rotations[jointIndex]; } - - } - - // Now that we've initialized the joint, we can define the transform - // modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate - joint.localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; - joint.globalTransform = joint.localTransform; - if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { - hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; - joint.globalTransform = parentJoint.globalTransform * joint.localTransform; - if (parentJoint.hasGeometricOffset) { - // Per the FBX standard, geometric offset should not propagate to children. - // However, we must be careful when modifying the behavior of FBXSerializer. - // So, we leave this here, as a breakpoint for debugging, or stub for implementation. - // qCDebug(modelformat) << "Geometric offset encountered on non-leaf node. 
jointIndex: " << jointIndex << ", modelURL: " << url; - // glm::mat4 parentGeometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); - // joint.preTransform = glm::inverse(parentGeometricOffset) * joint.preTransform; - } } hfmModel.joints.push_back(joint); From 3c078eeef64a14b6ea753c0c0244a5f5c2301047 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 29 Oct 2019 17:25:38 -0700 Subject: [PATCH 085/121] It makes more sense for joint.globalTransform to have the hfmModel.offset applied --- libraries/fbx/src/FBXSerializer.cpp | 4 ++-- libraries/fbx/src/GLTFSerializer.cpp | 2 +- libraries/hfm/src/hfm/HFMModelMath.cpp | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index bf79a012a0..79c3e1368c 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1362,8 +1362,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(combinedRotation) * joint.postTransform; if (joint.parentIndex == -1) { - joint.transform = hfmModel.offset * joint.localTransform; - joint.globalTransform = joint.localTransform; + joint.transform = joint.localTransform; + joint.globalTransform = hfmModel.offset * joint.localTransform; joint.inverseDefaultRotation = glm::inverse(combinedRotation); joint.distanceToParent = 0.0f; } else { diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 5bf1ea17ff..42306dce40 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1015,7 +1015,7 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& joint.transform = parentJoint.transform * joint.transform; joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; } else { - joint.transform = hfmModel.offset * joint.transform; + joint.transform = joint.transform; joint.globalTransform = hfmModel.offset * joint.globalTransform; } diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index 93687b08b0..0026378060 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -43,13 +43,13 @@ void calculateExtentsForShape(hfm::Shape& shape, const std::vector& m const auto& mesh = meshes[shape.mesh]; const auto& meshPart = mesh.parts[shape.meshPart]; - glm::mat4 globalTransform = joints[shape.joint].globalTransform; + glm::mat4 transform = joints[shape.joint].transform; forEachIndex(meshPart, [&](int32_t idx){ if (mesh.vertices.size() <= idx) { return; } const glm::vec3& vertex = mesh.vertices[idx]; - const glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f)); + const glm::vec3 transformedVertex = glm::vec3(transform * glm::vec4(vertex, 1.0f)); shapeExtents.addPoint(transformedVertex); }); From fe1fe6aa836521655500191d822b1f568ea14dc9 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 30 Oct 2019 09:09:17 -0700 Subject: [PATCH 086/121] Fix build warnings --- libraries/fbx/src/FBXSerializer.cpp | 3 --- libraries/fbx/src/FBXSerializer_Mesh.cpp | 1 - 2 files changed, 4 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 79c3e1368c..548d2924d4 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ 
b/libraries/fbx/src/FBXSerializer.cpp @@ -1653,9 +1653,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const for (hfm::Shape& shape : partShapes) { shape.skinDeformer = skinDeformerID; } - } else { - // this is a no cluster mesh - HFMJoint& joint = hfmModel.joints[transformIndex]; } // Store the parts for this mesh (or instance of this mesh, as the case may be) diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index 51104ee74f..e687f5e9f2 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -487,7 +487,6 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me int& partIndexPlusOne = materialTextureParts[materialTexture]; if (partIndexPlusOne == 0) { data.extracted.mesh.parts.emplace_back(); - HFMMeshPart& part = data.extracted.mesh.parts.back(); // Figure out if this is the older way of defining the per-part material for baked FBX if (dracoMeshNodeVersion >= 2) { From ff908a36d60abbcedb4156ab94cbcdace8992a96 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 30 Oct 2019 11:49:28 -0700 Subject: [PATCH 087/121] Define separately globalTransformForCluster and localTransformForCluster for clarity --- libraries/fbx/src/FBXSerializer.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 548d2924d4..91e85cd1af 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1336,10 +1336,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // First, calculate the FBX-specific transform used for inverse bind transform calculations glm::quat jointBindCombinedRotation = fbxModel.preRotation * fbxModel.rotation * fbxModel.postRotation; - glm::mat4 globalTransformForCluster = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(jointBindCombinedRotation) * fbxModel.postTransform; + const glm::mat4 localTransformForCluster = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(jointBindCombinedRotation) * fbxModel.postTransform; + glm::mat4 globalTransformForCluster; if (fbxModel.parentIndex != -1 && fbxModel.parentIndex < (int)jointIndex && !needMixamoHack) { const glm::mat4& parentGlobalTransformForCluster = globalTransformForClusters[fbxModel.parentIndex]; - globalTransformForCluster = parentGlobalTransformForCluster * globalTransformForCluster; + globalTransformForCluster = parentGlobalTransformForCluster * localTransformForCluster; + } else { + globalTransformForCluster = localTransformForCluster; } if (fbxModel.hasGeometricOffset) { glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation); From 61825f2e06e81256989c45fbce33ec6f210f1710 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 30 Oct 2019 11:50:03 -0700 Subject: [PATCH 088/121] Remove joint.transform self-assignment --- libraries/fbx/src/GLTFSerializer.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/fbx/src/GLTFSerializer.cpp b/libraries/fbx/src/GLTFSerializer.cpp index 42306dce40..1ec276ba5a 100755 --- a/libraries/fbx/src/GLTFSerializer.cpp +++ b/libraries/fbx/src/GLTFSerializer.cpp @@ -1015,7 +1015,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const hifi::VariantHash& joint.transform = parentJoint.transform * joint.transform; 
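// The GLTF and FBX hunks around this point all follow the same recurrence: joints are
// ordered parent-before-child, each joint's global transform is its parent's global
// transform times its own local transform, and the model-level offset is applied once,
// at the root. A minimal standalone sketch of that recurrence, using a simplified joint
// type rather than the real hfm::Joint:

#include <vector>
#include <glm/glm.hpp>

struct SimpleJoint {
    int parentIndex { -1 };              // -1 marks a root joint
    glm::mat4 localTransform { 1.0f };
    glm::mat4 globalTransform { 1.0f };
};

// Assumes joints are ordered so a parent always precedes its children, as the serializers do.
void accumulateGlobalTransforms(std::vector<SimpleJoint>& joints, const glm::mat4& modelOffset) {
    for (auto& joint : joints) {
        if (joint.parentIndex == -1) {
            joint.globalTransform = modelOffset * joint.localTransform;
        } else {
            joint.globalTransform = joints[joint.parentIndex].globalTransform * joint.localTransform;
        }
    }
}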
joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; } else { - joint.transform = joint.transform; joint.globalTransform = hfmModel.offset * joint.globalTransform; } From c8bf1ddfea9f27ca17bfad055c86cf1440aac51b Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 30 Oct 2019 12:01:25 -0700 Subject: [PATCH 089/121] Only recalculate globalTransformForCluster if needed --- libraries/fbx/src/FBXSerializer.cpp | 42 ++++++++++++++++------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 91e85cd1af..0d126dba5b 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1330,27 +1330,12 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.name = fbxModel.name; + joint.bindTransformFoundInCluster = false; + // With the basic joint information, we can start to calculate compound transform information // modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate - // First, calculate the FBX-specific transform used for inverse bind transform calculations - - glm::quat jointBindCombinedRotation = fbxModel.preRotation * fbxModel.rotation * fbxModel.postRotation; - const glm::mat4 localTransformForCluster = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(jointBindCombinedRotation) * fbxModel.postTransform; - glm::mat4 globalTransformForCluster; - if (fbxModel.parentIndex != -1 && fbxModel.parentIndex < (int)jointIndex && !needMixamoHack) { - const glm::mat4& parentGlobalTransformForCluster = globalTransformForClusters[fbxModel.parentIndex]; - globalTransformForCluster = parentGlobalTransformForCluster * localTransformForCluster; - } else { - globalTransformForCluster = localTransformForCluster; - } - if (fbxModel.hasGeometricOffset) { - glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation); - globalTransformForCluster = globalTransformForCluster * geometricOffset; - } - globalTransformForClusters.push_back(globalTransformForCluster); - - // Make final adjustments to the static joint properties, and pre-calculate static transforms + // Make adjustments to the static joint properties, and pre-calculate static transforms if (applyUpAxisZRotation && joint.parentIndex == -1) { joint.rotation *= upAxisZRotation; @@ -1387,7 +1372,26 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } joint.inverseBindRotation = joint.inverseDefaultRotation; - joint.bindTransformFoundInCluster = false; + // If needed, separately calculate the FBX-specific transform used for inverse bind transform calculations + + glm::mat4 globalTransformForCluster; + if (applyUpAxisZRotation && joint.parentIndex == -1) { + const glm::quat jointBindCombinedRotation = fbxModel.preRotation * fbxModel.rotation * fbxModel.postRotation; + const glm::mat4 localTransformForCluster = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(jointBindCombinedRotation) * fbxModel.postTransform; + if (fbxModel.parentIndex != -1 && fbxModel.parentIndex < (int)jointIndex && !needMixamoHack) { + const glm::mat4& parentGlobalTransformForCluster = globalTransformForClusters[fbxModel.parentIndex]; + globalTransformForCluster = parentGlobalTransformForCluster * localTransformForCluster; + } else { + globalTransformForCluster = 
localTransformForCluster; + } + if (fbxModel.hasGeometricOffset) { + glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation); + globalTransformForCluster = globalTransformForCluster * geometricOffset; + } + } else { + globalTransformForCluster = joint.transform; + } + globalTransformForClusters.push_back(globalTransformForCluster); // Initialize animation information next // And also get the joint poses from the first frame of the animation, if present From 296cd4a47b1019a096ee4b375cb43a657b00fd53 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 30 Oct 2019 12:05:50 -0700 Subject: [PATCH 090/121] Rename globalTransformForCluster(s) -> transformForCluster(s) --- libraries/fbx/src/FBXSerializer.cpp | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 0d126dba5b..e6bc35da9b 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1302,8 +1302,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const bool needMixamoHack = hfmModel.applicationName == "mixamo.com"; - std::vector globalTransformForClusters; - globalTransformForClusters.reserve((size_t)modelIDs.size()); + std::vector transformForClusters; + transformForClusters.reserve((size_t)modelIDs.size()); for (const QString& modelID : modelIDs) { const FBXModel& fbxModel = fbxModels[modelID]; HFMJoint joint; @@ -1374,24 +1374,24 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // If needed, separately calculate the FBX-specific transform used for inverse bind transform calculations - glm::mat4 globalTransformForCluster; + glm::mat4 transformForCluster; if (applyUpAxisZRotation && joint.parentIndex == -1) { const glm::quat jointBindCombinedRotation = fbxModel.preRotation * fbxModel.rotation * fbxModel.postRotation; const glm::mat4 localTransformForCluster = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(jointBindCombinedRotation) * fbxModel.postTransform; if (fbxModel.parentIndex != -1 && fbxModel.parentIndex < (int)jointIndex && !needMixamoHack) { - const glm::mat4& parentGlobalTransformForCluster = globalTransformForClusters[fbxModel.parentIndex]; - globalTransformForCluster = parentGlobalTransformForCluster * localTransformForCluster; + const glm::mat4& parenttransformForCluster = transformForClusters[fbxModel.parentIndex]; + transformForCluster = parenttransformForCluster * localTransformForCluster; } else { - globalTransformForCluster = localTransformForCluster; + transformForCluster = localTransformForCluster; } if (fbxModel.hasGeometricOffset) { glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation); - globalTransformForCluster = globalTransformForCluster * geometricOffset; + transformForCluster = transformForCluster * geometricOffset; } } else { - globalTransformForCluster = joint.transform; + transformForCluster = joint.transform; } - globalTransformForClusters.push_back(globalTransformForCluster); + transformForClusters.push_back(transformForCluster); // Initialize animation information next // And also get the joint poses from the first frame of the animation, if present @@ -1588,8 +1588,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const hfmCluster.jointIndex = 
(uint32_t)indexOfJointID; } - const glm::mat4& jointBindTransform = globalTransformForClusters[transformIndex]; - hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * jointBindTransform; + const glm::mat4& transformForCluster = transformForClusters[transformIndex]; + hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * transformForCluster; // slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and // sometimes floating point fuzz can be introduced after the inverse. From 0d1bd6afbfb834c64624e19555ed1a22b3e4c71c Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 30 Oct 2019 13:32:30 -0700 Subject: [PATCH 091/121] Fix wrong transformForCluster for non-root joints with Z up axis --- libraries/fbx/src/FBXSerializer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index e6bc35da9b..33b9457928 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1375,7 +1375,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // If needed, separately calculate the FBX-specific transform used for inverse bind transform calculations glm::mat4 transformForCluster; - if (applyUpAxisZRotation && joint.parentIndex == -1) { + if (applyUpAxisZRotation) { const glm::quat jointBindCombinedRotation = fbxModel.preRotation * fbxModel.rotation * fbxModel.postRotation; const glm::mat4 localTransformForCluster = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(jointBindCombinedRotation) * fbxModel.postTransform; if (fbxModel.parentIndex != -1 && fbxModel.parentIndex < (int)jointIndex && !needMixamoHack) { From 9b41fa20c65cbcab83a82eac5dc3eb9bd179401a Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 30 Oct 2019 14:08:02 -0700 Subject: [PATCH 092/121] Clean up hfm joint geometric offset definition --- libraries/fbx/src/FBXSerializer.cpp | 20 ++++++++------------ libraries/hfm/src/hfm/HFM.cpp | 6 ++---- libraries/hfm/src/hfm/HFM.h | 5 +---- 3 files changed, 11 insertions(+), 20 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 33b9457928..33ffdb5714 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1321,10 +1321,9 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.rotationMin = fbxModel.rotationMin; joint.rotationMax = fbxModel.rotationMax; - joint.hasGeometricOffset = fbxModel.hasGeometricOffset; - joint.geometricTranslation = fbxModel.geometricTranslation; - joint.geometricRotation = fbxModel.geometricRotation; - joint.geometricScaling = fbxModel.geometricScaling; + if (fbxModel.hasGeometricOffset) { + joint.geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation); + } joint.isSkeletonJoint = fbxModel.isLimbNode; hfmModel.hasSkeletonJoints = (hfmModel.hasSkeletonJoints || joint.isSkeletonJoint); @@ -1341,9 +1340,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.rotation *= upAxisZRotation; joint.translation = upAxisZRotation * joint.translation; } - if (joint.hasGeometricOffset) { - glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); - joint.postTransform *= geometricOffset; + if (fbxModel.hasGeometricOffset) { + 
joint.postTransform *= joint.geometricOffset; } glm::quat combinedRotation = joint.preRotation * joint.rotation * joint.postRotation; @@ -1361,13 +1359,12 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.inverseDefaultRotation = glm::inverse(combinedRotation) * parentJoint.inverseDefaultRotation; joint.distanceToParent = glm::distance(extractTranslation(parentJoint.transform), extractTranslation(joint.transform)); - if (parentJoint.hasGeometricOffset) { + if (fbxModel.hasGeometricOffset) { // Per the FBX standard, geometric offset should not propagate to children. // However, we must be careful when modifying the behavior of FBXSerializer. // So, we leave this here, as a breakpoint for debugging, or stub for implementation. // qCDebug(modelformat) << "Geometric offset encountered on non-leaf node. jointIndex: " << jointIndex << ", modelURL: " << url; - // glm::mat4 parentGeometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); - // joint.preTransform = glm::inverse(parentGeometricOffset) * joint.preTransform; + // joint.preTransform = glm::inverse(parentJoint.geometricOffset) * joint.preTransform; } } joint.inverseBindRotation = joint.inverseDefaultRotation; @@ -1385,8 +1382,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const transformForCluster = localTransformForCluster; } if (fbxModel.hasGeometricOffset) { - glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation); - transformForCluster = transformForCluster * geometricOffset; + transformForCluster = transformForCluster * joint.geometricOffset; } } else { transformForCluster = joint.transform; diff --git a/libraries/hfm/src/hfm/HFM.cpp b/libraries/hfm/src/hfm/HFM.cpp index 5d57ef2c98..500aaaa842 100644 --- a/libraries/hfm/src/hfm/HFM.cpp +++ b/libraries/hfm/src/hfm/HFM.cpp @@ -348,13 +348,11 @@ void hfm::Model::debugDump() const { qCDebug(modelformat) << " rotationMax" << joint.rotationMax; qCDebug(modelformat) << " inverseDefaultRotation" << joint.inverseDefaultRotation; qCDebug(modelformat) << " inverseBindRotation" << joint.inverseBindRotation; + qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.bindTransformFoundInCluster; qCDebug(modelformat) << " bindTransform" << joint.bindTransform; qCDebug(modelformat) << " name" << joint.name; qCDebug(modelformat) << " isSkeletonJoint" << joint.isSkeletonJoint; - qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.hasGeometricOffset; - qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.geometricTranslation; - qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.geometricRotation; - qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.geometricScaling; + qCDebug(modelformat) << " geometricOffset" << joint.geometricOffset; qCDebug(modelformat) << "\n"; } diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 87263cb72f..11b32dc4b3 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -114,10 +114,7 @@ public: bool bindTransformFoundInCluster; // geometric offset is applied in local space but does NOT affect children. 
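// Folding the separate geometric translation/rotation/scaling into one glm::mat4 matches
// how the serializer already consumes them via createMatFromScaleQuatAndPos(...). A short
// sketch of that composition, assuming the conventional translate * rotate * scale order
// (the real helper lives in GLMHelpers and should be treated as authoritative):

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/quaternion.hpp>

glm::mat4 composeGeometricOffset(const glm::vec3& scaling, const glm::quat& rotation, const glm::vec3& translation) {
    return glm::translate(glm::mat4(1.0f), translation) *
           glm::mat4_cast(rotation) *
           glm::scale(glm::mat4(1.0f), scaling);
}

// As the comment above says, the offset affects only the node's own geometry:
//   selfGlobal  = parentGlobal * localTransform * geometricOffset;   // rendered geometry
//   childGlobal = parentGlobal * localTransform * childLocal;        // offset not inherited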
- bool hasGeometricOffset; - glm::vec3 geometricTranslation; - glm::quat geometricRotation; - glm::vec3 geometricScaling; + glm::mat4 geometricOffset; // globalTransform is the transform of the joint with all parent transforms applied, plus the geometric offset glm::mat4 localTransform; From ecdca05679349f9b3add4ed9f1aa4d2f5527cd5c Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 30 Oct 2019 14:28:55 -0700 Subject: [PATCH 093/121] Do not apply geometric offset to loaded FBX. Proper fix TBD --- libraries/fbx/src/FBXSerializer.cpp | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 33ffdb5714..91989e5a90 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1340,9 +1340,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.rotation *= upAxisZRotation; joint.translation = upAxisZRotation * joint.translation; } - if (fbxModel.hasGeometricOffset) { - joint.postTransform *= joint.geometricOffset; - } glm::quat combinedRotation = joint.preRotation * joint.rotation * joint.postRotation; joint.localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(combinedRotation) * joint.postTransform; @@ -1358,14 +1355,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const joint.globalTransform = parentJoint.globalTransform * joint.localTransform; joint.inverseDefaultRotation = glm::inverse(combinedRotation) * parentJoint.inverseDefaultRotation; joint.distanceToParent = glm::distance(extractTranslation(parentJoint.transform), extractTranslation(joint.transform)); - - if (fbxModel.hasGeometricOffset) { - // Per the FBX standard, geometric offset should not propagate to children. - // However, we must be careful when modifying the behavior of FBXSerializer. - // So, we leave this here, as a breakpoint for debugging, or stub for implementation. - // qCDebug(modelformat) << "Geometric offset encountered on non-leaf node. jointIndex: " << jointIndex << ", modelURL: " << url; - // joint.preTransform = glm::inverse(parentJoint.geometricOffset) * joint.preTransform; - } } joint.inverseBindRotation = joint.inverseDefaultRotation; @@ -1381,9 +1370,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } else { transformForCluster = localTransformForCluster; } - if (fbxModel.hasGeometricOffset) { - transformForCluster = transformForCluster * joint.geometricOffset; - } } else { transformForCluster = joint.transform; } From 4ecd25c8bbebe16cfdeb14afa5b529b61072a523 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 30 Oct 2019 14:32:36 -0700 Subject: [PATCH 094/121] Add TODO for joint.geometricOffset --- libraries/hfm/src/hfm/HFM.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 11b32dc4b3..c61f03d070 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -113,7 +113,9 @@ public: bool isSkeletonJoint; bool bindTransformFoundInCluster; + // geometric offset is applied in local space but does NOT affect children. 
+ // TODO: Apply hfm::Joint.geometricOffset to transforms in the model preparation step glm::mat4 geometricOffset; // globalTransform is the transform of the joint with all parent transforms applied, plus the geometric offset From 927e08acdba2ede624145dad7f37f3cab6265776 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Sun, 20 Oct 2019 23:59:59 -0700 Subject: [PATCH 095/121] Explore the simple mesh pure pos triangle list idea --- libraries/hfm/src/hfm/HFM.h | 2 ++ libraries/hfm/src/hfm/HFMModelMath.cpp | 20 ++++++++++++++++++++ libraries/hfm/src/hfm/HFMModelMath.h | 10 ++++++++++ 3 files changed, 32 insertions(+) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index c61f03d070..bfea8a66af 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -260,6 +260,8 @@ public: graphics::MeshPointer _mesh; bool wasCompressed { false }; + + MeshIndexedTrianglesPos _meshAsIndexedTrianglePos; }; /// A single animation frame. diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index 0026378060..cf25c5fed6 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -139,4 +139,24 @@ ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const s return reweightedDeformers; } + +MeshIndexedTrianglesPos generateMeshIndexedTrianglePos(const std::vector& srcVertices, const std::vector srcParts) { + + auto newIndicesCount = 0; + for (const auto& part : srcParts) { + newIndicesCount += part.triangleIndices.size() + part.quadTrianglesIndices.size(); + } + + MeshIndexedTrianglesPos dest; + dest.indices.reserve(newIndicesCount); + for (const auto& part : srcParts) { + dest.indices.insert(dest.indices.end(), part.triangleIndices.cbegin(), part.triangleIndices.cend()); + dest.indices.insert(dest.indices.end(), part.quadTrianglesIndices.cbegin(), part.quadTrianglesIndices.cend()); + } + + dest.vertices = srcVertices; + + return dest; +} + }; diff --git a/libraries/hfm/src/hfm/HFMModelMath.h b/libraries/hfm/src/hfm/HFMModelMath.h index b80adad3d0..59c64fc490 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.h +++ b/libraries/hfm/src/hfm/HFMModelMath.h @@ -36,6 +36,16 @@ public: const uint16_t DEFAULT_SKINNING_WEIGHTS_PER_VERTEX = 4; ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const std::vector skinClusters, const uint16_t weightsPerVertex = DEFAULT_SKINNING_WEIGHTS_PER_VERTEX); + + +struct MeshIndexedTrianglesPos { +public: + std::vector vertices; + std::vector indices; +}; + +MeshIndexedTrianglesPos generateMeshIndexedTrianglePos(const std::vector& srcVertices, const std::vector srcParts); + }; #endif // #define hifi_hfm_ModelMath_h From 24e6a966a8bd7e3d8bf803c50bdf70692366558b Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Mon, 21 Oct 2019 00:33:46 -0700 Subject: [PATCH 096/121] Keep exploring --- libraries/hfm/src/hfm/HFM.h | 3 +- libraries/hfm/src/hfm/HFMModelMath.cpp | 52 ++++++++++++++++++++++---- 2 files changed, 47 insertions(+), 8 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index bfea8a66af..41f2e1d501 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -261,7 +261,8 @@ public: graphics::MeshPointer _mesh; bool wasCompressed { false }; - MeshIndexedTrianglesPos _meshAsIndexedTrianglePos; + std::vector uniqueVertices; + std::vector trianglesIndices; }; /// A single animation frame. 
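// The HFMModelMath change below builds the position-only mesh by deduplicating vertices:
// it records an old-index -> new-index remap while copying only the first occurrence of
// each unique position. A standalone sketch of that idea; it assumes glm's experimental
// std::hash support (glm/gtx/hash.hpp) for the unordered_map key, which may differ from
// the includes the patch actually uses:

#define GLM_ENABLE_EXPERIMENTAL
#include <cstdint>
#include <unordered_map>
#include <vector>
#include <glm/glm.hpp>
#include <glm/gtx/hash.hpp>

std::vector<uint32_t> deduplicatePositions(const std::vector<glm::vec3>& oldVertices,
                                           std::vector<glm::vec3>& uniqueVertices) {
    std::vector<uint32_t> oldToNewIndex(oldVertices.size());
    std::unordered_map<glm::vec3, uint32_t> seen;
    uniqueVertices.clear();
    for (uint32_t oldIndex = 0; oldIndex < (uint32_t)oldVertices.size(); ++oldIndex) {
        const glm::vec3& v = oldVertices[oldIndex];
        auto found = seen.find(v);
        if (found != seen.end()) {
            oldToNewIndex[oldIndex] = found->second;      // position already emitted
        } else {
            uint32_t newIndex = (uint32_t)uniqueVertices.size();
            seen[v] = newIndex;
            oldToNewIndex[oldIndex] = newIndex;
            uniqueVertices.push_back(v);                  // first occurrence
        }
    }
    // Any old index buffer can now be rewritten as oldToNewIndex[oldIdx].
    return oldToNewIndex;
}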
diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index cf25c5fed6..b5ff47e6c0 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -14,6 +14,8 @@ #include #include "ModelFormatLogging.h" +#include + namespace hfm { void forEachIndex(const hfm::MeshPart& meshPart, std::function func) { @@ -142,19 +144,55 @@ ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const s MeshIndexedTrianglesPos generateMeshIndexedTrianglePos(const std::vector& srcVertices, const std::vector srcParts) { + MeshIndexedTrianglesPos dest; + dest.vertices.resize(srcVertices.size()); + + std::vector remap(srcVertices.size()); + { + std::unordered_map uniqueVertices; + int vi = 0; + int vu = 0; + for (const auto& v : srcVertices) { + auto foundIndex = uniqueVertices.find(v); + if (foundIndex != uniqueVertices.end()) { + remap[vi] = foundIndex->second; + } else { + uniqueVertices[v] = vu; + remap[vi] = vu; + dest.vertices[vu] = v; + vu++; + } + ++vi; + } + if (uniqueVertices.size() < srcVertices.size()) { + dest.vertices.resize(uniqueVertices.size()); + dest.vertices.shrink_to_fit(); + + } + } + auto newIndicesCount = 0; for (const auto& part : srcParts) { newIndicesCount += part.triangleIndices.size() + part.quadTrianglesIndices.size(); } - MeshIndexedTrianglesPos dest; - dest.indices.reserve(newIndicesCount); - for (const auto& part : srcParts) { - dest.indices.insert(dest.indices.end(), part.triangleIndices.cbegin(), part.triangleIndices.cend()); - dest.indices.insert(dest.indices.end(), part.quadTrianglesIndices.cbegin(), part.quadTrianglesIndices.cend()); - } + { + dest.indices.resize(newIndicesCount); + int i = 0; + for (const auto& part : srcParts) { + for (const auto& qti : part.quadTrianglesIndices) { + dest.indices[i] = remap[qti]; + ++i; + } + for (const auto& ti : part.quadTrianglesIndices) { + dest.indices[i] = remap[ti]; + ++i; + } - dest.vertices = srcVertices; + // dest.indices.insert(dest.indices.end(), part.quadTrianglesIndices.cbegin(), part.quadTrianglesIndices.cend()); + // dest.indices.insert(dest.indices.end(), part.triangleIndices.cbegin(), part.triangleIndices.cend()); + } + } return dest; } From 465e8c3e18b20c266731882fddf2149308157df9 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Mon, 21 Oct 2019 16:29:35 -0700 Subject: [PATCH 097/121] Prototyping the slim mesh generation --- libraries/fbx/src/FBXSerializer_Mesh.cpp | 1 + libraries/hfm/src/hfm/HFM.h | 1 + libraries/hfm/src/hfm/HFMModelMath.cpp | 19 +++++++++++++------ libraries/hfm/src/hfm/HFMModelMath.h | 3 ++- .../src/model-networking/ModelCache.cpp | 8 ++++++++ libraries/shared/src/GLMHelpers.h | 10 ++++++++++ 6 files changed, 35 insertions(+), 7 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index e687f5e9f2..895059c6ad 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -174,6 +174,7 @@ void appendIndex(MeshData& data, QVector& indices, int index, bool deduplic data.indices.insert(vertex, newIndex); data.extracted.newIndices.insert(vertexIndex, newIndex); data.extracted.mesh.vertices.append(position); + data.extracted.mesh.positions.emplace_back(position); data.extracted.mesh.originalIndices.append(vertexIndex); data.extracted.mesh.normals.append(normal); data.extracted.mesh.texCoords.append(vertex.texCoord); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 41f2e1d501..ee3a25fc46 100644 
--- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -235,6 +235,7 @@ public: std::vector parts; + std::vector positions; QVector vertices; QVector normals; QVector tangents; diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index b5ff47e6c0..f34a43bf19 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -16,6 +16,9 @@ #include +#include +#include + namespace hfm { void forEachIndex(const hfm::MeshPart& meshPart, std::function func) { @@ -142,12 +145,13 @@ ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const s } -MeshIndexedTrianglesPos generateMeshIndexedTrianglePos(const std::vector& srcVertices, const std::vector srcParts) { +const MeshIndexedTrianglesPos generateMeshIndexedTrianglePos(const std::vector& srcVertices, const std::vector& srcParts) { MeshIndexedTrianglesPos dest; - dest.vertices.resize(srcVertices.size()); + // dest.vertices.resize(srcVertices.size()); + dest.vertices = srcVertices; - std::vector remap(srcVertices.size()); + /* std::vector remap(srcVertices.size()); { std::unordered_map uniqueVertices; int vi = 0; @@ -170,7 +174,7 @@ MeshIndexedTrianglesPos generateMeshIndexedTrianglePos(const std::vector vertices; std::vector indices; + std::vector parts; // Offset in the indices, Number of indices }; -MeshIndexedTrianglesPos generateMeshIndexedTrianglePos(const std::vector& srcVertices, const std::vector srcParts); +const MeshIndexedTrianglesPos generateMeshIndexedTrianglePos(const std::vector& srcVertices, const std::vector& srcParts); }; diff --git a/libraries/model-networking/src/model-networking/ModelCache.cpp b/libraries/model-networking/src/model-networking/ModelCache.cpp index bb911c6914..f427326f68 100644 --- a/libraries/model-networking/src/model-networking/ModelCache.cpp +++ b/libraries/model-networking/src/model-networking/ModelCache.cpp @@ -29,6 +29,8 @@ #include #include +#include + Q_LOGGING_CATEGORY(trace_resource_parse_geometry, "trace.resource.parse.geometry") class GeometryExtra { @@ -320,6 +322,7 @@ void ModelResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const Mate _hfmModel = hfmModel; _materialMapping = materialMapping; + // Copy materials QHash materialIDAtlas; for (const HFMMaterial& material : _hfmModel->materials) { @@ -328,11 +331,16 @@ void ModelResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const Mate } std::shared_ptr meshes = std::make_shared(); + std::vector triangleListMeshes = std::vector(); int meshID = 0; for (const HFMMesh& mesh : _hfmModel->meshes) { // Copy mesh pointers meshes->emplace_back(mesh._mesh); meshID++; + + auto simpleMesh = hfm::generateMeshIndexedTrianglePos(mesh.positions, mesh.parts); + + triangleListMeshes.emplace_back(simpleMesh); } _meshes = meshes; diff --git a/libraries/shared/src/GLMHelpers.h b/libraries/shared/src/GLMHelpers.h index cfb4bb6398..5787295da6 100644 --- a/libraries/shared/src/GLMHelpers.h +++ b/libraries/shared/src/GLMHelpers.h @@ -392,4 +392,14 @@ inline glm::vec4 extractFov( const glm::mat4& m) { return result; } +inline bool operator<(const glm::vec3& lhs, const glm::vec3& rhs) { + return (lhs.x < rhs.x) || ( + (lhs.x == rhs.x) && ( + (lhs.y < rhs.y) || ( + (lhs.y == rhs.y) && (lhs.z < rhs.z) + ) + ) + ); +} + #endif // hifi_GLMHelpers_h From e9ce467eb9e7d42aafdaccecb0a2c7aa3ca3cff3 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Tue, 22 Oct 2019 01:43:08 -0700 Subject: [PATCH 098/121] Crahs because of ? 
--- libraries/fbx/src/FBXSerializer_Mesh.cpp | 1 - libraries/hfm/src/hfm/HFM.h | 14 +++++-- libraries/hfm/src/hfm/HFMModelMath.cpp | 9 ++-- libraries/hfm/src/hfm/HFMModelMath.h | 13 +----- .../model-baker/src/model-baker/Baker.cpp | 41 ++++++++++++++++--- .../src/model-networking/ModelCache.cpp | 5 --- 6 files changed, 53 insertions(+), 30 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index 895059c6ad..e687f5e9f2 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -174,7 +174,6 @@ void appendIndex(MeshData& data, QVector& indices, int index, bool deduplic data.indices.insert(vertex, newIndex); data.extracted.newIndices.insert(vertexIndex, newIndex); data.extracted.mesh.vertices.append(position); - data.extracted.mesh.positions.emplace_back(position); data.extracted.mesh.originalIndices.append(vertexIndex); data.extracted.mesh.normals.append(normal); data.extracted.mesh.texCoords.append(vertex.texCoord); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index ee3a25fc46..419f47d680 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -229,14 +229,22 @@ public: bool needTangentSpace() const; }; + +/// Simple Triangle List vertices; + std::vector indices; + std::vector parts; // Offset in the indices, Number of indices +}; + /// A single mesh (with optional blendshapes). class Mesh { public: std::vector parts; - std::vector positions; QVector vertices; + std::vector _vertices; QVector normals; QVector tangents; QVector colors; @@ -255,6 +263,8 @@ public: // Blendshape attributes QVector blendshapes; + // Simple Triangle List Mesh generated during baking + hfm::TriangleListMesh triangleListMesh; QVector originalIndices; // Original indices of the vertices unsigned int meshIndex; // the order the meshes appeared in the object file @@ -262,8 +272,6 @@ public: graphics::MeshPointer _mesh; bool wasCompressed { false }; - std::vector uniqueVertices; - std::vector trianglesIndices; }; /// A single animation frame. 
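// Reconstructed shape of the new triangle-list mesh (the element types do not survive in
// the listing above): deduplicated positions, one flat triangle index buffer, and one
// (offset, count) pair per part, as consumed by generateTriangleListMesh below. The index
// type is an assumption here (uint32_t); the parts are glm::ivec2 as in the later patches.

#include <cstdint>
#include <vector>
#include <glm/glm.hpp>

struct TriangleListMeshSketch {
    std::vector<glm::vec3> vertices;   // deduplicated positions
    std::vector<uint32_t> indices;     // triangle list, 3 indices per triangle
    std::vector<glm::ivec2> parts;     // x = offset into indices, y = number of indices
};

// Example of walking one part's triangles:
template <typename Visitor>
void forEachTriangleInPart(const TriangleListMeshSketch& mesh, size_t partIndex, Visitor&& visit) {
    const glm::ivec2& part = mesh.parts[partIndex];
    for (int i = part.x; i + 2 < part.x + part.y; i += 3) {
        visit(mesh.vertices[mesh.indices[i]],
              mesh.vertices[mesh.indices[i + 1]],
              mesh.vertices[mesh.indices[i + 2]]);
    }
}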
diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index f34a43bf19..1678d5d405 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -145,11 +145,12 @@ ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const s } -const MeshIndexedTrianglesPos generateMeshIndexedTrianglePos(const std::vector& srcVertices, const std::vector& srcParts) { +const TriangleListMesh generateTriangleListMesh(const std::vector& srcVertices, const std::vector& srcParts) { - MeshIndexedTrianglesPos dest; - // dest.vertices.resize(srcVertices.size()); - dest.vertices = srcVertices; + TriangleListMesh dest; + + // just copy vertices + dest.vertices.insert(dest.vertices.end(), srcVertices.cbegin(), srcVertices.cend()); /* std::vector remap(srcVertices.size()); { diff --git a/libraries/hfm/src/hfm/HFMModelMath.h b/libraries/hfm/src/hfm/HFMModelMath.h index 38d3262042..dc397c5e6f 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.h +++ b/libraries/hfm/src/hfm/HFMModelMath.h @@ -25,8 +25,7 @@ void calculateExtentsForShape(hfm::Shape& shape, const std::vector& m void calculateExtentsForModel(Extents& modelExtents, const std::vector& shapes); -class ReweightedDeformers { -public: +struct ReweightedDeformers { std::vector indices; std::vector weights; uint16_t weightsPerVertex { 0 }; @@ -37,15 +36,7 @@ const uint16_t DEFAULT_SKINNING_WEIGHTS_PER_VERTEX = 4; ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const std::vector skinClusters, const uint16_t weightsPerVertex = DEFAULT_SKINNING_WEIGHTS_PER_VERTEX); - -struct MeshIndexedTrianglesPos { -public: - std::vector vertices; - std::vector indices; - std::vector parts; // Offset in the indices, Number of indices -}; - -const MeshIndexedTrianglesPos generateMeshIndexedTrianglePos(const std::vector& srcVertices, const std::vector& srcParts); +const TriangleListMesh generateTriangleListMesh(const std::vector& srcVertices, const std::vector& srcParts); }; diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index c63495c169..c6c8be4bdd 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -23,6 +23,7 @@ #include "CalculateExtentsTask.h" #include "BuildDracoMeshTask.h" #include "ParseFlowDataTask.h" +#include namespace baker { @@ -49,6 +50,29 @@ namespace baker { } }; + class BuildMeshTriangleListTask { + public: + using Input = std::vector; + using Output = std::vector; + using JobModel = Job::ModelIO; + + void run(const BakeContextPointer& context, const Input& input, Output& output) { + const auto& meshesIn = input; + auto& indexedTrianglesMeshOut = output; + indexedTrianglesMeshOut.clear(); + indexedTrianglesMeshOut.resize(meshesIn.size()); + + for (int i = 0; i < meshesIn.size(); i++) { + auto& mesh = meshesIn[i]; + + auto meshPointer = const_cast (&mesh); + meshPointer->_vertices = meshPointer->vertices.toStdVector(); + + indexedTrianglesMeshOut[i] = hfm::generateTriangleListMesh(meshPointer->_vertices, mesh.parts); + } + } + }; + class BuildBlendshapesTask { public: using Input = VaryingSet3, std::vector>; @@ -80,21 +104,23 @@ namespace baker { class BuildMeshesTask { public: - using Input = VaryingSet5, std::vector, NormalsPerMesh, TangentsPerMesh, BlendshapesPerMesh>; + using Input = VaryingSet6, std::vector, std::vector, NormalsPerMesh, TangentsPerMesh, BlendshapesPerMesh>; using Output = std::vector; using JobModel = 
Job::ModelIO; void run(const BakeContextPointer& context, const Input& input, Output& output) { auto& meshesIn = input.get0(); int numMeshes = (int)meshesIn.size(); - auto& graphicsMeshesIn = input.get1(); - auto& normalsPerMeshIn = input.get2(); - auto& tangentsPerMeshIn = input.get3(); - auto& blendshapesPerMeshIn = input.get4(); + auto& triangleListMeshesIn = input.get1(); + auto& graphicsMeshesIn = input.get2(); + auto& normalsPerMeshIn = input.get3(); + auto& tangentsPerMeshIn = input.get4(); + auto& blendshapesPerMeshIn = input.get5(); auto meshesOut = meshesIn; for (int i = 0; i < numMeshes; i++) { auto& meshOut = meshesOut[i]; + meshOut.triangleListMesh = triangleListMeshesIn[i]; meshOut._mesh = safeGet(graphicsMeshesIn, i); meshOut.normals = QVector::fromStdVector(safeGet(normalsPerMeshIn, i)); meshOut.tangents = QVector::fromStdVector(safeGet(tangentsPerMeshIn, i)); @@ -162,6 +188,9 @@ namespace baker { const auto collectShapeVerticesInputs = CollectShapeVerticesTask::Input(meshesIn, shapesIn, jointsIn, skinDeformersIn).asVarying(); const auto shapeVerticesPerJoint = model.addJob("CollectShapeVertices", collectShapeVerticesInputs); + // Build the slim triangle list mesh for each hfm::mesh + const auto triangleListMeshes = model.addJob("BuildMeshTriangleListTask", meshesIn); + // Build the graphics::MeshPointer for each hfm::Mesh const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, skinDeformersIn).asVarying(); const auto graphicsMeshes = model.addJob("BuildGraphicsMesh", buildGraphicsMeshInputs); @@ -200,7 +229,7 @@ namespace baker { // Combine the outputs into a new hfm::Model const auto buildBlendshapesInputs = BuildBlendshapesTask::Input(blendshapesPerMeshIn, normalsPerBlendshapePerMesh, tangentsPerBlendshapePerMesh).asVarying(); const auto blendshapesPerMeshOut = model.addJob("BuildBlendshapes", buildBlendshapesInputs); - const auto buildMeshesInputs = BuildMeshesTask::Input(meshesIn, graphicsMeshes, normalsPerMesh, tangentsPerMesh, blendshapesPerMeshOut).asVarying(); + const auto buildMeshesInputs = BuildMeshesTask::Input(meshesIn, triangleListMeshes, graphicsMeshes, normalsPerMesh, tangentsPerMesh, blendshapesPerMeshOut).asVarying(); const auto meshesOut = model.addJob("BuildMeshes", buildMeshesInputs); const auto buildModelInputs = BuildModelTask::Input(hfmModelIn, meshesOut, jointsOut, jointRotationOffsets, jointIndices, flowData, shapeVerticesPerJoint, shapesOut, modelExtentsOut).asVarying(); const auto hfmModelOut = model.addJob("BuildModel", buildModelInputs); diff --git a/libraries/model-networking/src/model-networking/ModelCache.cpp b/libraries/model-networking/src/model-networking/ModelCache.cpp index f427326f68..e9674d6d26 100644 --- a/libraries/model-networking/src/model-networking/ModelCache.cpp +++ b/libraries/model-networking/src/model-networking/ModelCache.cpp @@ -331,16 +331,11 @@ void ModelResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const Mate } std::shared_ptr meshes = std::make_shared(); - std::vector triangleListMeshes = std::vector(); int meshID = 0; for (const HFMMesh& mesh : _hfmModel->meshes) { // Copy mesh pointers meshes->emplace_back(mesh._mesh); meshID++; - - auto simpleMesh = hfm::generateMeshIndexedTrianglePos(mesh.positions, mesh.parts); - - triangleListMeshes.emplace_back(simpleMesh); } _meshes = meshes; From 7d37a064f2572a97d60b0c9039c6661d1ab96e02 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 30 Oct 2019 16:22:00 -0700 
Subject: [PATCH 099/121] Fix crash --- libraries/hfm/src/hfm/HFMModelMath.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index 1678d5d405..1086fb711c 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -190,7 +190,7 @@ const TriangleListMesh generateTriangleListMesh(const std::vector& sr dest.indices[i] = qti; //remap[qti]; ++i; } - for (const auto& ti : part.quadTrianglesIndices) { + for (const auto& ti : part.triangleIndices) { dest.indices[i] = ti; //remap[ti]; ++i; } From 023d73a25da3b83ba3aea03acdc109043def44e3 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 31 Oct 2019 10:48:38 -0700 Subject: [PATCH 100/121] Finish TriangleListMesh generation, rename some things --- libraries/hfm/src/hfm/HFM.h | 2 +- libraries/hfm/src/hfm/HFMModelMath.cpp | 48 +++++++++---------- .../model-baker/src/model-baker/Baker.cpp | 2 +- 3 files changed, 25 insertions(+), 27 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 419f47d680..07dc17c02c 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -230,7 +230,7 @@ public: }; -/// Simple Triangle List vertices; std::vector indices; diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index 1086fb711c..1aeaf6d2b9 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -16,6 +16,9 @@ #include +// TODO: Remove after testing +#include + #include #include @@ -144,38 +147,36 @@ ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const s return reweightedDeformers; } - const TriangleListMesh generateTriangleListMesh(const std::vector& srcVertices, const std::vector& srcParts) { TriangleListMesh dest; - // just copy vertices - dest.vertices.insert(dest.vertices.end(), srcVertices.cbegin(), srcVertices.cend()); + // copy vertices for now + dest.vertices = srcVertices; - /* std::vector remap(srcVertices.size()); + std::vector oldToNewIndex(srcVertices.size()); { - std::unordered_map uniqueVertices; - int vi = 0; - int vu = 0; - for (const auto& v : srcVertices) { - auto foundIndex = uniqueVertices.find(v); - if (foundIndex != uniqueVertices.end()) { - remap[vi] = foundIndex->second; + std::unordered_map uniqueVertexToNewIndex; + int oldIndex = 0; + int newIndex = 0; + for (const auto& srcVertex : srcVertices) { + auto foundIndex = uniqueVertexToNewIndex.find(srcVertex); + if (foundIndex != uniqueVertexToNewIndex.end()) { + oldToNewIndex[oldIndex] = foundIndex->second; } else { - uniqueVertices[v] = vu; - remap[vi] = vu; - dest.vertices[vu] = v; - vu++; + uniqueVertexToNewIndex[srcVertex] = newIndex; + oldToNewIndex[oldIndex] = newIndex; + dest.vertices[newIndex] = srcVertex; + ++newIndex; } - ++vi; + ++oldIndex; } - if (uniqueVertices.size() < srcVertices.size()) { - dest.vertices.resize(uniqueVertices.size()); + if (uniqueVertexToNewIndex.size() < srcVertices.size()) { + dest.vertices.resize(uniqueVertexToNewIndex.size()); dest.vertices.shrink_to_fit(); - } } -*/ + auto newIndicesCount = 0; for (const auto& part : srcParts) { newIndicesCount += part.triangleIndices.size() + part.quadTrianglesIndices.size(); @@ -187,18 +188,15 @@ const TriangleListMesh generateTriangleListMesh(const std::vector& sr for (const auto& part : srcParts) { glm::ivec2 spart(i, 0); for (const auto& qti : part.quadTrianglesIndices) { - dest.indices[i] = qti; 
//remap[qti]; + dest.indices[i] = oldToNewIndex[qti]; ++i; } for (const auto& ti : part.triangleIndices) { - dest.indices[i] = ti; //remap[ti]; + dest.indices[i] = oldToNewIndex[ti]; ++i; } spart.y = i - spart.x; dest.parts.push_back(spart); - - // dest.indices.insert(dest.indices.end(), part.quadTrianglesIndices.cbegin(), part.quadTrianglesIndices.cend()); - // dest.indices.insert(dest.indices.end(), part.triangleIndices.cbegin(), part.triangleIndices.cend()); } } diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index c6c8be4bdd..f17db7397e 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -65,7 +65,7 @@ namespace baker { for (int i = 0; i < meshesIn.size(); i++) { auto& mesh = meshesIn[i]; - auto meshPointer = const_cast (&mesh); + auto meshPointer = const_cast(&mesh); meshPointer->_vertices = meshPointer->vertices.toStdVector(); indexedTrianglesMeshOut[i] = hfm::generateTriangleListMesh(meshPointer->_vertices, mesh.parts); From 297ce9d88bbe94fabf95823fb804a2b89731cc08 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 31 Oct 2019 13:42:19 -0700 Subject: [PATCH 101/121] Remove debug --- libraries/hfm/src/hfm/HFM.h | 1 - libraries/hfm/src/hfm/HFMModelMath.cpp | 4 ---- .../model-networking/src/model-networking/ModelCache.cpp | 3 --- 3 files changed, 8 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 07dc17c02c..ec06832f22 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -244,7 +244,6 @@ public: std::vector parts; QVector vertices; - std::vector _vertices; QVector normals; QVector tangents; QVector colors; diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index 1aeaf6d2b9..d0288d684c 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -12,13 +12,9 @@ #include "HFMModelMath.h" #include -#include "ModelFormatLogging.h" #include -// TODO: Remove after testing -#include - #include #include diff --git a/libraries/model-networking/src/model-networking/ModelCache.cpp b/libraries/model-networking/src/model-networking/ModelCache.cpp index e9674d6d26..bb911c6914 100644 --- a/libraries/model-networking/src/model-networking/ModelCache.cpp +++ b/libraries/model-networking/src/model-networking/ModelCache.cpp @@ -29,8 +29,6 @@ #include #include -#include - Q_LOGGING_CATEGORY(trace_resource_parse_geometry, "trace.resource.parse.geometry") class GeometryExtra { @@ -322,7 +320,6 @@ void ModelResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const Mate _hfmModel = hfmModel; _materialMapping = materialMapping; - // Copy materials QHash materialIDAtlas; for (const HFMMaterial& material : _hfmModel->materials) { From 6666df6137f79b50c79f5ac7293dca60e58a4dfc Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 31 Oct 2019 14:49:28 -0700 Subject: [PATCH 102/121] Fix build warnings/errors --- libraries/model-baker/src/model-baker/Baker.cpp | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index f17db7397e..bd39b3178f 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -62,13 +62,10 @@ namespace baker { indexedTrianglesMeshOut.clear(); indexedTrianglesMeshOut.resize(meshesIn.size()); - for (int i = 0; i < meshesIn.size(); 
i++) { + for (size_t i = 0; i < meshesIn.size(); i++) { auto& mesh = meshesIn[i]; - - auto meshPointer = const_cast(&mesh); - meshPointer->_vertices = meshPointer->vertices.toStdVector(); - - indexedTrianglesMeshOut[i] = hfm::generateTriangleListMesh(meshPointer->_vertices, mesh.parts); + const auto verticesStd = mesh.vertices.toStdVector(); + indexedTrianglesMeshOut[i] = hfm::generateTriangleListMesh(verticesStd, mesh.parts); } } }; From a4c10e2903ab56d2f8bda472b5e3a40cf27d87c8 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 1 Nov 2019 10:39:43 -0700 Subject: [PATCH 103/121] Attempt fix for Android build --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index f8fd5b7637..f0304bbaed 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -220,6 +220,7 @@ find_package( Threads ) add_definitions(-DGLM_FORCE_RADIANS) add_definitions(-DGLM_ENABLE_EXPERIMENTAL) add_definitions(-DGLM_FORCE_CTOR_INIT) +add_definitions(-DGLM_LANG_STL11_FORCED) # Workaround for GLM not detecting support for C++11 templates on Android if (WIN32) # Deal with fakakta Visual Studo 2017 bug From d94cd185c1aba419fe35336a044a140312116316 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 31 Oct 2019 15:54:17 -0700 Subject: [PATCH 104/121] Introduce extents for TriangleListMesh and calculate them --- libraries/hfm/src/hfm/HFM.h | 1 + libraries/hfm/src/hfm/HFMModelMath.cpp | 16 ++++++++++++++++ libraries/hfm/src/hfm/HFMModelMath.h | 2 ++ 3 files changed, 19 insertions(+) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index ec06832f22..d141c88cd8 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -235,6 +235,7 @@ struct TriangleListMesh { std::vector vertices; std::vector indices; std::vector parts; // Offset in the indices, Number of indices + std::vector partExtents; // Extents of each part with no transform applied. Same length as parts. }; /// A single mesh (with optional blendshapes). 
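// partExtents caches an axis-aligned bounding box per part, in mesh-local space, so later
// passes can derive a shape's bounds by transforming a precomputed box instead of
// re-walking vertices. A standalone sketch of the accumulation done in the HFMModelMath
// change below, using a simplified min/max stand-in rather than the engine's Extents class:

#include <cfloat>
#include <cstdint>
#include <vector>
#include <glm/glm.hpp>

struct SimpleExtents {
    glm::vec3 minimum { FLT_MAX };
    glm::vec3 maximum { -FLT_MAX };
    void addPoint(const glm::vec3& p) {
        minimum = glm::min(minimum, p);
        maximum = glm::max(maximum, p);
    }
};

SimpleExtents extentsForPart(const std::vector<glm::vec3>& vertices,
                             const std::vector<uint32_t>& indices,
                             const glm::ivec2& part) {             // x = offset, y = count
    SimpleExtents extents;
    for (int i = part.x; i < part.x + part.y; ++i) {
        extents.addPoint(vertices[indices[i]]);
    }
    return extents;
}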
diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index d0288d684c..7ce06821ec 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -40,6 +40,20 @@ void thickenFlatExtents(Extents& extents) { extents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); } +void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh) { + triangleListMesh.partExtents.resize(triangleListMesh.parts.size()); + for (size_t partIndex = 0; partIndex < triangleListMesh.parts.size(); ++partIndex) { + const auto& part = triangleListMesh.parts[partIndex]; + auto& extents = triangleListMesh.partExtents[partIndex]; + int partEnd = part.x + part.y; + for (int i = part.x; i < partEnd; ++i) { + auto index = triangleListMesh.indices[i]; + const auto& position = triangleListMesh.vertices[index]; + extents.addPoint(position); + } + } +} + void calculateExtentsForShape(hfm::Shape& shape, const std::vector& meshes, const std::vector joints) { auto& shapeExtents = shape.transformedExtents; shapeExtents.reset(); @@ -196,6 +210,8 @@ const TriangleListMesh generateTriangleListMesh(const std::vector& sr } } + calculateExtentsForTriangleListMesh(dest); + return dest; } diff --git a/libraries/hfm/src/hfm/HFMModelMath.h b/libraries/hfm/src/hfm/HFMModelMath.h index dc397c5e6f..3ed0584fac 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.h +++ b/libraries/hfm/src/hfm/HFMModelMath.h @@ -20,6 +20,8 @@ void forEachIndex(const hfm::MeshPart& meshPart, std::function f void initializeExtents(Extents& extents); +void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh); + // This can't be moved to model-baker until void calculateExtentsForShape(hfm::Shape& shape, const std::vector& meshes, const std::vector joints); From df4ca90d49e1743405abef4b519da85f247f2a06 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 31 Oct 2019 16:06:48 -0700 Subject: [PATCH 105/121] Remove unused cluster fields from hfm --- libraries/fbx/src/FBXSerializer.cpp | 4 +--- libraries/hfm/src/hfm/HFM.h | 2 -- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index d9a7fe1071..f09182c0e6 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1600,12 +1600,10 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const cluster.jointIndex = transformIndex; clusters.push_back(cluster); - std::vector skinClusters; // Skinned mesh instances have an hfm::SkinDeformer - skinDeformer.skinClusterIndices.reserve(clusterIDs.size()); + std::vector skinClusters; for (const auto& clusterID : clusterIDs) { const Cluster& fbxCluster = fbxClusters[clusterID]; - skinDeformer.skinClusterIndices.emplace_back(); skinClusters.emplace_back(); hfm::SkinCluster& skinCluster = skinClusters.back(); size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size()); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index d141c88cd8..20fc70f131 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -251,7 +251,6 @@ public: QVector texCoords; QVector texCoords1; - QVector clusters; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::clusters) Extents meshExtents; // DEPRECATED (see hfm::Shape::transformedExtents) glm::mat4 modelTransform; // DEPRECATED (see hfm::Joint::globalTransform, hfm::Shape::transform, hfm::Model::joints) @@ -317,7 
+316,6 @@ public: class SkinDeformer { public: - std::vector skinClusterIndices; // DEPRECATED (see hfm::Mesh.clusterIndices, hfm::Mesh.clusterWeights) std::vector clusters; }; From d9e441d65ef9b47293f917fbc7fb29765b5011a4 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 31 Oct 2019 17:37:46 -0700 Subject: [PATCH 106/121] Make shape extent calculations use slim mesh extents, and rename task to CalculateTransformedExtentsTask --- libraries/hfm/src/hfm/HFMModelMath.cpp | 18 ++++++------------ libraries/hfm/src/hfm/HFMModelMath.h | 2 +- .../model-baker/src/model-baker/Baker.cpp | 10 +++++----- ...cpp => CalculateTransformedExtentsTask.cpp} | 10 +++++----- ...ask.h => CalculateTransformedExtentsTask.h} | 8 ++++---- 5 files changed, 21 insertions(+), 27 deletions(-) rename libraries/model-baker/src/model-baker/{CalculateExtentsTask.cpp => CalculateTransformedExtentsTask.cpp} (74%) rename libraries/model-baker/src/model-baker/{CalculateExtentsTask.h => CalculateTransformedExtentsTask.h} (71%) diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index 7ce06821ec..436e520643 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -54,22 +54,16 @@ void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh) { } } -void calculateExtentsForShape(hfm::Shape& shape, const std::vector& meshes, const std::vector joints) { +void calculateExtentsForShape(hfm::Shape& shape, const std::vector& triangleListMeshes, const std::vector& joints) { auto& shapeExtents = shape.transformedExtents; shapeExtents.reset(); - const auto& mesh = meshes[shape.mesh]; - const auto& meshPart = mesh.parts[shape.meshPart]; + const auto& triangleListMesh = triangleListMeshes[shape.mesh]; + const auto& partExtent = triangleListMesh.partExtents[shape.meshPart]; - glm::mat4 transform = joints[shape.joint].transform; - forEachIndex(meshPart, [&](int32_t idx){ - if (mesh.vertices.size() <= idx) { - return; - } - const glm::vec3& vertex = mesh.vertices[idx]; - const glm::vec3 transformedVertex = glm::vec3(transform * glm::vec4(vertex, 1.0f)); - shapeExtents.addPoint(transformedVertex); - }); + const glm::mat4& transform = joints[shape.joint].transform; + shapeExtents = partExtent; + shapeExtents.transform(transform); thickenFlatExtents(shapeExtents); } diff --git a/libraries/hfm/src/hfm/HFMModelMath.h b/libraries/hfm/src/hfm/HFMModelMath.h index 3ed0584fac..ef86e7379a 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.h +++ b/libraries/hfm/src/hfm/HFMModelMath.h @@ -23,7 +23,7 @@ void initializeExtents(Extents& extents); void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh); // This can't be moved to model-baker until -void calculateExtentsForShape(hfm::Shape& shape, const std::vector& meshes, const std::vector joints); +void calculateExtentsForShape(hfm::Shape& shape, const std::vector& triangleListMeshes, const std::vector& joints); void calculateExtentsForModel(Extents& modelExtents, const std::vector& shapes); diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index bd39b3178f..662b4670ee 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -20,7 +20,7 @@ #include "CalculateBlendshapeNormalsTask.h" #include "CalculateBlendshapeTangentsTask.h" #include "PrepareJointsTask.h" -#include "CalculateExtentsTask.h" +#include "CalculateTransformedExtentsTask.h" #include "BuildDracoMeshTask.h" 
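// The rewritten calculateExtentsForShape above no longer iterates vertices: it copies the
// part's cached local-space extents and transforms them by the joint transform. Transforming
// an axis-aligned box by an arbitrary matrix means transforming its corners and
// re-accumulating min/max; a sketch of that step (the engine's Extents::transform is assumed
// to do the equivalent):

#include <cfloat>
#include <glm/glm.hpp>

struct Box { glm::vec3 minimum; glm::vec3 maximum; };

Box transformBox(const Box& box, const glm::mat4& transform) {
    Box result { glm::vec3(FLT_MAX), glm::vec3(-FLT_MAX) };
    for (int corner = 0; corner < 8; ++corner) {
        glm::vec3 p((corner & 1) ? box.maximum.x : box.minimum.x,
                    (corner & 2) ? box.maximum.y : box.minimum.y,
                    (corner & 4) ? box.maximum.z : box.minimum.z);
        glm::vec3 transformed = glm::vec3(transform * glm::vec4(p, 1.0f));
        result.minimum = glm::min(result.minimum, transformed);
        result.maximum = glm::max(result.maximum, transformed);
    }
    return result;
}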
#include "ParseFlowDataTask.h" #include @@ -200,10 +200,10 @@ namespace baker { const auto jointIndices = jointInfoOut.getN(2); // Use transform information to compute extents - const auto calculateExtentsInputs = CalculateExtentsTask::Input(modelExtentsIn, meshesIn, shapesIn, jointsOut).asVarying(); - const auto calculateExtentsOutputs = model.addJob("CalculateExtents", calculateExtentsInputs); - const auto modelExtentsOut = calculateExtentsOutputs.getN(0); - const auto shapesOut = calculateExtentsOutputs.getN(1); + const auto calculateExtentsInputs = CalculateTransformedExtentsTask::Input(modelExtentsIn, triangleListMeshes, shapesIn, jointsOut).asVarying(); + const auto calculateExtentsOutputs = model.addJob("CalculateExtents", calculateExtentsInputs); + const auto modelExtentsOut = calculateExtentsOutputs.getN(0); + const auto shapesOut = calculateExtentsOutputs.getN(1); // Parse material mapping const auto parseMaterialMappingInputs = ParseMaterialMappingTask::Input(mapping, materialMappingBaseURL).asVarying(); diff --git a/libraries/model-baker/src/model-baker/CalculateExtentsTask.cpp b/libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.cpp similarity index 74% rename from libraries/model-baker/src/model-baker/CalculateExtentsTask.cpp rename to libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.cpp index e237cdb402..028dba4939 100644 --- a/libraries/model-baker/src/model-baker/CalculateExtentsTask.cpp +++ b/libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.cpp @@ -1,5 +1,5 @@ // -// CalculateExtentsTask.cpp +// CalculateTransformedExtentsTask.cpp // model-baker/src/model-baker // // Created by Sabrina Shanman on 2019/10/04. @@ -9,13 +9,13 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // -#include "CalculateExtentsTask.h" +#include "CalculateTransformedExtentsTask.h" #include "hfm/HFMModelMath.h" -void CalculateExtentsTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) { +void CalculateTransformedExtentsTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) { const auto& modelExtentsIn = input.get0(); - const auto& meshes = input.get1(); + const auto& triangleListMeshes = input.get1(); const auto& shapesIn = input.get2(); const auto& joints = input.get3(); auto& modelExtentsOut = output.edit0(); @@ -31,7 +31,7 @@ void CalculateExtentsTask::run(const baker::BakeContextPointer& context, const I continue; } - hfm::calculateExtentsForShape(shapeOut, meshes, joints); + hfm::calculateExtentsForShape(shapeOut, triangleListMeshes, joints); } modelExtentsOut = modelExtentsIn; diff --git a/libraries/model-baker/src/model-baker/CalculateExtentsTask.h b/libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.h similarity index 71% rename from libraries/model-baker/src/model-baker/CalculateExtentsTask.h rename to libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.h index 006688ec5a..aed089a13d 100644 --- a/libraries/model-baker/src/model-baker/CalculateExtentsTask.h +++ b/libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.h @@ -1,5 +1,5 @@ // -// CalculateExtentsTask.h +// CalculateTransformedExtentsTask.h // model-baker/src/model-baker // // Created by Sabrina Shanman on 2019/10/04. @@ -17,11 +17,11 @@ // Calculates any undefined extents in the shapes and the model. Precalculated extents will be left alone. 
// Bind extents will currently not be calculated -class CalculateExtentsTask { +class CalculateTransformedExtentsTask { public: - using Input = baker::VaryingSet4, std::vector, std::vector>; + using Input = baker::VaryingSet4, std::vector, std::vector>; using Output = baker::VaryingSet2>; - using JobModel = baker::Job::ModelIO; + using JobModel = baker::Job::ModelIO; void run(const baker::BakeContextPointer& context, const Input& input, Output& output); }; From 67e05a70d4a3ab9963e093581ee41d3c3318e021 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 5 Nov 2019 10:56:29 -0800 Subject: [PATCH 107/121] Fix build error with _drawTransform --- libraries/render-utils/src/MeshPartPayload.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/render-utils/src/MeshPartPayload.cpp b/libraries/render-utils/src/MeshPartPayload.cpp index 8476b490d8..64140bbb79 100644 --- a/libraries/render-utils/src/MeshPartPayload.cpp +++ b/libraries/render-utils/src/MeshPartPayload.cpp @@ -183,7 +183,7 @@ void MeshPartPayload::render(RenderArgs* args) { auto& schema = _drawMaterials.getSchemaBuffer().get(); glm::vec4 outColor = glm::vec4(ColorUtils::tosRGBVec3(schema._albedo), schema._opacity); outColor = procedural->getColor(outColor); - procedural->prepare(batch, _drawTransform.getTranslation(), _drawTransform.getScale(), _drawTransform.getRotation(), _created, + procedural->prepare(batch, _worldFromLocalTransform.getTranslation(), _worldFromLocalTransform.getScale(), _worldFromLocalTransform.getRotation(), _created, ProceduralProgramKey(outColor.a < 1.0f)); batch._glColor4f(outColor.r, outColor.g, outColor.b, outColor.a); } else { @@ -463,7 +463,7 @@ void ModelMeshPartPayload::render(RenderArgs* args) { auto& schema = _drawMaterials.getSchemaBuffer().get(); glm::vec4 outColor = glm::vec4(ColorUtils::tosRGBVec3(schema._albedo), schema._opacity); outColor = procedural->getColor(outColor); - procedural->prepare(batch, _drawTransform.getTranslation(), _drawTransform.getScale(), _drawTransform.getRotation(), _created, + procedural->prepare(batch, _worldFromLocalTransform.getTranslation(), _worldFromLocalTransform.getScale(), _worldFromLocalTransform.getRotation(), _created, ProceduralProgramKey(outColor.a < 1.0f, _shapeKey.isDeformed(), _shapeKey.isDualQuatSkinned())); batch._glColor4f(outColor.r, outColor.g, outColor.b, outColor.a); } else { From ba257634f203c1efc3ba1781350a3aa3fe739516 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 31 Oct 2019 15:54:17 -0700 Subject: [PATCH 108/121] Introduce extents for TriangleListMesh and calculate them --- libraries/hfm/src/hfm/HFM.h | 1 + libraries/hfm/src/hfm/HFMModelMath.cpp | 16 ++++++++++++++++ libraries/hfm/src/hfm/HFMModelMath.h | 2 ++ 3 files changed, 19 insertions(+) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index ec06832f22..d141c88cd8 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -235,6 +235,7 @@ struct TriangleListMesh { std::vector vertices; std::vector indices; std::vector parts; // Offset in the indices, Number of indices + std::vector partExtents; // Extents of each part with no transform applied. Same length as parts. }; /// A single mesh (with optional blendshapes). 
diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index d0288d684c..7ce06821ec 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -40,6 +40,20 @@ void thickenFlatExtents(Extents& extents) { extents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON); } +void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh) { + triangleListMesh.partExtents.resize(triangleListMesh.parts.size()); + for (size_t partIndex = 0; partIndex < triangleListMesh.parts.size(); ++partIndex) { + const auto& part = triangleListMesh.parts[partIndex]; + auto& extents = triangleListMesh.partExtents[partIndex]; + int partEnd = part.x + part.y; + for (int i = part.x; i < partEnd; ++i) { + auto index = triangleListMesh.indices[i]; + const auto& position = triangleListMesh.vertices[index]; + extents.addPoint(position); + } + } +} + void calculateExtentsForShape(hfm::Shape& shape, const std::vector& meshes, const std::vector joints) { auto& shapeExtents = shape.transformedExtents; shapeExtents.reset(); @@ -196,6 +210,8 @@ const TriangleListMesh generateTriangleListMesh(const std::vector& sr } } + calculateExtentsForTriangleListMesh(dest); + return dest; } diff --git a/libraries/hfm/src/hfm/HFMModelMath.h b/libraries/hfm/src/hfm/HFMModelMath.h index dc397c5e6f..3ed0584fac 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.h +++ b/libraries/hfm/src/hfm/HFMModelMath.h @@ -20,6 +20,8 @@ void forEachIndex(const hfm::MeshPart& meshPart, std::function f void initializeExtents(Extents& extents); +void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh); + // This can't be moved to model-baker until void calculateExtentsForShape(hfm::Shape& shape, const std::vector& meshes, const std::vector joints); From c3af8b5da7a5950c4f67b239e42330ff64262079 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 31 Oct 2019 16:06:48 -0700 Subject: [PATCH 109/121] Remove unused cluster fields from hfm --- libraries/fbx/src/FBXSerializer.cpp | 4 +--- libraries/hfm/src/hfm/HFM.h | 2 -- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index d9a7fe1071..f09182c0e6 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1600,12 +1600,10 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const cluster.jointIndex = transformIndex; clusters.push_back(cluster); - std::vector skinClusters; // Skinned mesh instances have an hfm::SkinDeformer - skinDeformer.skinClusterIndices.reserve(clusterIDs.size()); + std::vector skinClusters; for (const auto& clusterID : clusterIDs) { const Cluster& fbxCluster = fbxClusters[clusterID]; - skinDeformer.skinClusterIndices.emplace_back(); skinClusters.emplace_back(); hfm::SkinCluster& skinCluster = skinClusters.back(); size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size()); diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index d141c88cd8..20fc70f131 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -251,7 +251,6 @@ public: QVector texCoords; QVector texCoords1; - QVector clusters; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::clusters) Extents meshExtents; // DEPRECATED (see hfm::Shape::transformedExtents) glm::mat4 modelTransform; // DEPRECATED (see hfm::Joint::globalTransform, hfm::Shape::transform, hfm::Model::joints) @@ -317,7 
+316,6 @@ public: class SkinDeformer { public: - std::vector skinClusterIndices; // DEPRECATED (see hfm::Mesh.clusterIndices, hfm::Mesh.clusterWeights) std::vector clusters; }; From 27643023a3675cbc46b7658570bf2eb47af066b4 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 31 Oct 2019 17:37:46 -0700 Subject: [PATCH 110/121] Make shape extent calculations use slim mesh extents, and rename task to CalculateTransformedExtentsTask --- libraries/hfm/src/hfm/HFMModelMath.cpp | 18 ++++++------------ libraries/hfm/src/hfm/HFMModelMath.h | 2 +- .../model-baker/src/model-baker/Baker.cpp | 10 +++++----- ...cpp => CalculateTransformedExtentsTask.cpp} | 10 +++++----- ...ask.h => CalculateTransformedExtentsTask.h} | 8 ++++---- 5 files changed, 21 insertions(+), 27 deletions(-) rename libraries/model-baker/src/model-baker/{CalculateExtentsTask.cpp => CalculateTransformedExtentsTask.cpp} (74%) rename libraries/model-baker/src/model-baker/{CalculateExtentsTask.h => CalculateTransformedExtentsTask.h} (71%) diff --git a/libraries/hfm/src/hfm/HFMModelMath.cpp b/libraries/hfm/src/hfm/HFMModelMath.cpp index 7ce06821ec..436e520643 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.cpp +++ b/libraries/hfm/src/hfm/HFMModelMath.cpp @@ -54,22 +54,16 @@ void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh) { } } -void calculateExtentsForShape(hfm::Shape& shape, const std::vector& meshes, const std::vector joints) { +void calculateExtentsForShape(hfm::Shape& shape, const std::vector& triangleListMeshes, const std::vector& joints) { auto& shapeExtents = shape.transformedExtents; shapeExtents.reset(); - const auto& mesh = meshes[shape.mesh]; - const auto& meshPart = mesh.parts[shape.meshPart]; + const auto& triangleListMesh = triangleListMeshes[shape.mesh]; + const auto& partExtent = triangleListMesh.partExtents[shape.meshPart]; - glm::mat4 transform = joints[shape.joint].transform; - forEachIndex(meshPart, [&](int32_t idx){ - if (mesh.vertices.size() <= idx) { - return; - } - const glm::vec3& vertex = mesh.vertices[idx]; - const glm::vec3 transformedVertex = glm::vec3(transform * glm::vec4(vertex, 1.0f)); - shapeExtents.addPoint(transformedVertex); - }); + const glm::mat4& transform = joints[shape.joint].transform; + shapeExtents = partExtent; + shapeExtents.transform(transform); thickenFlatExtents(shapeExtents); } diff --git a/libraries/hfm/src/hfm/HFMModelMath.h b/libraries/hfm/src/hfm/HFMModelMath.h index 3ed0584fac..ef86e7379a 100644 --- a/libraries/hfm/src/hfm/HFMModelMath.h +++ b/libraries/hfm/src/hfm/HFMModelMath.h @@ -23,7 +23,7 @@ void initializeExtents(Extents& extents); void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh); // This can't be moved to model-baker until -void calculateExtentsForShape(hfm::Shape& shape, const std::vector& meshes, const std::vector joints); +void calculateExtentsForShape(hfm::Shape& shape, const std::vector& triangleListMeshes, const std::vector& joints); void calculateExtentsForModel(Extents& modelExtents, const std::vector& shapes); diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index bd39b3178f..662b4670ee 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -20,7 +20,7 @@ #include "CalculateBlendshapeNormalsTask.h" #include "CalculateBlendshapeTangentsTask.h" #include "PrepareJointsTask.h" -#include "CalculateExtentsTask.h" +#include "CalculateTransformedExtentsTask.h" #include "BuildDracoMeshTask.h" 
#include "ParseFlowDataTask.h" #include @@ -200,10 +200,10 @@ namespace baker { const auto jointIndices = jointInfoOut.getN(2); // Use transform information to compute extents - const auto calculateExtentsInputs = CalculateExtentsTask::Input(modelExtentsIn, meshesIn, shapesIn, jointsOut).asVarying(); - const auto calculateExtentsOutputs = model.addJob("CalculateExtents", calculateExtentsInputs); - const auto modelExtentsOut = calculateExtentsOutputs.getN(0); - const auto shapesOut = calculateExtentsOutputs.getN(1); + const auto calculateExtentsInputs = CalculateTransformedExtentsTask::Input(modelExtentsIn, triangleListMeshes, shapesIn, jointsOut).asVarying(); + const auto calculateExtentsOutputs = model.addJob("CalculateExtents", calculateExtentsInputs); + const auto modelExtentsOut = calculateExtentsOutputs.getN(0); + const auto shapesOut = calculateExtentsOutputs.getN(1); // Parse material mapping const auto parseMaterialMappingInputs = ParseMaterialMappingTask::Input(mapping, materialMappingBaseURL).asVarying(); diff --git a/libraries/model-baker/src/model-baker/CalculateExtentsTask.cpp b/libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.cpp similarity index 74% rename from libraries/model-baker/src/model-baker/CalculateExtentsTask.cpp rename to libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.cpp index e237cdb402..028dba4939 100644 --- a/libraries/model-baker/src/model-baker/CalculateExtentsTask.cpp +++ b/libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.cpp @@ -1,5 +1,5 @@ // -// CalculateExtentsTask.cpp +// CalculateTransformedExtentsTask.cpp // model-baker/src/model-baker // // Created by Sabrina Shanman on 2019/10/04. @@ -9,13 +9,13 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // -#include "CalculateExtentsTask.h" +#include "CalculateTransformedExtentsTask.h" #include "hfm/HFMModelMath.h" -void CalculateExtentsTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) { +void CalculateTransformedExtentsTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) { const auto& modelExtentsIn = input.get0(); - const auto& meshes = input.get1(); + const auto& triangleListMeshes = input.get1(); const auto& shapesIn = input.get2(); const auto& joints = input.get3(); auto& modelExtentsOut = output.edit0(); @@ -31,7 +31,7 @@ void CalculateExtentsTask::run(const baker::BakeContextPointer& context, const I continue; } - hfm::calculateExtentsForShape(shapeOut, meshes, joints); + hfm::calculateExtentsForShape(shapeOut, triangleListMeshes, joints); } modelExtentsOut = modelExtentsIn; diff --git a/libraries/model-baker/src/model-baker/CalculateExtentsTask.h b/libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.h similarity index 71% rename from libraries/model-baker/src/model-baker/CalculateExtentsTask.h rename to libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.h index 006688ec5a..aed089a13d 100644 --- a/libraries/model-baker/src/model-baker/CalculateExtentsTask.h +++ b/libraries/model-baker/src/model-baker/CalculateTransformedExtentsTask.h @@ -1,5 +1,5 @@ // -// CalculateExtentsTask.h +// CalculateTransformedExtentsTask.h // model-baker/src/model-baker // // Created by Sabrina Shanman on 2019/10/04. @@ -17,11 +17,11 @@ // Calculates any undefined extents in the shapes and the model. Precalculated extents will be left alone. 
// Bind extents will currently not be calculated -class CalculateExtentsTask { +class CalculateTransformedExtentsTask { public: - using Input = baker::VaryingSet4, std::vector, std::vector>; + using Input = baker::VaryingSet4, std::vector, std::vector>; using Output = baker::VaryingSet2>; - using JobModel = baker::Job::ModelIO; + using JobModel = baker::Job::ModelIO; void run(const baker::BakeContextPointer& context, const Input& input, Output& output); }; From 7af09408460e783a25c3cab0b70f899096116346 Mon Sep 17 00:00:00 2001 From: Sam Gateau Date: Fri, 18 Oct 2019 17:45:44 -0700 Subject: [PATCH 111/121] getting coision to work --- .../src/RenderableModelEntityItem.cpp | 26 ++++++++++++++++--- libraries/hfm/src/hfm/HFM.h | 1 + 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp index 287f022007..0029a20f40 100644 --- a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp @@ -473,20 +473,38 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { // compute meshPart local transforms QVector localTransforms; const HFMModel& hfmModel = model->getHFMModel(); + uint32_t numHFMShapes = (uint32_t)hfmModel.shapes.size(); uint32_t numHFMMeshes = (uint32_t)hfmModel.meshes.size(); int totalNumVertices = 0; glm::vec3 dimensions = getScaledDimensions(); glm::mat4 invRegistraionOffset = glm::translate(dimensions * (getRegistrationPoint() - ENTITY_ITEM_DEFAULT_REGISTRATION_POINT)); - for (uint32_t i = 0; i < numHFMMeshes; i++) { - const HFMMesh& mesh = hfmModel.meshes.at(i); - if (i < hfmModel.skinDeformers.size() && hfmModel.skinDeformers[i].clusters.size() > 0) { + for (uint32_t s = 0; s < numHFMShapes; s++) { + const HFMShape& shape = hfmModel.shapes[s]; + // for (uint32_t i = 0; i < numHFMMeshes; i++) { + const HFMMesh& mesh = hfmModel.meshes.at(shape.mesh); + const HFMMeshPart& part = mesh.parts.at(shape.meshPart); + /* if (shape.skinDeformer != hfm::UNDEFINED_KEY) { + const HFMCluster& cluster = hfmModel.skinDeformers[shape.skinDeformer].clusters.at(0); + auto jointMatrix = model->getRig().getJointTransform(cluster.jointIndex); + // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later + localTransforms.push_back(invRegistraionOffset * jointMatrix * cluster.inverseBindMatrix); + } else {*/ + + if (shape.joint != hfm::UNDEFINED_KEY) { + auto jointMatrix = model->getRig().getJointTransform(shape.joint); + // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later + localTransforms.push_back(invRegistraionOffset * jointMatrix/* * cluster.inverseBindMatrix*/); + } else { + localTransforms.push_back(invRegistraionOffset); + } + /* if (i < hfmModel.skinDeformers.size() && hfmModel.skinDeformers[i].clusters.size() > 0) { const HFMCluster& cluster = hfmModel.skinDeformers[i].clusters.at(0); auto jointMatrix = model->getRig().getJointTransform(cluster.jointIndex); // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later localTransforms.push_back(invRegistraionOffset * jointMatrix * cluster.inverseBindMatrix); } else { localTransforms.push_back(invRegistraionOffset); - } + }*/ totalNumVertices += mesh.vertices.size(); } const int32_t MAX_VERTICES_PER_STATIC_MESH = 1e6; diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 
20fc70f131..f092c91e99 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -401,6 +401,7 @@ typedef hfm::Mesh HFMMesh; typedef hfm::SkinDeformer HFMSkinDeformer; typedef hfm::AnimationFrame HFMAnimationFrame; typedef hfm::Light HFMLight; +typedef hfm::Shape HFMShape; typedef hfm::Model HFMModel; typedef hfm::FlowData FlowData; From 921eed1ec165aabc79e535b25c3abb8ba8657001 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 1 Nov 2019 18:01:22 -0700 Subject: [PATCH 112/121] Remove CollisionPick::computeShapeInfo --- interface/src/raypick/CollisionPick.cpp | 234 +----------------------- interface/src/raypick/CollisionPick.h | 1 - 2 files changed, 3 insertions(+), 232 deletions(-) diff --git a/interface/src/raypick/CollisionPick.cpp b/interface/src/raypick/CollisionPick.cpp index 9f8510c603..756c8fab7f 100644 --- a/interface/src/raypick/CollisionPick.cpp +++ b/interface/src/raypick/CollisionPick.cpp @@ -121,8 +121,9 @@ bool CollisionPick::isLoaded() const { bool CollisionPick::getShapeInfoReady(const CollisionRegion& pick) { if (_mathPick.shouldComputeShapeInfo()) { if (_cachedResource && _cachedResource->isLoaded()) { - computeShapeInfo(pick, *_mathPick.shapeInfo, _cachedResource); - _mathPick.loaded = true; + // TODO: Model CollisionPick support + //computeShapeInfo(pick, *_mathPick.shapeInfo, _cachedResource); + //_mathPick.loaded = true; } else { _mathPick.loaded = false; } @@ -147,235 +148,6 @@ void CollisionPick::computeShapeInfoDimensionsOnly(const CollisionRegion& pick, } } -void CollisionPick::computeShapeInfo(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource) { - // This code was copied and modified from RenderableModelEntityItem::computeShapeInfo - // TODO: Move to some shared code area (in entities-renderer? model-networking?) - // after we verify this is working and do a diff comparison with RenderableModelEntityItem::computeShapeInfo - // to consolidate the code. - // We may also want to make computeShapeInfo always abstract away from the gpu model mesh, like it does here. - const uint32_t TRIANGLE_STRIDE = 3; - const uint32_t QUAD_STRIDE = 4; - - ShapeType type = shapeInfo.getType(); - glm::vec3 dimensions = pick.transform.getScale(); - if (type == SHAPE_TYPE_COMPOUND) { - // should never fall in here when collision model not fully loaded - // TODO: assert that all geometries exist and are loaded - //assert(_model && _model->isLoaded() && _compoundShapeResource && _compoundShapeResource->isLoaded()); - const HFMModel& collisionModel = resource->getHFMModel(); - - ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection(); - pointCollection.clear(); - uint32_t i = 0; - - // the way OBJ files get read, each section under a "g" line is its own meshPart. We only expect - // to find one actual "mesh" (with one or more meshParts in it), but we loop over the meshes, just in case. 
- foreach (const HFMMesh& mesh, collisionModel.meshes) { - // each meshPart is a convex hull - foreach (const HFMMeshPart &meshPart, mesh.parts) { - pointCollection.push_back(QVector()); - ShapeInfo::PointList& pointsInPart = pointCollection[i]; - - // run through all the triangles and (uniquely) add each point to the hull - uint32_t numIndices = (uint32_t)meshPart.triangleIndices.size(); - // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up - //assert(numIndices % TRIANGLE_STRIDE == 0); - numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer - - for (uint32_t j = 0; j < numIndices; j += TRIANGLE_STRIDE) { - glm::vec3 p0 = mesh.vertices[meshPart.triangleIndices[j]]; - glm::vec3 p1 = mesh.vertices[meshPart.triangleIndices[j + 1]]; - glm::vec3 p2 = mesh.vertices[meshPart.triangleIndices[j + 2]]; - if (!pointsInPart.contains(p0)) { - pointsInPart << p0; - } - if (!pointsInPart.contains(p1)) { - pointsInPart << p1; - } - if (!pointsInPart.contains(p2)) { - pointsInPart << p2; - } - } - - // run through all the quads and (uniquely) add each point to the hull - numIndices = (uint32_t)meshPart.quadIndices.size(); - // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up - //assert(numIndices % QUAD_STRIDE == 0); - numIndices -= numIndices % QUAD_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer - - for (uint32_t j = 0; j < numIndices; j += QUAD_STRIDE) { - glm::vec3 p0 = mesh.vertices[meshPart.quadIndices[j]]; - glm::vec3 p1 = mesh.vertices[meshPart.quadIndices[j + 1]]; - glm::vec3 p2 = mesh.vertices[meshPart.quadIndices[j + 2]]; - glm::vec3 p3 = mesh.vertices[meshPart.quadIndices[j + 3]]; - if (!pointsInPart.contains(p0)) { - pointsInPart << p0; - } - if (!pointsInPart.contains(p1)) { - pointsInPart << p1; - } - if (!pointsInPart.contains(p2)) { - pointsInPart << p2; - } - if (!pointsInPart.contains(p3)) { - pointsInPart << p3; - } - } - - if (pointsInPart.size() == 0) { - qCDebug(scriptengine) << "Warning -- meshPart has no faces"; - pointCollection.pop_back(); - continue; - } - ++i; - } - } - - // We expect that the collision model will have the same units and will be displaced - // from its origin in the same way the visual model is. The visual model has - // been centered and probably scaled. We take the scaling and offset which were applied - // to the visual model and apply them to the collision model (without regard for the - // collision model's extents). 
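// A minimal sketch of the scale-to-fit step described in the removed comment above; the same idea
// survives in RenderableModelEntityItem::computeShapeInfo after this code is deleted. The helper
// name computeScaleToFit is illustrative only and is not an engine API.
#include <glm/glm.hpp>

// Maps the collision model's extents onto the entity's dimensions, guarding degenerate (near-flat)
// axes so a zero-thickness model does not produce an infinite or NaN scale factor.
static glm::vec3 computeScaleToFit(const glm::vec3& dimensions, const glm::vec3& extentsSize) {
    glm::vec3 scaleToFit = dimensions / extentsSize;
    for (int axis = 0; axis < 3; ++axis) {
        if (extentsSize[axis] < 1.0e-6f) {
            scaleToFit[axis] = 1.0f; // leave flat axes unscaled
        }
    }
    return scaleToFit;
}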
- - glm::vec3 scaleToFit = dimensions / resource->getHFMModel().getUnscaledMeshExtents().size(); - // multiply each point by scale - for (int32_t i = 0; i < pointCollection.size(); i++) { - for (int32_t j = 0; j < pointCollection[i].size(); j++) { - // back compensate for registration so we can apply that offset to the shapeInfo later - pointCollection[i][j] = scaleToFit * pointCollection[i][j]; - } - } - shapeInfo.setParams(type, dimensions, resource->getURL().toString()); - } else if (type >= SHAPE_TYPE_SIMPLE_HULL && type <= SHAPE_TYPE_STATIC_MESH) { - const HFMModel& hfmModel = resource->getHFMModel(); - uint32_t numHFMMeshes = (uint32_t)hfmModel.meshes.size(); - int totalNumVertices = 0; - for (uint32_t i = 0; i < numHFMMeshes; i++) { - const HFMMesh& mesh = hfmModel.meshes.at(i); - totalNumVertices += mesh.vertices.size(); - } - const int32_t MAX_VERTICES_PER_STATIC_MESH = 1e6; - if (totalNumVertices > MAX_VERTICES_PER_STATIC_MESH) { - qWarning() << "model" << "has too many vertices" << totalNumVertices << "and will collide as a box."; - shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions); - return; - } - - auto& meshes = resource->getHFMModel().meshes; - int32_t numMeshes = (int32_t)(meshes.size()); - - const int MAX_ALLOWED_MESH_COUNT = 1000; - if (numMeshes > MAX_ALLOWED_MESH_COUNT) { - // too many will cause the deadlock timer to throw... - shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions); - return; - } - - ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection(); - pointCollection.clear(); - if (type == SHAPE_TYPE_SIMPLE_COMPOUND) { - pointCollection.resize(numMeshes); - } else { - pointCollection.resize(1); - } - - ShapeInfo::TriangleIndices& triangleIndices = shapeInfo.getTriangleIndices(); - triangleIndices.clear(); - - Extents extents; - int32_t meshCount = 0; - int32_t pointListIndex = 0; - for (auto& mesh : meshes) { - if (!mesh.vertices.size()) { - continue; - } - QVector vertices = mesh.vertices; - - ShapeInfo::PointList& points = pointCollection[pointListIndex]; - - // reserve room - int32_t sizeToReserve = (int32_t)(vertices.count()); - if (type == SHAPE_TYPE_SIMPLE_COMPOUND) { - // a list of points for each mesh - pointListIndex++; - } else { - // only one list of points - sizeToReserve += (int32_t)points.size(); - } - points.reserve(sizeToReserve); - - // copy points - const glm::vec3* vertexItr = vertices.cbegin(); - while (vertexItr != vertices.cend()) { - glm::vec3 point = *vertexItr; - points.push_back(point); - extents.addPoint(point); - ++vertexItr; - } - - if (type == SHAPE_TYPE_STATIC_MESH) { - // copy into triangleIndices - size_t triangleIndicesCount = 0; - for (const HFMMeshPart& meshPart : mesh.parts) { - triangleIndicesCount += meshPart.triangleIndices.count(); - } - triangleIndices.reserve((int)triangleIndicesCount); - - for (const HFMMeshPart& meshPart : mesh.parts) { - const int* indexItr = meshPart.triangleIndices.cbegin(); - while (indexItr != meshPart.triangleIndices.cend()) { - triangleIndices.push_back(*indexItr); - ++indexItr; - } - } - } else if (type == SHAPE_TYPE_SIMPLE_COMPOUND) { - // for each mesh copy unique part indices, separated by special bogus (flag) index values - for (const HFMMeshPart& meshPart : mesh.parts) { - // collect unique list of indices for this part - std::set uniqueIndices; - auto numIndices = meshPart.triangleIndices.count(); - // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up - //assert(numIndices% TRIANGLE_STRIDE == 0); - numIndices -= numIndices % 
TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer - - auto indexItr = meshPart.triangleIndices.cbegin(); - while (indexItr != meshPart.triangleIndices.cend()) { - uniqueIndices.insert(*indexItr); - ++indexItr; - } - - // store uniqueIndices in triangleIndices - triangleIndices.reserve(triangleIndices.size() + (int32_t)uniqueIndices.size()); - for (auto index : uniqueIndices) { - triangleIndices.push_back(index); - } - // flag end of part - triangleIndices.push_back(END_OF_MESH_PART); - } - // flag end of mesh - triangleIndices.push_back(END_OF_MESH); - } - ++meshCount; - } - - // scale and shift - glm::vec3 extentsSize = extents.size(); - glm::vec3 scaleToFit = dimensions / extentsSize; - for (int32_t i = 0; i < 3; ++i) { - if (extentsSize[i] < 1.0e-6f) { - scaleToFit[i] = 1.0f; - } - } - for (auto points : pointCollection) { - for (int32_t i = 0; i < points.size(); ++i) { - points[i] = (points[i] * scaleToFit); - } - } - - shapeInfo.setParams(type, 0.5f * dimensions, resource->getURL().toString()); - } -} - CollisionPick::CollisionPick(const PickFilter& filter, float maxDistance, bool enabled, bool scaleWithParent, CollisionRegion collisionRegion, PhysicsEnginePointer physicsEngine) : Pick(collisionRegion, filter, maxDistance, enabled), _scaleWithParent(scaleWithParent), diff --git a/interface/src/raypick/CollisionPick.h b/interface/src/raypick/CollisionPick.h index 115ee1727e..617c7b1f00 100644 --- a/interface/src/raypick/CollisionPick.h +++ b/interface/src/raypick/CollisionPick.h @@ -63,7 +63,6 @@ protected: bool isLoaded() const; // Returns true if _mathPick.shapeInfo is valid. Otherwise, attempts to get the _mathPick ready for use. bool getShapeInfoReady(const CollisionRegion& pick); - void computeShapeInfo(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource); void computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer resource); void filterIntersections(std::vector& intersections) const; From da5f80c139f9de9fd66eed4c4aa6f7a2c96614eb Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 1 Nov 2019 18:01:41 -0700 Subject: [PATCH 113/121] Use std::vector types in ShapeInfo --- .../src/RenderableModelEntityItem.cpp | 30 ++++++------ .../src/RenderablePolyVoxEntityItem.cpp | 49 ++++++++----------- libraries/physics/src/ShapeFactory.cpp | 8 +-- libraries/render-utils/src/GeometryCache.cpp | 2 +- libraries/render-utils/src/GeometryCache.h | 2 +- libraries/shared/src/ShapeInfo.cpp | 10 ++-- libraries/shared/src/ShapeInfo.h | 12 ++--- 7 files changed, 53 insertions(+), 60 deletions(-) diff --git a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp index 0029a20f40..1276c1361f 100644 --- a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp @@ -387,7 +387,7 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { foreach (const HFMMesh& mesh, collisionGeometry.meshes) { // each meshPart is a convex hull foreach (const HFMMeshPart &meshPart, mesh.parts) { - pointCollection.push_back(QVector()); + pointCollection.emplace_back(); ShapeInfo::PointList& pointsInPart = pointCollection[i]; // run through all the triangles and (uniquely) add each point to the hull @@ -400,14 +400,14 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { glm::vec3 p0 = mesh.vertices[meshPart.triangleIndices[j]]; glm::vec3 p1 = 
mesh.vertices[meshPart.triangleIndices[j + 1]]; glm::vec3 p2 = mesh.vertices[meshPart.triangleIndices[j + 2]]; - if (!pointsInPart.contains(p0)) { - pointsInPart << p0; + if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p0) == pointsInPart.cend()) { + pointsInPart.push_back(p0); } - if (!pointsInPart.contains(p1)) { - pointsInPart << p1; + if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p1) == pointsInPart.cend()) { + pointsInPart.push_back(p1); } - if (!pointsInPart.contains(p2)) { - pointsInPart << p2; + if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p2) == pointsInPart.cend()) { + pointsInPart.push_back(p2); } } @@ -422,17 +422,17 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { glm::vec3 p1 = mesh.vertices[meshPart.quadIndices[j + 1]]; glm::vec3 p2 = mesh.vertices[meshPart.quadIndices[j + 2]]; glm::vec3 p3 = mesh.vertices[meshPart.quadIndices[j + 3]]; - if (!pointsInPart.contains(p0)) { - pointsInPart << p0; + if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p0) == pointsInPart.cend()) { + pointsInPart.push_back(p0); } - if (!pointsInPart.contains(p1)) { - pointsInPart << p1; + if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p1) == pointsInPart.cend()) { + pointsInPart.push_back(p1); } - if (!pointsInPart.contains(p2)) { - pointsInPart << p2; + if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p2) == pointsInPart.cend()) { + pointsInPart.push_back(p2); } - if (!pointsInPart.contains(p3)) { - pointsInPart << p3; + if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p3) == pointsInPart.cend()) { + pointsInPart.push_back(p3); } } diff --git a/libraries/entities-renderer/src/RenderablePolyVoxEntityItem.cpp b/libraries/entities-renderer/src/RenderablePolyVoxEntityItem.cpp index 853e36b45b..09abd73d53 100644 --- a/libraries/entities-renderer/src/RenderablePolyVoxEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderablePolyVoxEntityItem.cpp @@ -1429,14 +1429,13 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() { QtConcurrent::run([entity, voxelSurfaceStyle, voxelVolumeSize, mesh] { auto polyVoxEntity = std::static_pointer_cast(entity); - QVector> pointCollection; + ShapeInfo::PointCollection pointCollection; AABox box; glm::mat4 vtoM = std::static_pointer_cast(entity)->voxelToLocalMatrix(); if (voxelSurfaceStyle == PolyVoxEntityItem::SURFACE_MARCHING_CUBES || voxelSurfaceStyle == PolyVoxEntityItem::SURFACE_EDGED_MARCHING_CUBES) { // pull each triangle in the mesh into a polyhedron which can be collided with - unsigned int i = 0; const gpu::BufferView& vertexBufferView = mesh->getVertexBuffer(); const gpu::BufferView& indexBufferView = mesh->getIndexBuffer(); @@ -1465,19 +1464,16 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() { box += p2Model; box += p3Model; - QVector pointsInPart; - pointsInPart << p0Model; - pointsInPart << p1Model; - pointsInPart << p2Model; - pointsInPart << p3Model; - // add next convex hull - QVector newMeshPoints; - pointCollection << newMeshPoints; - // add points to the new convex hull - pointCollection[i++] << pointsInPart; + ShapeInfo::PointList pointsInPart; + pointsInPart.push_back(p0Model); + pointsInPart.push_back(p1Model); + pointsInPart.push_back(p2Model); + pointsInPart.push_back(p3Model); + + // add points to a new convex hull + pointCollection.push_back(pointsInPart); } } else { - unsigned int i = 0; polyVoxEntity->forEachVoxelValue(voxelVolumeSize, [&](const ivec3& v, uint8_t value) { if (value > 0) { const auto& x = v.x; @@ -1496,7 
+1492,7 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() { return; } - QVector pointsInPart; + ShapeInfo::PointList pointsInPart; float offL = -0.5f; float offH = 0.5f; @@ -1523,20 +1519,17 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() { box += p110; box += p111; - pointsInPart << p000; - pointsInPart << p001; - pointsInPart << p010; - pointsInPart << p011; - pointsInPart << p100; - pointsInPart << p101; - pointsInPart << p110; - pointsInPart << p111; + pointsInPart.push_back(p000); + pointsInPart.push_back(p001); + pointsInPart.push_back(p010); + pointsInPart.push_back(p011); + pointsInPart.push_back(p100); + pointsInPart.push_back(p101); + pointsInPart.push_back(p110); + pointsInPart.push_back(p111); - // add next convex hull - QVector newMeshPoints; - pointCollection << newMeshPoints; - // add points to the new convex hull - pointCollection[i++] << pointsInPart; + // add points to a new convex hull + pointCollection.push_back(pointsInPart); } }); } @@ -1546,7 +1539,7 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() { void RenderablePolyVoxEntityItem::setCollisionPoints(ShapeInfo::PointCollection pointCollection, AABox box) { // this catches the payload from computeShapeInfoWorker - if (pointCollection.isEmpty()) { + if (pointCollection.empty()) { EntityItem::computeShapeInfo(_shapeInfo); withWriteLock([&] { _shapeReady = true; diff --git a/libraries/physics/src/ShapeFactory.cpp b/libraries/physics/src/ShapeFactory.cpp index ef5213df8f..569ddb52ce 100644 --- a/libraries/physics/src/ShapeFactory.cpp +++ b/libraries/physics/src/ShapeFactory.cpp @@ -217,7 +217,7 @@ btTriangleIndexVertexArray* createStaticMeshArray(const ShapeInfo& info) { } const ShapeInfo::TriangleIndices& triangleIndices = info.getTriangleIndices(); - int32_t numIndices = triangleIndices.size(); + int32_t numIndices = (int32_t)triangleIndices.size(); if (numIndices < 3) { // not enough indices to make a single triangle return nullptr; @@ -237,7 +237,7 @@ btTriangleIndexVertexArray* createStaticMeshArray(const ShapeInfo& info) { mesh.m_indexType = PHY_INTEGER; mesh.m_triangleIndexStride = VERTICES_PER_TRIANGLE * sizeof(int32_t); } - mesh.m_numVertices = pointList.size(); + mesh.m_numVertices = (int)pointList.size(); mesh.m_vertexBase = new unsigned char[VERTICES_PER_TRIANGLE * sizeof(btScalar) * (size_t)mesh.m_numVertices]; mesh.m_vertexStride = VERTICES_PER_TRIANGLE * sizeof(btScalar); mesh.m_vertexType = PHY_FLOAT; @@ -362,7 +362,7 @@ const btCollisionShape* ShapeFactory::createShapeFromInfo(const ShapeInfo& info) const ShapeInfo::PointCollection& pointCollection = info.getPointCollection(); uint32_t numSubShapes = info.getNumSubShapes(); if (numSubShapes == 1) { - if (!pointCollection.isEmpty()) { + if (!pointCollection.empty()) { shape = createConvexHull(pointCollection[0]); } } else { @@ -380,7 +380,7 @@ const btCollisionShape* ShapeFactory::createShapeFromInfo(const ShapeInfo& info) case SHAPE_TYPE_SIMPLE_COMPOUND: { const ShapeInfo::PointCollection& pointCollection = info.getPointCollection(); const ShapeInfo::TriangleIndices& triangleIndices = info.getTriangleIndices(); - uint32_t numIndices = triangleIndices.size(); + uint32_t numIndices = (uint32_t)triangleIndices.size(); uint32_t numMeshes = info.getNumSubShapes(); const uint32_t MIN_NUM_SIMPLE_COMPOUND_INDICES = 2; // END_OF_MESH_PART + END_OF_MESH if (numMeshes > 0 && numIndices > MIN_NUM_SIMPLE_COMPOUND_INDICES) { diff --git a/libraries/render-utils/src/GeometryCache.cpp b/libraries/render-utils/src/GeometryCache.cpp 
index 997f87b8d6..621c20227c 100644 --- a/libraries/render-utils/src/GeometryCache.cpp +++ b/libraries/render-utils/src/GeometryCache.cpp @@ -116,7 +116,7 @@ static const uint SHAPE_TANGENT_OFFSET = offsetof(GeometryCache::ShapeVertex, ta std::map, gpu::PipelinePointer> GeometryCache::_webPipelines; std::map, gpu::PipelinePointer> GeometryCache::_gridPipelines; -void GeometryCache::computeSimpleHullPointListForShape(const int entityShape, const glm::vec3 &entityExtents, QVector &outPointList) { +void GeometryCache::computeSimpleHullPointListForShape(const int entityShape, const glm::vec3 &entityExtents, ShapeInfo::PointList &outPointList) { auto geometryCache = DependencyManager::get(); const GeometryCache::Shape geometryShape = GeometryCache::getShapeForEntityShape( entityShape ); diff --git a/libraries/render-utils/src/GeometryCache.h b/libraries/render-utils/src/GeometryCache.h index b474e6c712..bfd133183d 100644 --- a/libraries/render-utils/src/GeometryCache.h +++ b/libraries/render-utils/src/GeometryCache.h @@ -155,7 +155,7 @@ public: static GeometryCache::Shape getShapeForEntityShape(int entityShapeEnum); static QString stringFromShape(GeometryCache::Shape geoShape); - static void computeSimpleHullPointListForShape(int entityShape, const glm::vec3 &entityExtents, QVector &outPointList); + static void computeSimpleHullPointListForShape(int entityShape, const glm::vec3 &entityExtents, ShapeInfo::PointList &outPointList); int allocateID() { return _nextID++; } void releaseID(int id); diff --git a/libraries/shared/src/ShapeInfo.cpp b/libraries/shared/src/ShapeInfo.cpp index c60d1c2574..048cdaaf0d 100644 --- a/libraries/shared/src/ShapeInfo.cpp +++ b/libraries/shared/src/ShapeInfo.cpp @@ -189,7 +189,7 @@ uint32_t ShapeInfo::getNumSubShapes() const { return 0; case SHAPE_TYPE_COMPOUND: case SHAPE_TYPE_SIMPLE_COMPOUND: - return _pointCollection.size(); + return (uint32_t)_pointCollection.size(); case SHAPE_TYPE_MULTISPHERE: case SHAPE_TYPE_SIMPLE_HULL: case SHAPE_TYPE_STATIC_MESH: @@ -200,10 +200,10 @@ uint32_t ShapeInfo::getNumSubShapes() const { } } -int ShapeInfo::getLargestSubshapePointCount() const { - int numPoints = 0; - for (int i = 0; i < _pointCollection.size(); ++i) { - int n = _pointCollection[i].size(); +uint32_t ShapeInfo::getLargestSubshapePointCount() const { + uint32_t numPoints = 0; + for (uint32_t i = 0; i < (uint32_t)_pointCollection.size(); ++i) { + uint32_t n = _pointCollection[i].size(); if (n > numPoints) { numPoints = n; } diff --git a/libraries/shared/src/ShapeInfo.h b/libraries/shared/src/ShapeInfo.h index 6b0f981b24..676f38d087 100644 --- a/libraries/shared/src/ShapeInfo.h +++ b/libraries/shared/src/ShapeInfo.h @@ -12,7 +12,7 @@ #ifndef hifi_ShapeInfo_h #define hifi_ShapeInfo_h -#include +#include #include #include #include @@ -53,11 +53,11 @@ class ShapeInfo { public: - using PointList = QVector; - using PointCollection = QVector; - using TriangleIndices = QVector; + using PointList = std::vector; + using PointCollection = std::vector; + using TriangleIndices = std::vector; using SphereData = glm::vec4; - using SphereCollection = QVector; + using SphereCollection = std::vector; static QString getNameForShapeType(ShapeType type); static ShapeType getShapeTypeForName(QString string); @@ -85,7 +85,7 @@ public: TriangleIndices& getTriangleIndices() { return _triangleIndices; } const TriangleIndices& getTriangleIndices() const { return _triangleIndices; } - int getLargestSubshapePointCount() const; + uint32_t getLargestSubshapePointCount() const; float 
computeVolume() const; From 88b6472cedcdc5d1ade53917240e5a81144541e2 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 4 Nov 2019 10:29:13 -0800 Subject: [PATCH 114/121] Fix build warning --- libraries/shared/src/ShapeInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/shared/src/ShapeInfo.cpp b/libraries/shared/src/ShapeInfo.cpp index 048cdaaf0d..cb9ad41fd0 100644 --- a/libraries/shared/src/ShapeInfo.cpp +++ b/libraries/shared/src/ShapeInfo.cpp @@ -203,7 +203,7 @@ uint32_t ShapeInfo::getNumSubShapes() const { uint32_t ShapeInfo::getLargestSubshapePointCount() const { uint32_t numPoints = 0; for (uint32_t i = 0; i < (uint32_t)_pointCollection.size(); ++i) { - uint32_t n = _pointCollection[i].size(); + uint32_t n = (uint32_t)_pointCollection[i].size(); if (n > numPoints) { numPoints = n; } From 33bb0e2b19e9df3f7c2a5ff138d49840c5beb8ed Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 4 Nov 2019 13:59:36 -0800 Subject: [PATCH 115/121] Change RenderableModelEntityItem::computeShapeInfo to support new HFM format --- .../src/RenderableModelEntityItem.cpp | 370 ++++++++---------- 1 file changed, 162 insertions(+), 208 deletions(-) diff --git a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp index 1276c1361f..c87d24c425 100644 --- a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp @@ -380,26 +380,35 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection(); pointCollection.clear(); - uint32_t i = 0; + + size_t numParts = 0; + for (const HFMMesh& mesh : collisionGeometry.meshes) { + numParts += mesh.triangleListMesh.parts.size(); + } + pointCollection.reserve(numParts); // the way OBJ files get read, each section under a "g" line is its own meshPart. We only expect // to find one actual "mesh" (with one or more meshParts in it), but we loop over the meshes, just in case. 
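// A hedged sketch of the per-part point gathering performed below, written against plain
// vertex/index arrays instead of hfm::TriangleListMesh. collectUniquePartPoints is an illustrative
// name, not an engine function; the linear de-duplication mirrors the patch and is acceptable
// because each convex-hull part is expected to be small.
#include <algorithm>
#include <cstdint>
#include <vector>
#include <glm/glm.hpp>

static std::vector<glm::vec3> collectUniquePartPoints(const std::vector<glm::vec3>& vertices,
                                                      const std::vector<uint32_t>& indices,
                                                      uint32_t indexStart, uint32_t indexCount) {
    const uint32_t TRIANGLE_STRIDE = 3;
    indexCount -= indexCount % TRIANGLE_STRIDE; // tolerate a malformed trailing partial triangle
    std::vector<glm::vec3> points;
    for (uint32_t i = indexStart; i < indexStart + indexCount; ++i) {
        const glm::vec3& p = vertices[indices[i]];
        if (std::find(points.cbegin(), points.cend(), p) == points.cend()) {
            points.push_back(p); // only unique points are handed to the convex hull
        }
    }
    return points;
}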
- foreach (const HFMMesh& mesh, collisionGeometry.meshes) { + for (const HFMMesh& mesh : collisionGeometry.meshes) { + const hfm::TriangleListMesh& triangleListMesh = mesh.triangleListMesh; // each meshPart is a convex hull - foreach (const HFMMeshPart &meshPart, mesh.parts) { - pointCollection.emplace_back(); - ShapeInfo::PointList& pointsInPart = pointCollection[i]; - + for (const glm::ivec2& part : triangleListMesh.parts) { // run through all the triangles and (uniquely) add each point to the hull - uint32_t numIndices = (uint32_t)meshPart.triangleIndices.size(); + + pointCollection.emplace_back(); + ShapeInfo::PointList& pointsInPart = pointCollection.back(); + + uint32_t numIndices = (uint32_t)part.y; // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up //assert(numIndices % TRIANGLE_STRIDE == 0); numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer - - for (uint32_t j = 0; j < numIndices; j += TRIANGLE_STRIDE) { - glm::vec3 p0 = mesh.vertices[meshPart.triangleIndices[j]]; - glm::vec3 p1 = mesh.vertices[meshPart.triangleIndices[j + 1]]; - glm::vec3 p2 = mesh.vertices[meshPart.triangleIndices[j + 2]]; + uint32_t indexStart = (uint32_t)part.x; + uint32_t indexEnd = indexStart + numIndices; + for (uint32_t j = indexStart; j < indexEnd; j += TRIANGLE_STRIDE) { + // NOTE: It seems odd to skip vertices when initializing a btConvexHullShape, but let's keep the behavior similar to the old behavior for now + glm::vec3 p0 = triangleListMesh.vertices[triangleListMesh.indices[j]]; + glm::vec3 p1 = triangleListMesh.vertices[triangleListMesh.indices[j + 1]]; + glm::vec3 p2 = triangleListMesh.vertices[triangleListMesh.indices[j + 2]]; if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p0) == pointsInPart.cend()) { pointsInPart.push_back(p0); } @@ -411,37 +420,11 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { } } - // run through all the quads and (uniquely) add each point to the hull - numIndices = (uint32_t)meshPart.quadIndices.size(); - // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up - //assert(numIndices % QUAD_STRIDE == 0); - numIndices -= numIndices % QUAD_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer - - for (uint32_t j = 0; j < numIndices; j += QUAD_STRIDE) { - glm::vec3 p0 = mesh.vertices[meshPart.quadIndices[j]]; - glm::vec3 p1 = mesh.vertices[meshPart.quadIndices[j + 1]]; - glm::vec3 p2 = mesh.vertices[meshPart.quadIndices[j + 2]]; - glm::vec3 p3 = mesh.vertices[meshPart.quadIndices[j + 3]]; - if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p0) == pointsInPart.cend()) { - pointsInPart.push_back(p0); - } - if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p1) == pointsInPart.cend()) { - pointsInPart.push_back(p1); - } - if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p2) == pointsInPart.cend()) { - pointsInPart.push_back(p2); - } - if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p3) == pointsInPart.cend()) { - pointsInPart.push_back(p3); - } - } - if (pointsInPart.size() == 0) { qCDebug(entitiesrenderer) << "Warning -- meshPart has no faces"; pointCollection.pop_back(); continue; } - ++i; } } @@ -474,61 +457,83 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { QVector localTransforms; const HFMModel& hfmModel = model->getHFMModel(); uint32_t numHFMShapes = (uint32_t)hfmModel.shapes.size(); - uint32_t numHFMMeshes = (uint32_t)hfmModel.meshes.size(); - int 
totalNumVertices = 0; + localTransforms.reserve(numHFMShapes); glm::vec3 dimensions = getScaledDimensions(); glm::mat4 invRegistraionOffset = glm::translate(dimensions * (getRegistrationPoint() - ENTITY_ITEM_DEFAULT_REGISTRATION_POINT)); for (uint32_t s = 0; s < numHFMShapes; s++) { const HFMShape& shape = hfmModel.shapes[s]; - // for (uint32_t i = 0; i < numHFMMeshes; i++) { - const HFMMesh& mesh = hfmModel.meshes.at(shape.mesh); - const HFMMeshPart& part = mesh.parts.at(shape.meshPart); - /* if (shape.skinDeformer != hfm::UNDEFINED_KEY) { - const HFMCluster& cluster = hfmModel.skinDeformers[shape.skinDeformer].clusters.at(0); - auto jointMatrix = model->getRig().getJointTransform(cluster.jointIndex); - // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later - localTransforms.push_back(invRegistraionOffset * jointMatrix * cluster.inverseBindMatrix); - } else {*/ - if (shape.joint != hfm::UNDEFINED_KEY) { auto jointMatrix = model->getRig().getJointTransform(shape.joint); // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later - localTransforms.push_back(invRegistraionOffset * jointMatrix/* * cluster.inverseBindMatrix*/); + if (shape.skinDeformer != hfm::UNDEFINED_KEY) { + const auto& skinDeformer = hfmModel.skinDeformers[shape.skinDeformer]; + glm::mat4 inverseBindMatrix; + if (!skinDeformer.clusters.empty()) { + const auto& cluster = skinDeformer.clusters.back(); + inverseBindMatrix = cluster.inverseBindMatrix; + } + localTransforms.push_back(invRegistraionOffset * jointMatrix * inverseBindMatrix); + } else { + localTransforms.push_back(invRegistraionOffset * jointMatrix); + } } else { localTransforms.push_back(invRegistraionOffset); } - /* if (i < hfmModel.skinDeformers.size() && hfmModel.skinDeformers[i].clusters.size() > 0) { - const HFMCluster& cluster = hfmModel.skinDeformers[i].clusters.at(0); - auto jointMatrix = model->getRig().getJointTransform(cluster.jointIndex); - // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later - localTransforms.push_back(invRegistraionOffset * jointMatrix * cluster.inverseBindMatrix); - } else { - localTransforms.push_back(invRegistraionOffset); - }*/ - totalNumVertices += mesh.vertices.size(); } - const int32_t MAX_VERTICES_PER_STATIC_MESH = 1e6; - if (totalNumVertices > MAX_VERTICES_PER_STATIC_MESH) { - qWarning() << "model" << getModelURL() << "has too many vertices" << totalNumVertices << "and will collide as a box."; + + ShapeInfo::TriangleIndices& triangleIndices = shapeInfo.getTriangleIndices(); + triangleIndices.clear(); + + Extents extents; + int32_t shapeCount = 0; + int32_t instanceIndex = 0; + + // NOTE: Each pointCollection corresponds to a mesh. Therefore, we should have one pointCollection per mesh instance + // A mesh instance is a unique combination of mesh/transform. For every mesh instance, there are as many shapes as there are parts for that mesh. + // We assume the shapes are grouped by mesh instance, and the group contains one of each mesh part. 
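// A standalone sketch of the grouping step introduced below, under the stated assumption that
// shapes referencing the same mesh instance are stored contiguously, one shape per mesh part, and
// that every mesh has at least one part. SimpleShape and groupShapesByInstance are illustrative
// names, not engine types.
#include <cstdint>
#include <numeric>
#include <vector>

struct SimpleShape { uint32_t mesh; uint32_t meshPart; };

// For each mesh, returns a list of instances; each instance is the list of shape indices covering
// that mesh's parts.
static std::vector<std::vector<std::vector<uint32_t>>> groupShapesByInstance(
        const std::vector<SimpleShape>& shapes, const std::vector<uint32_t>& partCountPerMesh) {
    std::vector<std::vector<std::vector<uint32_t>>> shapesPerInstancePerMesh(partCountPerMesh.size());
    for (uint32_t shapeIndex = 0; shapeIndex < (uint32_t)shapes.size();) {
        const uint32_t meshIndex = shapes[shapeIndex].mesh;
        const uint32_t numMeshParts = partCountPerMesh[meshIndex]; // assumed non-zero
        shapesPerInstancePerMesh[meshIndex].emplace_back(numMeshParts);
        auto& instanceShapes = shapesPerInstancePerMesh[meshIndex].back();
        std::iota(instanceShapes.begin(), instanceShapes.end(), shapeIndex);
        shapeIndex += numMeshParts; // the next instance's shapes start right after this one's parts
    }
    return shapesPerInstancePerMesh;
}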
+ uint32_t numInstances = 0; + std::vector>> shapesPerInstancePerMesh; + shapesPerInstancePerMesh.resize(hfmModel.meshes.size()); + for (uint32_t shapeIndex = 0; shapeIndex < hfmModel.shapes.size();) { + const auto& shape = hfmModel.shapes[shapeIndex]; + uint32_t meshIndex = shape.mesh; + const auto& mesh = hfmModel.meshes[meshIndex]; + uint32_t numMeshParts = (uint32_t)mesh.parts.size(); + assert(numMeshParts != 0); + + auto& shapesPerInstance = shapesPerInstancePerMesh[meshIndex]; + shapesPerInstance.emplace_back(); + + auto& shapes = shapesPerInstance.back(); + shapes.resize(numMeshParts); + std::iota(shapes.begin(), shapes.end(), shapeIndex); + + shapeIndex += numMeshParts; + ++numInstances; + } + + const uint32_t MAX_ALLOWED_MESH_COUNT = 1000; + if (numInstances > MAX_ALLOWED_MESH_COUNT) { + // too many will cause the deadlock timer to throw... + qWarning() << "model" << getModelURL() << "has too many collision meshes" << numInstances << "and will collide as a box."; shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions); return; } - std::vector> meshes; - if (type == SHAPE_TYPE_SIMPLE_COMPOUND) { - auto& hfmMeshes = _collisionGeometryResource->getHFMModel().meshes; - meshes.reserve(hfmMeshes.size()); - for (auto& hfmMesh : hfmMeshes) { - meshes.push_back(hfmMesh._mesh); + size_t totalNumVertices = 0; + for (const auto& shapesPerInstance : shapesPerInstancePerMesh) { + for (const auto& instanceShapes : shapesPerInstance) { + const uint32_t firstShapeIndex = instanceShapes.front(); + const auto& firstShape = hfmModel.shapes[firstShapeIndex]; + const auto& mesh = hfmModel.meshes[firstShape.mesh]; + const auto& triangleListMesh = mesh.triangleListMesh; + // Added once per instance per mesh + totalNumVertices += triangleListMesh.vertices.size(); } - } else { - meshes = model->getNetworkModel()->getMeshes(); } - int32_t numMeshes = (int32_t)(meshes.size()); - - const int MAX_ALLOWED_MESH_COUNT = 1000; - if (numMeshes > MAX_ALLOWED_MESH_COUNT) { - // too many will cause the deadlock timer to throw... 
+ const size_t MAX_VERTICES_PER_STATIC_MESH = 1e6; + if (totalNumVertices > MAX_VERTICES_PER_STATIC_MESH) { + qWarning() << "model" << getModelURL() << "has too many vertices" << totalNumVertices << "and will collide as a box."; shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions); return; } @@ -536,169 +541,118 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection(); pointCollection.clear(); if (type == SHAPE_TYPE_SIMPLE_COMPOUND) { - pointCollection.resize(numMeshes); + pointCollection.resize(numInstances); } else { pointCollection.resize(1); } - ShapeInfo::TriangleIndices& triangleIndices = shapeInfo.getTriangleIndices(); - triangleIndices.clear(); + for (uint32_t meshIndex = 0; meshIndex < hfmModel.meshes.size(); ++meshIndex) { + const auto& mesh = hfmModel.meshes[meshIndex]; + const auto& triangleListMesh = mesh.triangleListMesh; + const auto& vertices = triangleListMesh.vertices; + const auto& indices = triangleListMesh.indices; + const std::vector& parts = triangleListMesh.parts; - Extents extents; - int32_t meshCount = 0; - int32_t pointListIndex = 0; - for (auto& mesh : meshes) { - if (!mesh) { - continue; - } - const gpu::BufferView& vertices = mesh->getVertexBuffer(); - const gpu::BufferView& indices = mesh->getIndexBuffer(); - const gpu::BufferView& parts = mesh->getPartBuffer(); + const auto& shapesPerInstance = shapesPerInstancePerMesh[meshIndex]; + for (const std::vector& instanceShapes : shapesPerInstance) { + ShapeInfo::PointList& points = pointCollection[instanceIndex]; - ShapeInfo::PointList& points = pointCollection[pointListIndex]; + // reserve room + int32_t sizeToReserve = (int32_t)(vertices.size()); + if (type == SHAPE_TYPE_SIMPLE_COMPOUND) { + // a list of points for each instance + instanceIndex++; + } else { + // only one list of points + sizeToReserve += (int32_t)((gpu::Size)points.size()); + } + points.reserve(sizeToReserve); + + // get mesh instance transform + const uint32_t meshIndexOffset = (uint32_t)points.size(); + const uint32_t instanceShapeIndexForTransform = instanceShapes.front(); + const auto& instanceShapeForTransform = hfmModel.shapes[instanceShapeIndexForTransform]; + glm::mat4 localTransform; + if (instanceShapeForTransform.joint != hfm::UNDEFINED_KEY) { + auto jointMatrix = model->getRig().getJointTransform(instanceShapeForTransform.joint); + // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later + if (instanceShapeForTransform.skinDeformer != hfm::UNDEFINED_KEY) { + const auto& skinDeformer = hfmModel.skinDeformers[instanceShapeForTransform.skinDeformer]; + glm::mat4 inverseBindMatrix; + if (!skinDeformer.clusters.empty()) { + const auto& cluster = skinDeformer.clusters.back(); + inverseBindMatrix = cluster.inverseBindMatrix; + } + localTransform = invRegistraionOffset * jointMatrix * inverseBindMatrix; + } else { + localTransform = invRegistraionOffset * jointMatrix; + } + } else { + localTransform = invRegistraionOffset; + } - // reserve room - int32_t sizeToReserve = (int32_t)(vertices.getNumElements()); - if (type == SHAPE_TYPE_SIMPLE_COMPOUND) { - // a list of points for each mesh - pointListIndex++; - } else { - // only one list of points - sizeToReserve += (int32_t)((gpu::Size)points.size()); - } - points.reserve(sizeToReserve); + // copy points + auto vertexItr = vertices.cbegin(); + while (vertexItr != vertices.cend()) { + glm::vec3 point = extractTranslation(localTransform * 
glm::translate(*vertexItr)); + points.push_back(point); + ++vertexItr; + } + for (const auto& instanceShapeIndex : instanceShapes) { + const auto& instanceShape = hfmModel.shapes[instanceShapeIndex]; + extents.addExtents(instanceShape.transformedExtents); + } - // copy points - uint32_t meshIndexOffset = (uint32_t)points.size(); - const glm::mat4& localTransform = localTransforms[meshCount]; - gpu::BufferView::Iterator vertexItr = vertices.cbegin(); - while (vertexItr != vertices.cend()) { - glm::vec3 point = extractTranslation(localTransform * glm::translate(*vertexItr)); - points.push_back(point); - extents.addPoint(point); - ++vertexItr; - } - - if (type == SHAPE_TYPE_STATIC_MESH) { - // copy into triangleIndices - triangleIndices.reserve((int32_t)((gpu::Size)(triangleIndices.size()) + indices.getNumElements())); - gpu::BufferView::Iterator partItr = parts.cbegin(); - while (partItr != parts.cend()) { - auto numIndices = partItr->_numIndices; - if (partItr->_topology == graphics::Mesh::TRIANGLES) { + if (type == SHAPE_TYPE_STATIC_MESH) { + // copy into triangleIndices + triangleIndices.reserve((int32_t)((gpu::Size)(triangleIndices.size()) + indices.size())); + auto partItr = parts.cbegin(); + while (partItr != parts.cend()) { + auto numIndices = partItr->y; // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up //assert(numIndices % TRIANGLE_STRIDE == 0); numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer - - auto indexItr = indices.cbegin() + partItr->_startIndex; + auto indexItr = indices.cbegin() + partItr->x; auto indexEnd = indexItr + numIndices; while (indexItr != indexEnd) { triangleIndices.push_back(*indexItr + meshIndexOffset); ++indexItr; } - } else if (partItr->_topology == graphics::Mesh::TRIANGLE_STRIP) { - // TODO: resurrect assert after we start sanitizing HFMMesh higher up - //assert(numIndices > 2); - - uint32_t approxNumIndices = TRIANGLE_STRIDE * numIndices; - if (approxNumIndices > (uint32_t)(triangleIndices.capacity() - triangleIndices.size())) { - // we underestimated the final size of triangleIndices so we pre-emptively expand it - triangleIndices.reserve(triangleIndices.size() + approxNumIndices); - } - - auto indexItr = indices.cbegin() + partItr->_startIndex; - auto indexEnd = indexItr + (numIndices - 2); - - // first triangle uses the first three indices - triangleIndices.push_back(*(indexItr++) + meshIndexOffset); - triangleIndices.push_back(*(indexItr++) + meshIndexOffset); - triangleIndices.push_back(*(indexItr++) + meshIndexOffset); - - // the rest use previous and next index - uint32_t triangleCount = 1; - while (indexItr != indexEnd) { - if ((*indexItr) != graphics::Mesh::PRIMITIVE_RESTART_INDEX) { - if (triangleCount % 2 == 0) { - // even triangles use first two indices in order - triangleIndices.push_back(*(indexItr - 2) + meshIndexOffset); - triangleIndices.push_back(*(indexItr - 1) + meshIndexOffset); - } else { - // odd triangles swap order of first two indices - triangleIndices.push_back(*(indexItr - 1) + meshIndexOffset); - triangleIndices.push_back(*(indexItr - 2) + meshIndexOffset); - } - triangleIndices.push_back(*indexItr + meshIndexOffset); - ++triangleCount; - } - ++indexItr; - } + ++partItr; } - ++partItr; - } - } else if (type == SHAPE_TYPE_SIMPLE_COMPOUND) { - // for each mesh copy unique part indices, separated by special bogus (flag) index values - gpu::BufferView::Iterator partItr = parts.cbegin(); - while (partItr != parts.cend()) { - // collect unique list of 
indices for this part - std::set uniqueIndices; - auto numIndices = partItr->_numIndices; - if (partItr->_topology == graphics::Mesh::TRIANGLES) { + } else if (type == SHAPE_TYPE_SIMPLE_COMPOUND) { + // for each mesh copy unique part indices, separated by special bogus (flag) index values + auto partItr = parts.cbegin(); + while (partItr != parts.cend()) { + // collect unique list of indices for this part + std::set uniqueIndices; + auto numIndices = partItr->y; // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up //assert(numIndices% TRIANGLE_STRIDE == 0); numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer - - auto indexItr = indices.cbegin() + partItr->_startIndex; + auto indexItr = indices.cbegin() + partItr->x; auto indexEnd = indexItr + numIndices; while (indexItr != indexEnd) { uniqueIndices.insert(*indexItr); ++indexItr; } - } else if (partItr->_topology == graphics::Mesh::TRIANGLE_STRIP) { - // TODO: resurrect assert after we start sanitizing HFMMesh higher up - //assert(numIndices > TRIANGLE_STRIDE - 1); - auto indexItr = indices.cbegin() + partItr->_startIndex; - auto indexEnd = indexItr + (numIndices - 2); - - // first triangle uses the first three indices - uniqueIndices.insert(*(indexItr++)); - uniqueIndices.insert(*(indexItr++)); - uniqueIndices.insert(*(indexItr++)); - - // the rest use previous and next index - uint32_t triangleCount = 1; - while (indexItr != indexEnd) { - if ((*indexItr) != graphics::Mesh::PRIMITIVE_RESTART_INDEX) { - if (triangleCount % 2 == 0) { - // EVEN triangles use first two indices in order - uniqueIndices.insert(*(indexItr - 2)); - uniqueIndices.insert(*(indexItr - 1)); - } else { - // ODD triangles swap order of first two indices - uniqueIndices.insert(*(indexItr - 1)); - uniqueIndices.insert(*(indexItr - 2)); - } - uniqueIndices.insert(*indexItr); - ++triangleCount; - } - ++indexItr; + // store uniqueIndices in triangleIndices + triangleIndices.reserve(triangleIndices.size() + (int32_t)uniqueIndices.size()); + for (auto index : uniqueIndices) { + triangleIndices.push_back(index); } - } + // flag end of part + triangleIndices.push_back(END_OF_MESH_PART); - // store uniqueIndices in triangleIndices - triangleIndices.reserve(triangleIndices.size() + (int32_t)uniqueIndices.size()); - for (auto index : uniqueIndices) { - triangleIndices.push_back(index); + ++partItr; } - // flag end of part - triangleIndices.push_back(END_OF_MESH_PART); - - ++partItr; + // flag end of mesh + triangleIndices.push_back(END_OF_MESH); } - // flag end of mesh - triangleIndices.push_back(END_OF_MESH); } - ++meshCount; + + ++shapeCount; } // scale and shift From a42e09aef806c6a704a87ada9cadb36add03e5fd Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 8 Nov 2019 10:04:34 -0800 Subject: [PATCH 116/121] Fix unused variables/implicit type conversions --- .../src/RenderableModelEntityItem.cpp | 30 ++----------------- libraries/physics/src/ShapeFactory.cpp | 4 +-- 2 files changed, 5 insertions(+), 29 deletions(-) diff --git a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp index c87d24c425..4de092c7fd 100644 --- a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp @@ -357,7 +357,6 @@ bool RenderableModelEntityItem::isReadyToComputeShape() const { void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { const uint32_t 
TRIANGLE_STRIDE = 3; - const uint32_t QUAD_STRIDE = 4; ShapeType type = getShapeType(); @@ -439,8 +438,8 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { // multiply each point by scale before handing the point-set off to the physics engine. // also determine the extents of the collision model. glm::vec3 registrationOffset = dimensions * (ENTITY_ITEM_DEFAULT_REGISTRATION_POINT - getRegistrationPoint()); - for (int32_t i = 0; i < pointCollection.size(); i++) { - for (int32_t j = 0; j < pointCollection[i].size(); j++) { + for (size_t i = 0; i < pointCollection.size(); i++) { + for (size_t j = 0; j < pointCollection[i].size(); j++) { // back compensate for registration so we can apply that offset to the shapeInfo later pointCollection[i][j] = scaleToFit * (pointCollection[i][j] + model->getOffset()) - registrationOffset; } @@ -454,32 +453,9 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { model->updateGeometry(); // compute meshPart local transforms - QVector localTransforms; const HFMModel& hfmModel = model->getHFMModel(); - uint32_t numHFMShapes = (uint32_t)hfmModel.shapes.size(); - localTransforms.reserve(numHFMShapes); glm::vec3 dimensions = getScaledDimensions(); glm::mat4 invRegistraionOffset = glm::translate(dimensions * (getRegistrationPoint() - ENTITY_ITEM_DEFAULT_REGISTRATION_POINT)); - for (uint32_t s = 0; s < numHFMShapes; s++) { - const HFMShape& shape = hfmModel.shapes[s]; - if (shape.joint != hfm::UNDEFINED_KEY) { - auto jointMatrix = model->getRig().getJointTransform(shape.joint); - // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later - if (shape.skinDeformer != hfm::UNDEFINED_KEY) { - const auto& skinDeformer = hfmModel.skinDeformers[shape.skinDeformer]; - glm::mat4 inverseBindMatrix; - if (!skinDeformer.clusters.empty()) { - const auto& cluster = skinDeformer.clusters.back(); - inverseBindMatrix = cluster.inverseBindMatrix; - } - localTransforms.push_back(invRegistraionOffset * jointMatrix * inverseBindMatrix); - } else { - localTransforms.push_back(invRegistraionOffset * jointMatrix); - } - } else { - localTransforms.push_back(invRegistraionOffset); - } - } ShapeInfo::TriangleIndices& triangleIndices = shapeInfo.getTriangleIndices(); triangleIndices.clear(); @@ -664,7 +640,7 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { } } for (auto points : pointCollection) { - for (int32_t i = 0; i < points.size(); ++i) { + for (size_t i = 0; i < points.size(); ++i) { points[i] = (points[i] * scaleToFit); } } diff --git a/libraries/physics/src/ShapeFactory.cpp b/libraries/physics/src/ShapeFactory.cpp index 569ddb52ce..43c6fc27dc 100644 --- a/libraries/physics/src/ShapeFactory.cpp +++ b/libraries/physics/src/ShapeFactory.cpp @@ -109,7 +109,7 @@ btConvexHullShape* createConvexHull(const ShapeInfo::PointList& points) { glm::vec3 center = points[0]; glm::vec3 maxCorner = center; glm::vec3 minCorner = center; - for (int i = 1; i < points.size(); i++) { + for (size_t i = 1; i < points.size(); i++) { center += points[i]; maxCorner = glm::max(maxCorner, points[i]); minCorner = glm::min(minCorner, points[i]); @@ -149,7 +149,7 @@ btConvexHullShape* createConvexHull(const ShapeInfo::PointList& points) { // add the points, correcting for margin glm::vec3 relativeScale = (diagonal - glm::vec3(2.0f * margin)) / diagonal; glm::vec3 correctedPoint; - for (int i = 0; i < points.size(); ++i) { + for (size_t i = 0; i < points.size(); ++i) { correctedPoint = (points[i] - center) * 
relativeScale + center; hull->addPoint(btVector3(correctedPoint[0], correctedPoint[1], correctedPoint[2]), false); } From b7da5d0d7283abf43d44b3f92108333f0b6a452b Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Fri, 8 Nov 2019 13:56:44 -0800 Subject: [PATCH 117/121] Update Model::calculateTriangleSets to use hfm::Shapes --- libraries/render-utils/src/Model.cpp | 112 +++++++++++++-------------- 1 file changed, 52 insertions(+), 60 deletions(-) diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index 116c3dbb19..b932e4df60 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -824,77 +824,69 @@ scriptable::ScriptableModelBase Model::getScriptableModel() { void Model::calculateTriangleSets(const HFMModel& hfmModel) { PROFILE_RANGE(render, __FUNCTION__); - uint32_t numberOfMeshes = (uint32_t)hfmModel.meshes.size(); + uint32_t meshInstanceCount = 0; + uint32_t lastMeshForInstanceCount = hfm::UNDEFINED_KEY; + for (const auto& shape : hfmModel.shapes) { + if (shape.mesh != lastMeshForInstanceCount) { + ++meshInstanceCount; + } + lastMeshForInstanceCount = shape.mesh; + } _triangleSetsValid = true; _modelSpaceMeshTriangleSets.clear(); - _modelSpaceMeshTriangleSets.resize(numberOfMeshes); + _modelSpaceMeshTriangleSets.reserve(meshInstanceCount); - for (uint32_t i = 0; i < numberOfMeshes; i++) { - const HFMMesh& mesh = hfmModel.meshes.at(i); + uint32_t lastMeshForTriangleBuilding = hfm::UNDEFINED_KEY; + glm::mat4 lastTransformForTriangleBuilding { 0 }; + std::vector transformedPoints; + for (const auto& shape : hfmModel.shapes) { + const uint32_t meshIndex = shape.mesh; + const hfm::Mesh& mesh = hfmModel.meshes.at(meshIndex); + const auto& triangleListMesh = mesh.triangleListMesh; + const glm::vec2 part = triangleListMesh.parts[shape.meshPart]; + glm::mat4 worldFromMeshTransform; + if (shape.joint != hfm::UNDEFINED_KEY) { + // globalTransform includes hfmModel.offset, + // which includes the scaling, rotation, and translation specified by the FST, + // and the scaling from the unit conversion in FBX. + // This can't change at runtime, so we can safely store these in our TriangleSet. + worldFromMeshTransform = hfmModel.joints[shape.joint].globalTransform; + } - const uint32_t numberOfParts = (uint32_t)mesh.parts.size(); - auto& meshTriangleSets = _modelSpaceMeshTriangleSets[i]; - meshTriangleSets.resize(numberOfParts); + if (meshIndex != lastMeshForTriangleBuilding || worldFromMeshTransform != lastTransformForTriangleBuilding) { + lastMeshForTriangleBuilding = meshIndex; + lastTransformForTriangleBuilding = worldFromMeshTransform; + _modelSpaceMeshTriangleSets.emplace_back(); + _modelSpaceMeshTriangleSets.back().reserve(mesh.parts.size()); - for (uint32_t j = 0; j < numberOfParts; j++) { - const HFMMeshPart& part = mesh.parts.at(j); - - auto& partTriangleSet = meshTriangleSets[j]; - - const int INDICES_PER_TRIANGLE = 3; - const int INDICES_PER_QUAD = 4; - const int TRIANGLES_PER_QUAD = 2; - - // tell our triangleSet how many triangles to expect. 
- int numberOfQuads = part.quadIndices.size() / INDICES_PER_QUAD; - int numberOfTris = part.triangleIndices.size() / INDICES_PER_TRIANGLE; - int totalTriangles = (numberOfQuads * TRIANGLES_PER_QUAD) + numberOfTris; - partTriangleSet.reserve(totalTriangles); - - auto meshTransform = hfmModel.offset * mesh.modelTransform; - - if (part.quadIndices.size() > 0) { - int vIndex = 0; - for (int q = 0; q < numberOfQuads; q++) { - int i0 = part.quadIndices[vIndex++]; - int i1 = part.quadIndices[vIndex++]; - int i2 = part.quadIndices[vIndex++]; - int i3 = part.quadIndices[vIndex++]; - - // track the model space version... these points will be transformed by the FST's offset, - // which includes the scaling, rotation, and translation specified by the FST/FBX, - // this can't change at runtime, so we can safely store these in our TriangleSet - glm::vec3 v0 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i0], 1.0f)); - glm::vec3 v1 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i1], 1.0f)); - glm::vec3 v2 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i2], 1.0f)); - glm::vec3 v3 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i3], 1.0f)); - - Triangle tri1 = { v0, v1, v3 }; - Triangle tri2 = { v1, v2, v3 }; - partTriangleSet.insert(tri1); - partTriangleSet.insert(tri2); + transformedPoints = triangleListMesh.vertices; + if (worldFromMeshTransform != glm::mat4()) { + for (auto& point : transformedPoints) { + point = glm::vec3(worldFromMeshTransform * glm::vec4(point, 1.0f)); } } + } + auto& meshTriangleSets = _modelSpaceMeshTriangleSets.back(); + meshTriangleSets.emplace_back(); + auto& partTriangleSet = meshTriangleSets.back(); - if (part.triangleIndices.size() > 0) { - int vIndex = 0; - for (int t = 0; t < numberOfTris; t++) { - int i0 = part.triangleIndices[vIndex++]; - int i1 = part.triangleIndices[vIndex++]; - int i2 = part.triangleIndices[vIndex++]; + const static size_t INDICES_PER_TRIANGLE = 3; + const size_t triangleCount = (size_t)(part.y) / INDICES_PER_TRIANGLE; + partTriangleSet.reserve(triangleCount); + const size_t indexStart = (uint32_t)part.x; + const size_t indexEnd = indexStart + (triangleCount * INDICES_PER_TRIANGLE); + for (size_t i = indexStart; i < indexEnd; i += INDICES_PER_TRIANGLE) { + const int i0 = triangleListMesh.indices[i]; + const int i1 = triangleListMesh.indices[i + 1]; + const int i2 = triangleListMesh.indices[i + 2]; - // track the model space version... 
these points will be transformed by the FST's offset, - // which includes the scaling, rotation, and translation specified by the FST/FBX, - // this can't change at runtime, so we can safely store these in our TriangleSet - glm::vec3 v0 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i0], 1.0f)); - glm::vec3 v1 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i1], 1.0f)); - glm::vec3 v2 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i2], 1.0f)); + const glm::vec3 v0 = transformedPoints[i0]; + const glm::vec3 v1 = transformedPoints[i1]; + const glm::vec3 v2 = transformedPoints[i2]; - Triangle tri = { v0, v1, v2 }; - partTriangleSet.insert(tri); - } - } + const Triangle tri = { v0, v1, v2 }; + partTriangleSet.insert(tri); } } } From 9a65d78cdfac853ac767c634ed7ed48e82db018f Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 28 Oct 2019 15:33:31 -0700 Subject: [PATCH 118/121] Fix model baking to work with new hfm::Shape material definition (1 material per mesh part) --- libraries/baking/src/OBJBaker.cpp | 34 +++++----- libraries/baking/src/OBJBaker.h | 2 +- .../model-baker/src/model-baker/Baker.cpp | 6 +- .../src/model-baker/BuildDracoMeshTask.cpp | 67 +++++++++++++------ .../src/model-baker/BuildDracoMeshTask.h | 2 +- 5 files changed, 70 insertions(+), 41 deletions(-) diff --git a/libraries/baking/src/OBJBaker.cpp b/libraries/baking/src/OBJBaker.cpp index 4adaa01845..d726dee897 100644 --- a/libraries/baking/src/OBJBaker.cpp +++ b/libraries/baking/src/OBJBaker.cpp @@ -37,10 +37,10 @@ const QByteArray MESH = "Mesh"; void OBJBaker::bakeProcessedSource(const hfm::Model::Pointer& hfmModel, const std::vector& dracoMeshes, const std::vector>& dracoMaterialLists) { // Write OBJ Data as FBX tree nodes - createFBXNodeTree(_rootNode, hfmModel, dracoMeshes[0]); + createFBXNodeTree(_rootNode, hfmModel, dracoMeshes[0], dracoMaterialLists[0]); } -void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh) { +void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh, const std::vector& dracoMaterialList) { // Make all generated nodes children of rootNode rootNode.children = { FBXNode(), FBXNode(), FBXNode() }; FBXNode& globalSettingsNode = rootNode.children[0]; @@ -100,24 +100,22 @@ void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& h } // Generating Objects node's child - Material node - auto& meshParts = hfmModel->meshes[0].parts; - for (auto& meshPart : meshParts) { + + // Each material ID should only appear once thanks to deduplication in BuildDracoMeshTask, but we want to make sure they are created in the right order + std::unordered_map materialIDToIndex; + for (uint32_t materialIndex = 0; materialIndex < hfmModel->materials.size(); ++materialIndex) { + const auto& material = hfmModel->materials[materialIndex]; + materialIDToIndex[material.materialID] = materialIndex; + } + + // Create nodes for each material in the material list + for (const auto& dracoMaterial : dracoMaterialList) { + const QString materialID = QString(dracoMaterial); + const uint32_t materialIndex = materialIDToIndex[materialID]; + const auto& material = hfmModel->materials[materialIndex]; FBXNode materialNode; materialNode.name = MATERIAL_NODE_NAME; - if (hfmModel->materials.size() == 1) { - // case when no material information is provided, OBJSerializer considers it as a single default material - for (auto& material : hfmModel->materials) { - 
setMaterialNodeProperties(materialNode, material.name, material, hfmModel); - } - } else { - for (auto& material : hfmModel->materials) { - if (material.name == meshPart.materialID) { - setMaterialNodeProperties(materialNode, meshPart.materialID, material, hfmModel); - break; - } - } - } - + setMaterialNodeProperties(materialNode, material.materialID, material, hfmModel); objectNode.children.append(materialNode); } diff --git a/libraries/baking/src/OBJBaker.h b/libraries/baking/src/OBJBaker.h index 044c51d0cc..778b4da341 100644 --- a/libraries/baking/src/OBJBaker.h +++ b/libraries/baking/src/OBJBaker.h @@ -27,7 +27,7 @@ protected: virtual void bakeProcessedSource(const hfm::Model::Pointer& hfmModel, const std::vector& dracoMeshes, const std::vector>& dracoMaterialLists) override; private: - void createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh); + void createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh, const std::vector& dracoMaterialList); void setMaterialNodeProperties(FBXNode& materialNode, const QString& materialName, const hfm::Material& material, const hfm::Model::Pointer& hfmModel); NodeID nextNodeID() { return _nodeID++; } diff --git a/libraries/model-baker/src/model-baker/Baker.cpp b/libraries/model-baker/src/model-baker/Baker.cpp index 662b4670ee..d200df211d 100644 --- a/libraries/model-baker/src/model-baker/Baker.cpp +++ b/libraries/model-baker/src/model-baker/Baker.cpp @@ -30,7 +30,7 @@ namespace baker { class GetModelPartsTask { public: using Input = hfm::Model::Pointer; - using Output = VaryingSet8, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector, std::vector, std::vector, Extents>; + using Output = VaryingSet9, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector, std::vector, std::vector, Extents, std::vector>; using JobModel = Job::ModelIO; void run(const BakeContextPointer& context, const Input& input, Output& output) { @@ -47,6 +47,7 @@ namespace baker { output.edit5() = hfmModelIn->shapes; output.edit6() = hfmModelIn->skinDeformers; output.edit7() = hfmModelIn->meshExtents; + output.edit8() = hfmModelIn->materials; } }; @@ -170,6 +171,7 @@ namespace baker { const auto shapesIn = modelPartsIn.getN(5); const auto skinDeformersIn = modelPartsIn.getN(6); const auto modelExtentsIn = modelPartsIn.getN(7); + const auto materialsIn = modelPartsIn.getN(8); // Calculate normals and tangents for meshes and blendshapes if they do not exist // Note: Normals are never calculated here for OBJ models. OBJ files optionally define normals on a per-face basis, so for consistency normals are calculated beforehand in OBJSerializer. 
@@ -214,7 +216,7 @@ namespace baker { // TODO: Tangent support (Needs changes to FBXSerializer_Mesh as well) // NOTE: Due to an unresolved linker error, BuildDracoMeshTask is not functional on Android // TODO: Figure out why BuildDracoMeshTask.cpp won't link with draco on Android - const auto buildDracoMeshInputs = BuildDracoMeshTask::Input(meshesIn, normalsPerMesh, tangentsPerMesh).asVarying(); + const auto buildDracoMeshInputs = BuildDracoMeshTask::Input(shapesOut, meshesIn, materialsIn, normalsPerMesh, tangentsPerMesh).asVarying(); const auto buildDracoMeshOutputs = model.addJob("BuildDracoMesh", buildDracoMeshInputs); const auto dracoMeshes = buildDracoMeshOutputs.getN(0); const auto dracoErrors = buildDracoMeshOutputs.getN(1); diff --git a/libraries/model-baker/src/model-baker/BuildDracoMeshTask.cpp b/libraries/model-baker/src/model-baker/BuildDracoMeshTask.cpp index 12347c30b1..5c9d1dac25 100644 --- a/libraries/model-baker/src/model-baker/BuildDracoMeshTask.cpp +++ b/libraries/model-baker/src/model-baker/BuildDracoMeshTask.cpp @@ -39,19 +39,47 @@ #include "ModelMath.h" #ifndef Q_OS_ANDROID -std::vector createMaterialList(const hfm::Mesh& mesh) { - std::vector materialList; - for (const auto& meshPart : mesh.parts) { - auto materialID = QVariant(meshPart.materialID).toByteArray(); - const auto materialIt = std::find(materialList.cbegin(), materialList.cend(), materialID); - if (materialIt == materialList.cend()) { - materialList.push_back(materialID); + +void reindexMaterials(const std::vector& originalMaterialIndices, std::vector& materials, std::vector& materialIndices) { + materialIndices.resize(originalMaterialIndices.size()); + for (size_t i = 0; i < originalMaterialIndices.size(); ++i) { + uint32_t material = originalMaterialIndices[i]; + auto foundMaterial = std::find(materials.cbegin(), materials.cend(), material); + if (foundMaterial == materials.cend()) { + materials.push_back(material); + materialIndices[i] = (uint16_t)(materials.size() - 1); + } else { + materialIndices[i] = (uint16_t)(foundMaterial - materials.cbegin()); } } - return materialList; } -std::tuple, bool> createDracoMesh(const hfm::Mesh& mesh, const std::vector& normals, const std::vector& tangents, const std::vector& materialList) { +void createMaterialLists(const std::vector& shapes, const std::vector& meshes, const std::vector& hfmMaterials, std::vector>& materialIndexLists, std::vector>& partMaterialIndicesPerMesh) { + std::vector> materialsPerMesh; + for (const auto& mesh : meshes) { + materialsPerMesh.emplace_back(mesh.parts.size(), hfm::UNDEFINED_KEY); + } + for (const auto& shape : shapes) { + materialsPerMesh[shape.mesh][shape.meshPart] = shape.material; + } + + materialIndexLists.resize(materialsPerMesh.size()); + partMaterialIndicesPerMesh.resize(materialsPerMesh.size()); + for (size_t i = 0; i < materialsPerMesh.size(); ++i) { + const std::vector& materials = materialsPerMesh[i]; + std::vector uniqueMaterials; + + reindexMaterials(materials, uniqueMaterials, partMaterialIndicesPerMesh[i]); + + materialIndexLists[i].reserve(uniqueMaterials.size()); + for (const uint32_t material : uniqueMaterials) { + const auto& hfmMaterial = hfmMaterials[material]; + materialIndexLists[i].push_back(QVariant(hfmMaterial.materialID).toByteArray()); + } + } +} + +std::tuple, bool> createDracoMesh(const hfm::Mesh& mesh, const std::vector& normals, const std::vector& tangents, const std::vector& partMaterialIndices) { Q_ASSERT(normals.size() == 0 || (int)normals.size() == mesh.vertices.size()); 
Q_ASSERT(mesh.colors.size() == 0 || mesh.colors.size() == mesh.vertices.size()); Q_ASSERT(mesh.texCoords.size() == 0 || mesh.texCoords.size() == mesh.vertices.size()); @@ -122,11 +150,9 @@ std::tuple, bool> createDracoMesh(const hfm::Mesh& auto partIndex = 0; draco::FaceIndex face; - uint16_t materialID; for (auto& part : mesh.parts) { - auto materialIt = std::find(materialList.cbegin(), materialList.cend(), QVariant(part.materialID).toByteArray()); - materialID = (uint16_t)(materialIt - materialList.cbegin()); + uint16_t materialID = partMaterialIndices[partIndex]; auto addFace = [&](const QVector& indices, int index, draco::FaceIndex face) { int32_t idx0 = indices[index]; @@ -214,30 +240,33 @@ void BuildDracoMeshTask::run(const baker::BakeContextPointer& context, const Inp #ifdef Q_OS_ANDROID qCWarning(model_baker) << "BuildDracoMesh is disabled on Android. Output meshes will be empty."; #else - const auto& meshes = input.get0(); - const auto& normalsPerMesh = input.get1(); - const auto& tangentsPerMesh = input.get2(); + const auto& shapes = input.get0(); + const auto& meshes = input.get1(); + const auto& materials = input.get2(); + const auto& normalsPerMesh = input.get3(); + const auto& tangentsPerMesh = input.get4(); auto& dracoBytesPerMesh = output.edit0(); auto& dracoErrorsPerMesh = output.edit1(); + auto& materialLists = output.edit2(); + std::vector> partMaterialIndicesPerMesh; + createMaterialLists(shapes, meshes, materials, materialLists, partMaterialIndicesPerMesh); dracoBytesPerMesh.reserve(meshes.size()); // vector is an exception to the std::vector conventions as it is a bit field // So a bool reference to an element doesn't work dracoErrorsPerMesh.resize(meshes.size()); - materialLists.reserve(meshes.size()); for (size_t i = 0; i < meshes.size(); i++) { const auto& mesh = meshes[i]; const auto& normals = baker::safeGet(normalsPerMesh, i); const auto& tangents = baker::safeGet(tangentsPerMesh, i); dracoBytesPerMesh.emplace_back(); auto& dracoBytes = dracoBytesPerMesh.back(); - materialLists.push_back(createMaterialList(mesh)); - const auto& materialList = materialLists.back(); + const auto& partMaterialIndices = partMaterialIndicesPerMesh[i]; bool dracoError; std::unique_ptr dracoMesh; - std::tie(dracoMesh, dracoError) = createDracoMesh(mesh, normals, tangents, materialList); + std::tie(dracoMesh, dracoError) = createDracoMesh(mesh, normals, tangents, partMaterialIndices); dracoErrorsPerMesh[i] = dracoError; if (dracoMesh) { diff --git a/libraries/model-baker/src/model-baker/BuildDracoMeshTask.h b/libraries/model-baker/src/model-baker/BuildDracoMeshTask.h index ac9ad648ab..a83f2ae163 100644 --- a/libraries/model-baker/src/model-baker/BuildDracoMeshTask.h +++ b/libraries/model-baker/src/model-baker/BuildDracoMeshTask.h @@ -33,7 +33,7 @@ public: class BuildDracoMeshTask { public: using Config = BuildDracoMeshConfig; - using Input = baker::VaryingSet3, baker::NormalsPerMesh, baker::TangentsPerMesh>; + using Input = baker::VaryingSet5, std::vector, std::vector, baker::NormalsPerMesh, baker::TangentsPerMesh>; using Output = baker::VaryingSet3, std::vector, std::vector>>; using JobModel = baker::Job::ModelIO; From 21699526137993589f36fca5527fc0e5f8f42733 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Mon, 11 Nov 2019 14:20:13 -0800 Subject: [PATCH 119/121] Remove unused mesh.part.materialID --- libraries/fbx/src/FBXSerializer.cpp | 6 ------ libraries/fbx/src/OBJSerializer.cpp | 7 ------- libraries/hfm/src/hfm/HFM.h | 2 -- 3 files changed, 15 deletions(-) diff --git 
a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index f09182c0e6..de6117eff3 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1485,12 +1485,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const shape.mesh = meshIndex; shape.meshPart = i; shape.joint = transformIndex; - - auto matName = mesh.parts[i].materialID; - auto materialIt = materialNameToID.find(matName.toStdString()); - if (materialIt != materialNameToID.end()) { - shape.material = materialIt->second; - } } // For FBX_DRACO_MESH_VERSION < 2, or unbaked models, get materials from the partMaterialTextures diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index 31f92555f1..99299dcdec 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -174,11 +174,6 @@ glm::vec2 OBJTokenizer::getVec2() { return v; } - -void setMeshPartDefaults(HFMMeshPart& meshPart, QString materialID) { - meshPart.materialID = materialID; -} - // OBJFace // NOTE (trent, 7/20/17): The vertexColors vector being passed-in isn't necessary here, but I'm just // pairing it with the vertices vector for consistency. @@ -501,8 +496,6 @@ bool OBJSerializer::parseOBJGroup(OBJTokenizer& tokenizer, const hifi::VariantHa bool anyVertexColor { false }; int vertexCount { 0 }; - setMeshPartDefaults(meshPart, QString("dontknow") + QString::number(mesh.parts.size())); - while (true) { int tokenType = tokenizer.nextToken(); if (tokenType == OBJTokenizer::COMMENT_TOKEN) { diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index f092c91e99..c7b6789094 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -160,8 +160,6 @@ public: QVector quadIndices; // original indices from the FBX mesh QVector quadTrianglesIndices; // original indices from the FBX mesh of the quad converted as triangles QVector triangleIndices; // original indices from the FBX mesh - - QString materialID; // DEPRECATED }; class Material { From f39121c53be633f6c14baccce23a92527def380e Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Tue, 12 Nov 2019 09:52:58 -0800 Subject: [PATCH 120/121] Fix build error/warning --- libraries/fbx/src/OBJSerializer.cpp | 1 - tools/vhacd-util/src/VHACDUtil.cpp | 1 - 2 files changed, 2 deletions(-) diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index 99299dcdec..f9ba8e8c84 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -488,7 +488,6 @@ bool OBJSerializer::parseOBJGroup(OBJTokenizer& tokenizer, const hifi::VariantHa FaceGroup faces; HFMMesh& mesh = hfmModel.meshes[0]; mesh.parts.push_back(HFMMeshPart()); - HFMMeshPart& meshPart = mesh.parts.back(); bool sawG = false; bool result = true; int originalFaceCountForDebugging = 0; diff --git a/tools/vhacd-util/src/VHACDUtil.cpp b/tools/vhacd-util/src/VHACDUtil.cpp index f0eb94a1cf..da20339123 100644 --- a/tools/vhacd-util/src/VHACDUtil.cpp +++ b/tools/vhacd-util/src/VHACDUtil.cpp @@ -149,7 +149,6 @@ void vhacd::VHACDUtil::fattenMesh(const HFMMesh& mesh, const glm::mat4& modelOff result.vertices << p3; // add the new point to the result mesh HFMMeshPart newMeshPart; - setMeshPartDefaults(newMeshPart, "unknown"); newMeshPart.triangleIndices << index0 << index1 << index2; newMeshPart.triangleIndices << index0 << index3 << index1; newMeshPart.triangleIndices << index1 << index3 << index2; From 516debdcb2592a659d7486e212ebfd93fe38cc8b Mon 
Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 13 Nov 2019 12:50:13 -0800 Subject: [PATCH 121/121] Fix code style / remove cruft --- libraries/animation/src/AnimSkeleton.cpp | 8 +- libraries/animation/src/AnimSkeleton.h | 2 +- libraries/baking/src/MaterialBaker.cpp | 2 +- .../src/RenderableModelEntityItem.cpp | 16 +--- libraries/fbx/src/FBXSerializer.cpp | 4 +- libraries/fbx/src/OBJSerializer.cpp | 10 +-- libraries/render-utils/src/Model.cpp | 15 ++-- .../render-utils/src/SoftAttachmentModel.cpp | 3 +- tests-manual/fbx/CMakeLists.txt | 11 --- tests-manual/fbx/src/main.cpp | 77 ------------------- 10 files changed, 23 insertions(+), 125 deletions(-) delete mode 100644 tests-manual/fbx/CMakeLists.txt delete mode 100644 tests-manual/fbx/src/main.cpp diff --git a/libraries/animation/src/AnimSkeleton.cpp b/libraries/animation/src/AnimSkeleton.cpp index b60fc42f89..e5f05ab45f 100644 --- a/libraries/animation/src/AnimSkeleton.cpp +++ b/libraries/animation/src/AnimSkeleton.cpp @@ -24,13 +24,13 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) { // we make a copy of the inverseBindMatrices in order to prevent mutating the model bind pose // when we are dealing with a joint offset in the model - for (int i = 0; i < (int)hfmModel.skinDeformers.size(); i++) { - const auto& defor = hfmModel.skinDeformers[i]; + for (uint32_t i = 0; i < (uint32_t)hfmModel.skinDeformers.size(); i++) { + const auto& deformer = hfmModel.skinDeformers[i]; std::vector dummyClustersList; - for (uint32_t j = 0; j < (uint32_t)defor.clusters.size(); j++) { + for (uint32_t j = 0; j < (uint32_t)deformer.clusters.size(); j++) { // cast into a non-const reference, so we can mutate the FBXCluster - HFMCluster& cluster = const_cast(defor.clusters.at(j)); + HFMCluster& cluster = const_cast(deformer.clusters.at(j)); HFMCluster localCluster; localCluster.jointIndex = cluster.jointIndex; diff --git a/libraries/animation/src/AnimSkeleton.h b/libraries/animation/src/AnimSkeleton.h index 1477efb223..a6470ac609 100644 --- a/libraries/animation/src/AnimSkeleton.h +++ b/libraries/animation/src/AnimSkeleton.h @@ -68,7 +68,7 @@ public: void dump(const AnimPoseVec& poses) const; std::vector lookUpJointIndices(const std::vector& jointNames) const; - const HFMCluster getClusterBindMatricesOriginalValues(const int skinDeformerIndex, const int clusterIndex) const { return _clusterBindMatrixOriginalValues[skinDeformerIndex][clusterIndex]; } + const HFMCluster getClusterBindMatricesOriginalValues(int skinDeformerIndex, int clusterIndex) const { return _clusterBindMatrixOriginalValues[skinDeformerIndex][clusterIndex]; } protected: void buildSkeletonFromJoints(const std::vector& joints, const QMap jointOffsets); diff --git a/libraries/baking/src/MaterialBaker.cpp b/libraries/baking/src/MaterialBaker.cpp index d177ddf358..fbb17f0d01 100644 --- a/libraries/baking/src/MaterialBaker.cpp +++ b/libraries/baking/src/MaterialBaker.cpp @@ -260,7 +260,7 @@ void MaterialBaker::addTexture(const QString& materialName, image::TextureUsage: void MaterialBaker::setMaterials(const std::vector& materials, const QString& baseURL) { _materialResource = NetworkMaterialResourcePointer(new NetworkMaterialResource(), [](NetworkMaterialResource* ptr) { ptr->deleteLater(); }); - for (auto& material : materials) { + for (const auto& material : materials) { _materialResource->parsedMaterials.names.push_back(material.name.toStdString()); _materialResource->parsedMaterials.networkMaterials[material.name.toStdString()] = std::make_shared(material, baseURL); diff --git 
a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp index 4de092c7fd..1bde9a0fa6 100644 --- a/libraries/entities-renderer/src/RenderableModelEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableModelEntityItem.cpp @@ -403,19 +403,11 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) { numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer uint32_t indexStart = (uint32_t)part.x; uint32_t indexEnd = indexStart + numIndices; - for (uint32_t j = indexStart; j < indexEnd; j += TRIANGLE_STRIDE) { + for (uint32_t j = indexStart; j < indexEnd; ++j) { // NOTE: It seems odd to skip vertices when initializing a btConvexHullShape, but let's keep the behavior similar to the old behavior for now - glm::vec3 p0 = triangleListMesh.vertices[triangleListMesh.indices[j]]; - glm::vec3 p1 = triangleListMesh.vertices[triangleListMesh.indices[j + 1]]; - glm::vec3 p2 = triangleListMesh.vertices[triangleListMesh.indices[j + 2]]; - if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p0) == pointsInPart.cend()) { - pointsInPart.push_back(p0); - } - if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p1) == pointsInPart.cend()) { - pointsInPart.push_back(p1); - } - if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), p2) == pointsInPart.cend()) { - pointsInPart.push_back(p2); + glm::vec3 point = triangleListMesh.vertices[triangleListMesh.indices[j]]; + if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), point) == pointsInPart.cend()) { + pointsInPart.push_back(point); } } diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index f09182c0e6..46971b01e5 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1462,7 +1462,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const const auto& partMaterialTextures = extracted.partMaterialTextures; uint32_t meshIndex = (uint32_t)hfmModel.meshes.size(); - meshIDsToMeshIndices.insert(it.key(), meshIndex); + meshIDsToMeshIndices.insert(meshID, meshIndex); hfmModel.meshes.push_back(extracted.mesh); hfm::Mesh& mesh = hfmModel.meshes.back(); @@ -1635,8 +1635,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } // Store the model's dynamic transform, and put its ID in the shapes + uint32_t skinDeformerID = (uint32_t)hfmModel.skinDeformers.size(); hfmModel.skinDeformers.push_back(skinDeformer); - uint32_t skinDeformerID = (uint32_t)(hfmModel.skinDeformers.size() - 1); for (hfm::Shape& shape : partShapes) { shape.skinDeformer = skinDeformerID; } diff --git a/libraries/fbx/src/OBJSerializer.cpp b/libraries/fbx/src/OBJSerializer.cpp index 31f92555f1..a998c3442d 100644 --- a/libraries/fbx/src/OBJSerializer.cpp +++ b/libraries/fbx/src/OBJSerializer.cpp @@ -701,9 +701,9 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V QMap materialMeshIdMap; std::vector hfmMeshParts; - for (uint32_t i = 0, meshPartCount = 0; i < (uint32_t)mesh.parts.size(); i++, meshPartCount++) { - HFMMeshPart& meshPart = mesh.parts[i]; - FaceGroup faceGroup = faceGroups[meshPartCount]; + for (uint32_t meshPartIndex = 0; meshPartIndex < (uint32_t)mesh.parts.size(); ++meshPartIndex) { + HFMMeshPart& meshPart = mesh.parts[meshPartIndex]; + FaceGroup faceGroup = faceGroups[meshPartIndex]; bool specifiesUV = false; foreach(OBJFace face, faceGroup) { // Go through all of the OBJ 
faces and determine the number of different materials necessary (each different material will be a unique mesh). @@ -758,8 +758,8 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V mesh.parts.clear(); mesh.parts = hfmMeshParts; - for (uint32_t i = 0, meshPartCount = 0; i < unmodifiedMeshPartCount; i++, meshPartCount++) { - FaceGroup faceGroup = faceGroups[meshPartCount]; + for (uint32_t meshPartIndex = 0; meshPartIndex < unmodifiedMeshPartCount; meshPartIndex++) { + FaceGroup faceGroup = faceGroups[meshPartIndex]; // Now that each mesh has been created with its own unique material mappings, fill them with data (vertex data is duplicated, face data is not). foreach(OBJFace face, faceGroup) { diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index b932e4df60..0e5b032b07 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -189,7 +189,7 @@ bool Model::shouldInvalidatePayloadShapeKey(int meshIndex) { const auto& networkMeshes = getNetworkModel()->getMeshes(); // if our index is ever out of range for either meshes or networkMeshes, then skip it, and set our _meshGroupsKnown // to false to rebuild out mesh groups. - if (meshIndex < 0 || meshIndex >= (int)networkMeshes.size() || meshIndex >= (int)hfmModel.meshes.size() /* || meshIndex >= (int)_meshStates.size()*/) { + if (meshIndex < 0 || meshIndex >= (int)networkMeshes.size() || meshIndex >= (int)hfmModel.meshes.size()) { _needsFixupInScene = true; // trigger remove/add cycle invalidCalculatedMeshBoxes(); // if we have to reload, we need to assume our mesh boxes are all invalid return true; @@ -252,9 +252,6 @@ void Model::updateRenderItems() { } Transform renderTransform = modelTransform; - // if (meshState.clusterMatrices.size() <= 1) { - // renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform); - // } data.updateTransform(renderTransform); data.updateTransformAndBound(modelTransform.worldTransform(shapeState._rootFromJointTransform)); @@ -299,12 +296,10 @@ void Model::reset() { } void Model::updateShapeStatesFromRig() { - { // Shapes state: - for (auto& shape : _shapeStates) { - uint32_t jointId = shape._jointIndex; - if (jointId < (uint32_t) _rig.getJointStateCount()) { - shape._rootFromJointTransform = _rig.getJointTransform(jointId); - } + for (auto& shape : _shapeStates) { + uint32_t jointId = shape._jointIndex; + if (jointId < (uint32_t) _rig.getJointStateCount()) { + shape._rootFromJointTransform = _rig.getJointTransform(jointId); } } } diff --git a/libraries/render-utils/src/SoftAttachmentModel.cpp b/libraries/render-utils/src/SoftAttachmentModel.cpp index 1b8d1e7b69..7a58498e50 100644 --- a/libraries/render-utils/src/SoftAttachmentModel.cpp +++ b/libraries/render-utils/src/SoftAttachmentModel.cpp @@ -61,8 +61,7 @@ void SoftAttachmentModel::updateClusterMatrices() { Transform clusterTransform; Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform); state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform); - } - else { + } else { auto jointMatrix = rig->getJointTransform(cbmov.jointIndex); glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]); } diff --git a/tests-manual/fbx/CMakeLists.txt b/tests-manual/fbx/CMakeLists.txt deleted file mode 100644 index 7221f081fe..0000000000 --- a/tests-manual/fbx/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -set(TARGET_NAME fbx-test) -# This is not a testcase -- 
just set it up as a regular hifi project -setup_hifi_project(Quick Gui) -setup_memory_debugger() -set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/") - -file(GLOB_RECURSE GLB_TEST_FILES "c:/Users/bdavi/git/glTF-Sample-Models/2.0/*.glb") -list(JOIN GLB_TEST_FILES "|" GLB_TEST_FILES) -target_compile_definitions(${TARGET_NAME} PRIVATE -DGLB_TEST_FILES="${GLB_TEST_FILES}") -link_hifi_libraries(shared graphics networking image gpu hfm fbx) -package_libraries_for_deployment() diff --git a/tests-manual/fbx/src/main.cpp b/tests-manual/fbx/src/main.cpp deleted file mode 100644 index 66c3a4f30e..0000000000 --- a/tests-manual/fbx/src/main.cpp +++ /dev/null @@ -1,77 +0,0 @@ -// -// Created by Bradley Austin Davis on 2018/01/11 -// Copyright 2014 High Fidelity, Inc. -// -// Distributed under the Apache License, Version 2.0. -// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html -// - -#include -#include - -#include - -#include -#include -#include -#include - -#include - -// Currently only used by testing code -inline std::list splitString(const std::string& source, const char delimiter = ' ') { - std::list result; - size_t start = 0, next; - - while (std::string::npos != (next = source.find(delimiter, start))) { - std::string sub = source.substr(start, next - start); - if (!sub.empty()) { - result.push_back(sub); - } - start = next + 1; - } - if (source.size() > start) { - result.push_back(source.substr(start)); - } - return result; -} - -std::list getGlbTestFiles() { - return splitString(GLB_TEST_FILES, '|'); -} - -QtMessageHandler originalHandler; - -void messageHandler(QtMsgType type, const QMessageLogContext& context, const QString& message) { -#if defined(Q_OS_WIN) - OutputDebugStringA(message.toStdString().c_str()); - OutputDebugStringA("\n"); -#endif - originalHandler(type, context, message); -} - -QByteArray readFileBytes(const std::string& filename) { - QFile file(filename.c_str()); - file.open(QFile::ReadOnly); - QByteArray result = file.readAll(); - file.close(); - return result; -} - -void processFile(const std::string& filename) { - qDebug() << filename.c_str(); - GLTFSerializer().read(readFileBytes(filename), {}, QUrl::fromLocalFile(filename.c_str())); -} - -int main(int argc, char** argv) { - QCoreApplication app{ argc, argv }; - originalHandler = qInstallMessageHandler(messageHandler); - - DependencyManager::set(false); - - //processFile("c:/Users/bdavi/git/glTF-Sample-Models/2.0/Box/glTF-Binary/Box.glb"); - - for (const auto& testFile : getGlbTestFiles()) { - processFile(testFile); - } -}
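
The preceding patches converge on one traversal pattern: callers no longer loop over meshes and their parts directly, but walk the flat hfmModel.shapes table, where each shape names a mesh, a mesh part, optionally a joint (hfm::UNDEFINED_KEY when absent), and where each part of the pre-triangulated triangle-list mesh is a (first index, index count) range into a flat index buffer. The sketch below restates that pattern in isolation. The struct definitions are deliberately simplified stand-ins for the real hfm types, and forEachShapeTriangle is a hypothetical helper rather than a function in the codebase; it only illustrates the shape -> mesh -> part range -> triangles walk used by the reworked calculateTriangleSets and computeShapeInfo.

    // Illustrative sketch only: minimal stand-ins for the hfm types these patches touch.
    #include <cstdint>
    #include <functional>
    #include <vector>

    constexpr uint32_t UNDEFINED_KEY = ~0u;                // stands in for hfm::UNDEFINED_KEY

    struct Vec3 { float x, y, z; };

    struct TriangleListMesh {                              // pre-triangulated, de-indexed mesh data
        struct PartRange { uint32_t firstIndex; uint32_t indexCount; };
        std::vector<Vec3> vertices;
        std::vector<uint32_t> indices;                     // one flat triangle index buffer per mesh
        std::vector<PartRange> parts;                      // one (start, count) range per mesh part
    };

    struct Shape {                                         // the lightweight per-part record
        uint32_t mesh = UNDEFINED_KEY;
        uint32_t meshPart = UNDEFINED_KEY;
        uint32_t material = UNDEFINED_KEY;
        uint32_t joint = UNDEFINED_KEY;                    // in the real code this selects the transform to apply
    };

    struct Mesh  { TriangleListMesh triangleListMesh; };
    struct Model { std::vector<Mesh> meshes; std::vector<Shape> shapes; };

    // Visit every triangle of every shape: shape -> mesh -> part range -> triangles.
    inline void forEachShapeTriangle(const Model& model,
                                     const std::function<void(const Vec3&, const Vec3&, const Vec3&)>& visit) {
        constexpr uint32_t TRIANGLE_STRIDE = 3;
        for (const Shape& shape : model.shapes) {
            const TriangleListMesh& tlMesh = model.meshes[shape.mesh].triangleListMesh;
            const TriangleListMesh::PartRange& part = tlMesh.parts[shape.meshPart];
            // Same workaround as in the patches: tolerate index counts that are not a multiple of three.
            const uint32_t numIndices = part.indexCount - (part.indexCount % TRIANGLE_STRIDE);
            for (uint32_t i = part.firstIndex, end = part.firstIndex + numIndices; i < end; i += TRIANGLE_STRIDE) {
                visit(tlMesh.vertices[tlMesh.indices[i]],
                      tlMesh.vertices[tlMesh.indices[i + 1]],
                      tlMesh.vertices[tlMesh.indices[i + 2]]);
            }
        }
    }

A consumer such as a triangle-set or convex-hull builder then only needs a callback, e.g. forEachShapeTriangle(model, [&](const Vec3& v0, const Vec3& v1, const Vec3& v2) { triangles.push_back({ v0, v1, v2 }); }), applying the joint or skin-deformer transform to the vertices first when shape.joint is defined, as the patched Model and RenderableModelEntityItem code does.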
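
The material handling in BuildDracoMeshTask.cpp is likewise easier to follow outside diff form. Under the same caveat that the types and the free function below are assumptions for illustration, this is the effect of reindexMaterials/createMaterialLists for a single mesh: the per-part material keys recorded on the shapes become a de-duplicated material list (ordered by first use) plus, for each part, an index into that list; the per-part indices feed the Draco material attribute, while the unique list, converted to material IDs, is what OBJBaker later walks to emit one FBX material node per referenced material.

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // For one mesh: material key per part in -> (unique material keys, per-part index into them) out.
    // Hypothetical helper mirroring the intent of reindexMaterials/createMaterialLists.
    inline std::pair<std::vector<uint32_t>, std::vector<uint16_t>>
    deduplicateMeshMaterials(const std::vector<uint32_t>& materialPerPart) {
        std::vector<uint32_t> uniqueMaterials;                       // ordered by first use
        std::vector<uint16_t> partMaterialIndices(materialPerPart.size());
        for (size_t part = 0; part < materialPerPart.size(); ++part) {
            const uint32_t material = materialPerPart[part];
            const auto found = std::find(uniqueMaterials.cbegin(), uniqueMaterials.cend(), material);
            if (found == uniqueMaterials.cend()) {
                uniqueMaterials.push_back(material);
                partMaterialIndices[part] = (uint16_t)(uniqueMaterials.size() - 1);
            } else {
                partMaterialIndices[part] = (uint16_t)(found - uniqueMaterials.cbegin());
            }
        }
        return { std::move(uniqueMaterials), partMaterialIndices };
    }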