From bac22c69c1ed645b3b1adfc494936c8e5c27c948 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Wed, 11 Sep 2019 17:17:30 -0700 Subject: [PATCH 1/7] Move ExtractedMesh out of HFM --- libraries/fbx/src/FBXSerializer.h | 9 ++++++++- libraries/hfm/src/hfm/HFM.h | 9 --------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.h b/libraries/fbx/src/FBXSerializer.h index 7d41f98444..c9468708a6 100644 --- a/libraries/fbx/src/FBXSerializer.h +++ b/libraries/fbx/src/FBXSerializer.h @@ -100,7 +100,14 @@ public: {} }; -class ExtractedMesh; +class ExtractedMesh { +public: + hfm::Mesh mesh; + QMultiHash newIndices; + QVector > blendshapeIndexMaps; + QVector > partMaterialTextures; + QHash texcoordSetMap; +}; class FBXSerializer : public HFMSerializer { public: diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index d4d6dd33d0..29c4af9ec9 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -381,15 +381,6 @@ public: }; -class ExtractedMesh { -public: - hfm::Mesh mesh; - QMultiHash newIndices; - QVector > blendshapeIndexMaps; - QVector > partMaterialTextures; - QHash texcoordSetMap; -}; - typedef hfm::Blendshape HFMBlendshape; typedef hfm::JointShapeInfo HFMJointShapeInfo; typedef hfm::Joint HFMJoint; From ff5fef9c3a18326d45529cc3c4b269b395cf190c Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 12 Sep 2019 10:58:52 -0700 Subject: [PATCH 2/7] Update FBXSerializer to reference shapes, support instancing (deformers WIP) --- libraries/fbx/src/FBXSerializer.cpp | 527 ++++++++++++----------- libraries/fbx/src/FBXSerializer.h | 1 + libraries/fbx/src/FBXSerializer_Mesh.cpp | 10 +- 3 files changed, 276 insertions(+), 262 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index e6b4a62b51..e8388451d4 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -145,40 +145,19 @@ public: bool isLimbNode; // is 
this FBXModel transform is a "LimbNode" i.e. a joint }; -glm::mat4 getGlobalTransform(const QMultiMap& _connectionParentMap, - const QHash& fbxModels, QString nodeID, bool mixamoHack, const QString& url) { - glm::mat4 globalTransform; - QVector visitedNodes; // Used to prevent following a cycle - while (!nodeID.isNull()) { - visitedNodes.append(nodeID); // Append each node we visit - - const FBXModel& fbxModel = fbxModels.value(nodeID); - globalTransform = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(fbxModel.preRotation * - fbxModel.rotation * fbxModel.postRotation) * fbxModel.postTransform * globalTransform; - if (fbxModel.hasGeometricOffset) { - glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation); - globalTransform = globalTransform * geometricOffset; - } - - if (mixamoHack) { - // there's something weird about the models from Mixamo Fuse; they don't skin right with the full transform - return globalTransform; - } - QList parentIDs = _connectionParentMap.values(nodeID); - nodeID = QString(); - foreach (const QString& parentID, parentIDs) { - if (visitedNodes.contains(parentID)) { - qCWarning(modelformat) << "Ignoring loop detected in FBX connection map for" << url; - continue; - } - +std::vector getModelIDsForMeshID(const QString& meshID, const QHash& fbxModels, const QMultiMap& _connectionParentMap) { + std::vector modelsForMesh; + if (fbxModels.contains(meshID)) { + modelsForMesh.push_back(meshID); + } else { + // This mesh may have more than one parent model, with different material and transform, representing a different instance of the mesh + for (const auto& parentID : _connectionParentMap.values(meshID)) { if (fbxModels.contains(parentID)) { - nodeID = parentID; - break; + modelsForMesh.push_back(parentID); } } } - return globalTransform; + return modelsForMesh; } class ExtractedBlendshape { @@ -404,7 +383,7 @@ HFMModel* 
FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const QVector blendshapes; QHash fbxModels; - QHash clusters; + QHash fbxClusters; QHash animationCurves; QHash typeFlags; @@ -1058,9 +1037,9 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } - // skip empty clusters + // skip empty fbxClusters if (cluster.indices.size() > 0 && cluster.weights.size() > 0) { - clusters.insert(getID(object.properties), cluster); + fbxClusters.insert(getID(object.properties), cluster); } } else if (object.properties.last() == "BlendShapeChannel") { @@ -1233,13 +1212,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const QVector modelIDs; QSet remainingFBXModels; for (QHash::const_iterator fbxModel = fbxModels.constBegin(); fbxModel != fbxModels.constEnd(); fbxModel++) { - // models with clusters must be parented to the cluster top + // models with fbxClusters must be parented to the cluster top // Unless the model is a root node. 
bool isARootNode = !modelIDs.contains(_connectionParentMap.value(fbxModel.key())); if (!isARootNode) { foreach(const QString& deformerID, _connectionChildMap.values(fbxModel.key())) { foreach(const QString& clusterID, _connectionChildMap.values(deformerID)) { - if (!clusters.contains(clusterID)) { + if (!fbxClusters.contains(clusterID)) { continue; } QString topID = getTopModelID(_connectionParentMap, fbxModels, _connectionChildMap.value(clusterID), url); @@ -1283,8 +1262,15 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // convert the models to joints hfmModel.hasSkeletonJoints = false; + + // Note that these transform nodes are initially defined in world space + bool needMixamoHack = hfmModel.applicationName == "mixamo.com"; + hfmModel.transforms.reserve(modelIDs.size()); + std::vector globalTransforms; + globalTransforms.reserve(modelIDs.size()); - foreach (const QString& modelID, modelIDs) { + int jointIndex = 0; + for (const QString& modelID : modelIDs) { const FBXModel& fbxModel = fbxModels[modelID]; HFMJoint joint; joint.parentIndex = fbxModel.parentIndex; @@ -1358,6 +1344,42 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } hfmModel.joints.push_back(joint); + + // Now that we've initialized the joint, we can define the transform + // modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate + glm::mat4 localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; + if (joint.hasGeometricOffset) { + glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); + localTransform = localTransform * geometricOffset; + } + glm::mat4 globalTransform; + if (joint.parentIndex != -1 && joint.parentIndex < jointIndex) { + hfm::Joint& parentJoint = 
hfmModel.joints[joint.parentIndex]; + glm::mat4& parentGlobalTransform = globalTransforms[joint.parentIndex]; + if (needMixamoHack) { + // there's something weird about the models from Mixamo Fuse; they don't skin right with the full transform + globalTransform = localTransform; + localTransform = globalTransform * glm::inverse(parentGlobalTransform); + } else { + if (parentJoint.hasGeometricOffset) { + // Per the FBX standard, geometric offsets should not propagate to children + glm::mat4 parentGeometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); + globalTransform = localTransform * parentGlobalTransform * glm::inverse(parentGeometricOffset); + localTransform = globalTransform * glm::inverse(parentGlobalTransform); + } else { + globalTransform = localTransform * parentGlobalTransform; + } + } + } else { + globalTransform = localTransform; + } + hfm::TransformNode transformNode; + transformNode.parent = joint.parentIndex == -1 ? 
hfm::UNDEFINED_KEY : joint.parentIndex; + transformNode.transform = Transform(localTransform); + globalTransforms.push_back(globalTransform); + hfmModel.transforms.push_back(transformNode); + + ++jointIndex; } // NOTE: shapeVertices are in joint-frame @@ -1401,235 +1423,222 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } #endif + std::unordered_map materialNameToID; for (auto materialIt = _hfmMaterials.cbegin(); materialIt != _hfmMaterials.cend(); ++materialIt) { + materialNameToID[materialIt.key().toStdString()] = hfmModel.materials.size(); hfmModel.materials.push_back(materialIt.value()); } // see if any materials have texture children bool materialsHaveTextures = checkMaterialsHaveTextures(_hfmMaterials, _textureFilenames, _connectionChildMap); + // Note that the transforms in the TransformNodes are initially in world-space, and need to be converted to parent-space + std::vector transformNodes; + for (QMap::iterator it = meshes.begin(); it != meshes.end(); it++) { - ExtractedMesh& extracted = it.value(); + const QString& meshID = it.key(); + const ExtractedMesh& extracted = it.value(); + const auto& partMaterialTextures = extracted.partMaterialTextures; + const auto& newIndices = extracted.newIndices; - extracted.mesh.meshExtents.reset(); - - // accumulate local transforms - QString modelID = fbxModels.contains(it.key()) ? 
it.key() : _connectionParentMap.value(it.key()); - glm::mat4 modelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url); - - // compute the mesh extents from the transformed vertices - foreach (const glm::vec3& vertex, extracted.mesh.vertices) { - glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f)); - hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex); - hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex); - - extracted.mesh.meshExtents.minimum = glm::min(extracted.mesh.meshExtents.minimum, transformedVertex); - extracted.mesh.meshExtents.maximum = glm::max(extracted.mesh.meshExtents.maximum, transformedVertex); - extracted.mesh.modelTransform = modelTransform; - } - - // look for textures, material properties - // allocate the Part material library - // NOTE: extracted.partMaterialTextures is empty for FBX_DRACO_MESH_VERSION >= 2. In that case, the mesh part's materialID string is already defined. 
- int materialIndex = 0; - int textureIndex = 0; - QList children = _connectionChildMap.values(modelID); - for (int i = children.size() - 1; i >= 0; i--) { - - const QString& childID = children.at(i); - if (_hfmMaterials.contains(childID)) { - // the pure material associated with this part - HFMMaterial material = _hfmMaterials.value(childID); - - for (int j = 0; j < extracted.partMaterialTextures.size(); j++) { - if (extracted.partMaterialTextures.at(j).first == materialIndex) { - HFMMeshPart& part = extracted.mesh.parts[j]; - part.materialID = material.materialID; - } - } - - materialIndex++; - } else if (_textureFilenames.contains(childID)) { - // NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale") - // I'm leaving the second parameter blank right now as this code may never be used. - HFMTexture texture = getTexture(childID, ""); - for (int j = 0; j < extracted.partMaterialTextures.size(); j++) { - int partTexture = extracted.partMaterialTextures.at(j).second; - if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) { - // TODO: DO something here that replaces this legacy code - // Maybe create a material just for this part with the correct textures? 
- // extracted.mesh.parts[j].diffuseTexture = texture; - } - } - textureIndex++; - } - } - - // find the clusters with which the mesh is associated - QVector clusterIDs; - foreach (const QString& childID, _connectionChildMap.values(it.key())) { - foreach (const QString& clusterID, _connectionChildMap.values(childID)) { - if (!clusters.contains(clusterID)) { - continue; - } - HFMCluster hfmCluster; - const Cluster& cluster = clusters[clusterID]; - clusterIDs.append(clusterID); - - // see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion - // of skinning information in FBX - QString jointID = _connectionChildMap.value(clusterID); - hfmCluster.jointIndex = modelIDs.indexOf(jointID); - if (hfmCluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { - qCDebug(modelformat) << "Joint not in model list: " << jointID; - hfmCluster.jointIndex = 0; - } - - hfmCluster.inverseBindMatrix = glm::inverse(cluster.transformLink) * modelTransform; - - // slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and - // sometimes floating point fuzz can be introduced after the inverse. 
- hfmCluster.inverseBindMatrix[0][3] = 0.0f; - hfmCluster.inverseBindMatrix[1][3] = 0.0f; - hfmCluster.inverseBindMatrix[2][3] = 0.0f; - hfmCluster.inverseBindMatrix[3][3] = 1.0f; - - hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix); - - extracted.mesh.clusters.append(hfmCluster); - - // override the bind rotation with the transform link - HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex]; - joint.inverseBindRotation = glm::inverse(extractRotation(cluster.transformLink)); - joint.bindTransform = cluster.transformLink; - joint.bindTransformFoundInCluster = true; - - // update the bind pose extents - glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform); - hfmModel.bindExtents.addPoint(bindTranslation); - } - } - - // the last cluster is the root cluster - { - HFMCluster cluster; - cluster.jointIndex = modelIDs.indexOf(modelID); - if (cluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { - qCDebug(modelformat) << "Model not in model list: " << modelID; - cluster.jointIndex = 0; - } - extracted.mesh.clusters.append(cluster); - } - - // whether we're skinned depends on how many clusters are attached - if (clusterIDs.size() > 1) { - // this is a multi-mesh joint - const int WEIGHTS_PER_VERTEX = 4; - int numClusterIndices = extracted.mesh.vertices.size() * WEIGHTS_PER_VERTEX; - extracted.mesh.clusterIndices.fill(extracted.mesh.clusters.size() - 1, numClusterIndices); - QVector weightAccumulators; - weightAccumulators.fill(0.0f, numClusterIndices); - - for (int i = 0; i < clusterIDs.size(); i++) { - QString clusterID = clusterIDs.at(i); - const Cluster& cluster = clusters[clusterID]; - const HFMCluster& hfmCluster = extracted.mesh.clusters.at(i); - int jointIndex = hfmCluster.jointIndex; - HFMJoint& joint = hfmModel.joints[jointIndex]; - - glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform; - ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex); - - for (int j = 0; j < 
cluster.indices.size(); j++) { - int oldIndex = cluster.indices.at(j); - float weight = cluster.weights.at(j); - for (QMultiHash::const_iterator it = extracted.newIndices.constFind(oldIndex); - it != extracted.newIndices.end() && it.key() == oldIndex; it++) { - int newIndex = it.value(); - - // remember vertices with at least 1/4 weight - // FIXME: vertices with no weightpainting won't get recorded here - const float EXPANSION_WEIGHT_THRESHOLD = 0.25f; - if (weight >= EXPANSION_WEIGHT_THRESHOLD) { - // transform to joint-frame and save for later - const glm::mat4 vertexTransform = meshToJoint * glm::translate(extracted.mesh.vertices.at(newIndex)); - points.push_back(extractTranslation(vertexTransform)); - } - - // look for an unused slot in the weights vector - int weightIndex = newIndex * WEIGHTS_PER_VERTEX; - int lowestIndex = -1; - float lowestWeight = FLT_MAX; - int k = 0; - for (; k < WEIGHTS_PER_VERTEX; k++) { - if (weightAccumulators[weightIndex + k] == 0.0f) { - extracted.mesh.clusterIndices[weightIndex + k] = i; - weightAccumulators[weightIndex + k] = weight; - break; - } - if (weightAccumulators[weightIndex + k] < lowestWeight) { - lowestIndex = k; - lowestWeight = weightAccumulators[weightIndex + k]; - } - } - if (k == WEIGHTS_PER_VERTEX && weight > lowestWeight) { - // no space for an additional weight; we must replace the lowest - weightAccumulators[weightIndex + lowestIndex] = weight; - extracted.mesh.clusterIndices[weightIndex + lowestIndex] = i; - } - } - } - } - - // now that we've accumulated the most relevant weights for each vertex - // normalize and compress to 16-bits - extracted.mesh.clusterWeights.fill(0, numClusterIndices); - int numVertices = extracted.mesh.vertices.size(); - for (int i = 0; i < numVertices; ++i) { - int j = i * WEIGHTS_PER_VERTEX; - - // normalize weights into uint16_t - float totalWeight = 0.0f; - for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) { - totalWeight += weightAccumulators[k]; - } - - const float ALMOST_HALF = 
0.499f; - if (totalWeight > 0.0f) { - float weightScalingFactor = (float)(UINT16_MAX) / totalWeight; - for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) { - extracted.mesh.clusterWeights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF); - } - } else { - extracted.mesh.clusterWeights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF); - } - } - } else { - // this is a single-joint mesh - const HFMCluster& firstHFMCluster = extracted.mesh.clusters.at(0); - int jointIndex = firstHFMCluster.jointIndex; - HFMJoint& joint = hfmModel.joints[jointIndex]; - - // transform cluster vertices to joint-frame and save for later - glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform; - ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex); - for (const glm::vec3& vertex : extracted.mesh.vertices) { - const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertex); - points.push_back(extractTranslation(vertexTransform)); - } - - // Apply geometric offset, if present, by transforming the vertices directly - if (joint.hasGeometricOffset) { - glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); - for (int i = 0; i < extracted.mesh.vertices.size(); i++) { - extracted.mesh.vertices[i] = transformPoint(geometricOffset, extracted.mesh.vertices[i]); - } - } - } - - hfmModel.meshes.push_back(extracted.mesh); - uint32_t meshIndex = (uint32_t)hfmModel.meshes.size() - 1; + uint32_t meshIndex = (uint32_t)hfmModel.meshes.size(); meshIDsToMeshIndices.insert(it.key(), meshIndex); + hfmModel.meshes.push_back(extracted.mesh); + hfm::Mesh& mesh = hfmModel.meshes.back(); + + std::vector instanceModelIDs = getModelIDsForMeshID(meshID, fbxModels, _connectionParentMap); + // meshShapes will be added to hfmModel at the very end + std::vector meshShapes; + meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size()); + for (const QString& modelID : 
instanceModelIDs) { + // The transform node has the same indexing order as the joints + const uint32_t transformNodeIndex = (uint32_t)modelIDs.indexOf(modelID); + + // accumulate local transforms + glm::mat4 modelTransform = globalTransforms[transformNodeIndex]; + // compute the mesh extents from the transformed vertices + for (const glm::vec3& vertex : mesh.vertices) { + glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f)); + hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex); + hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex); + } + + // partShapes will be added to meshShapes at the very end + std::vector partShapes { mesh.parts.size() }; + for (uint32_t i = 0; i < (uint32_t)partShapes.size(); ++i) { + hfm::Shape& shape = partShapes[i]; + shape.mesh = meshIndex; + shape.meshPart = i; + shape.transform = transformNodeIndex; + glm::mat4 shapeGlobalTransform = globalTransforms[transformNodeIndex]; + + shape.transformedExtents.reset(); + // compute the shape extents from the transformed vertices + for (const glm::vec3& vertex : mesh.vertices) { + glm::vec3 transformedVertex = glm::vec3(shapeGlobalTransform * glm::vec4(vertex, 1.0f)); + shape.transformedExtents.minimum = glm::min(shape.transformedExtents.minimum, transformedVertex); + shape.transformedExtents.maximum = glm::max(shape.transformedExtents.maximum, transformedVertex); + } + } + + // For FBX_DRACO_MESH_VERSION < 2, or unbaked models, get materials from the partMaterialTextures + int materialIndex = 0; + int textureIndex = 0; + QList children = _connectionChildMap.values(modelID); + for (int i = children.size() - 1; i >= 0; i--) { + const QString& childID = children.at(i); + if (_hfmMaterials.contains(childID)) { + // the pure material associated with this part + const HFMMaterial& material = _hfmMaterials.value(childID); + for (int j = 0; j < partMaterialTextures.size(); j++) { + if 
(partMaterialTextures.at(j).first == materialIndex) { + hfm::Shape& shape = partShapes[j]; + shape.material = materialNameToID[material.materialID.toStdString()]; + } + } + materialIndex++; + } else if (_textureFilenames.contains(childID)) { + // NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale") + // I'm leaving the second parameter blank right now as this code may never be used. + HFMTexture texture = getTexture(childID, ""); + for (int j = 0; j < partMaterialTextures.size(); j++) { + int partTexture = partMaterialTextures.at(j).second; + if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) { + // TODO: DO something here that replaces this legacy code + // Maybe create a material just for this part with the correct textures? + // material.albedoTexture = texture; + // partShapes[j].material = materialIndex; + } + } + textureIndex++; + } + } + // For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart + if (!extracted.materialIDPerMeshPart.empty()) { + for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) { + hfm::Shape& shape = partShapes[i]; + const std::string& materialID = extracted.materialIDPerMeshPart[i]; + auto materialIt = materialNameToID.find(materialID); + if (materialIt != materialNameToID.end()) { + shape.material = materialIt->second; + } + } + } + + // find the clusters with which the mesh is associated + QVector clusterIDs; + for (const QString& childID : _connectionChildMap.values(meshID)) { + for (const QString& clusterID : _connectionChildMap.values(childID)) { + if (!fbxClusters.contains(clusterID)) { + continue; + } + clusterIDs.append(clusterID); + } + } + + auto rootJointIndex = modelIDs.indexOf(modelID); + if (rootJointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { + qCDebug(modelformat) << "Model not in model 
list: " << modelID; + rootJointIndex = 0; + } + + // whether we're skinned depends on how many clusters are attached + if (clusterIDs.size() > 1) { + hfm::DynamicTransform dynamicTransform; + auto& clusters = dynamicTransform.clusters; + std::vector deformers; + for (const auto& clusterID : clusterIDs) { + HFMCluster hfmCluster; + const Cluster& fbxCluster = fbxClusters[clusterID]; + + // see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion + // of skinning information in FBX + QString jointID = _connectionChildMap.value(clusterID); + hfmCluster.jointIndex = modelIDs.indexOf(jointID); + if (hfmCluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { + qCDebug(modelformat) << "Joint not in model list: " << jointID; + hfmCluster.jointIndex = 0; + } + + hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * modelTransform; + + // slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and + // sometimes floating point fuzz can be introduced after the inverse. 
+                    hfmCluster.inverseBindMatrix[0][3] = 0.0f;
+                    hfmCluster.inverseBindMatrix[1][3] = 0.0f;
+                    hfmCluster.inverseBindMatrix[2][3] = 0.0f;
+                    hfmCluster.inverseBindMatrix[3][3] = 1.0f;
+
+                    hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix);
+
+                    clusters.push_back(hfmCluster);
+
+                    // override the bind rotation with the transform link
+                    HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex];
+                    joint.inverseBindRotation = glm::inverse(extractRotation(fbxCluster.transformLink));
+                    joint.bindTransform = fbxCluster.transformLink;
+                    joint.bindTransformFoundInCluster = true;
+
+                    // update the bind pose extents
+                    glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform);
+                    hfmModel.bindExtents.addPoint(bindTranslation);
+                }
+
+                // the last cluster is the root cluster (appended once, after all skin clusters)
+                HFMCluster cluster;
+                cluster.jointIndex = rootJointIndex;
+                clusters.push_back(cluster);
+
+                // Skinned mesh instances have a dynamic transform
+                dynamicTransform.deformers.reserve(clusterIDs.size());
+                clusters.reserve(clusterIDs.size());
+                for (const auto& clusterID : clusterIDs) {
+                    const Cluster& fbxCluster = fbxClusters[clusterID];
+                    dynamicTransform.deformers.emplace_back();
+                    deformers.emplace_back();
+                    hfm::Deformer& deformer = deformers.back();
+                    size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size());
+                    deformer.indices.reserve(indexWeightPairs);
+                    deformer.weights.reserve(indexWeightPairs);
+                    for (size_t i = 0; i < indexWeightPairs; i++) {
+                        int oldIndex = fbxCluster.indices[i];
+                        uint32_t newIndex = (uint32_t)extracted.newIndices.value(oldIndex);
+                        deformer.indices.push_back(newIndex);
+                        deformer.weights.push_back((float)fbxCluster.weights[i]);
+                    }
+                }
+
+                // Store this model's deformers, this dynamic transform's deformer IDs
+                uint32_t deformerMinID = (uint32_t)hfmModel.deformers.size();
+                hfmModel.deformers.insert(hfmModel.deformers.end(), deformers.cbegin(), deformers.cend());
+
dynamicTransform.deformers.resize(deformers.size()); + std::iota(dynamicTransform.deformers.begin(), dynamicTransform.deformers.end(), deformerMinID); + + // Store the model's dynamic transform, and put its ID in the shapes + hfmModel.dynamicTransforms.push_back(dynamicTransform); + uint32_t dynamicTransformID = (uint32_t)(hfmModel.dynamicTransforms.size() - 1); + for (hfm::Shape& shape : partShapes) { + shape.dynamicTransform = dynamicTransformID; + } + } else { + // this is a single-joint mesh + HFMJoint& joint = hfmModel.joints[rootJointIndex]; + + // Apply geometric offset, if present, by transforming the vertices directly + if (joint.hasGeometricOffset) { + glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); + for (int i = 0; i < mesh.vertices.size(); i++) { + mesh.vertices[i] = transformPoint(geometricOffset, mesh.vertices[i]); + } + } + } + + // Store the parts for this mesh (or instance of this mesh, as the case may be) + meshShapes.insert(meshShapes.cend(), partShapes.cbegin(), partShapes.cend()); + } + + // Store the shapes for the mesh (or multiple instances of the mesh, as the case may be) + hfmModel.shapes.insert(hfmModel.shapes.cend(), meshShapes.cbegin(), meshShapes.cend()); } // attempt to map any meshes to a named model @@ -1651,9 +1660,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const if (applyUpAxisZRotation) { hfmModelPtr->meshExtents.transform(glm::mat4_cast(upAxisZRotation)); hfmModelPtr->bindExtents.transform(glm::mat4_cast(upAxisZRotation)); - for (auto &mesh : hfmModelPtr->meshes) { - mesh.modelTransform *= glm::mat4_cast(upAxisZRotation); - mesh.meshExtents.transform(glm::mat4_cast(upAxisZRotation)); + for (auto &shape : hfmModelPtr->shapes) { + auto transformIndex = shape.transform; + auto& transformNode = hfmModelPtr->transforms[transformIndex]; + transformNode.transform.postRotate(upAxisZRotation); + 
shape.transformedExtents.transform(glm::mat4_cast(upAxisZRotation)); } } return hfmModelPtr; diff --git a/libraries/fbx/src/FBXSerializer.h b/libraries/fbx/src/FBXSerializer.h index c9468708a6..2044d82710 100644 --- a/libraries/fbx/src/FBXSerializer.h +++ b/libraries/fbx/src/FBXSerializer.h @@ -103,6 +103,7 @@ public: class ExtractedMesh { public: hfm::Mesh mesh; + std::vector materialIDPerMeshPart; QMultiHash newIndices; QVector > blendshapeIndexMaps; QVector > partMaterialTextures; diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index 479e7acfc9..a89be38fe3 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -355,7 +355,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me // Check for additional metadata unsigned int dracoMeshNodeVersion = 1; - std::vector dracoMaterialList; + std::vector dracoMaterialList; for (const auto& dracoChild : child.children) { if (dracoChild.name == "FBXDracoMeshVersion") { if (!dracoChild.properties.isEmpty()) { @@ -364,7 +364,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me } else if (dracoChild.name == "MaterialList") { dracoMaterialList.reserve(dracoChild.properties.size()); for (const auto& materialID : dracoChild.properties) { - dracoMaterialList.push_back(materialID.toString()); + dracoMaterialList.push_back(materialID.toString().toStdString()); } } } @@ -467,6 +467,8 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me } } + ExtractedMesh& extracted = data.extracted; + extracted.materialIDPerMeshPart.resize(dracoMaterialList.size()); for (uint32_t i = 0; i < dracoMesh->num_faces(); ++i) { // grab the material ID and texture ID for this face, if we have it auto& dracoFace = dracoMesh->face(draco::FaceIndex(i)); @@ -487,13 +489,13 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me int& 
partIndexPlusOne = materialTextureParts[materialTexture]; if (partIndexPlusOne == 0) { data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1); - HFMMeshPart& part = data.extracted.mesh.parts.back(); + HFMMeshPart& part = extracted.mesh.parts.back(); // Figure out what material this part is if (dracoMeshNodeVersion >= 2) { // Define the materialID now if (materialID < dracoMaterialList.size()) { - part.materialID = dracoMaterialList[materialID]; + extracted.materialIDPerMeshPart[materialID] = dracoMaterialList[materialID]; } } else { // Define the materialID later, based on the order of first appearance of the materials in the _connectionChildMap From ba6833df8fcf0468411ebcf25d4ecfd5fec8b51c Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 3 Oct 2019 10:49:14 -0700 Subject: [PATCH 3/7] Make small improvements to FBXSerializer code changes --- libraries/fbx/src/FBXSerializer.cpp | 61 +++++++++++------------- libraries/fbx/src/FBXSerializer_Mesh.cpp | 19 ++++---- 2 files changed, 37 insertions(+), 43 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index e8388451d4..424c06b1c4 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1263,13 +1263,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // convert the models to joints hfmModel.hasSkeletonJoints = false; - // Note that these transform nodes are initially defined in world space bool needMixamoHack = hfmModel.applicationName == "mixamo.com"; hfmModel.transforms.reserve(modelIDs.size()); std::vector globalTransforms; globalTransforms.reserve(modelIDs.size()); - int jointIndex = 0; for (const QString& modelID : modelIDs) { const FBXModel& fbxModel = fbxModels[modelID]; HFMJoint joint; @@ -1378,13 +1376,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const transformNode.transform = Transform(localTransform); 
globalTransforms.push_back(globalTransform); hfmModel.transforms.push_back(transformNode); - - ++jointIndex; } - // NOTE: shapeVertices are in joint-frame - hfmModel.shapeVertices.resize(std::max((size_t)1, hfmModel.joints.size()) ); - hfmModel.bindExtents.reset(); hfmModel.meshExtents.reset(); @@ -1482,35 +1475,37 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } // For FBX_DRACO_MESH_VERSION < 2, or unbaked models, get materials from the partMaterialTextures - int materialIndex = 0; - int textureIndex = 0; - QList children = _connectionChildMap.values(modelID); - for (int i = children.size() - 1; i >= 0; i--) { - const QString& childID = children.at(i); - if (_hfmMaterials.contains(childID)) { - // the pure material associated with this part - const HFMMaterial& material = _hfmMaterials.value(childID); - for (int j = 0; j < partMaterialTextures.size(); j++) { - if (partMaterialTextures.at(j).first == materialIndex) { - hfm::Shape& shape = partShapes[j]; - shape.material = materialNameToID[material.materialID.toStdString()]; + if (!partMaterialTextures.empty()) { + int materialIndex = 0; + int textureIndex = 0; + QList children = _connectionChildMap.values(modelID); + for (int i = children.size() - 1; i >= 0; i--) { + const QString& childID = children.at(i); + if (_hfmMaterials.contains(childID)) { + // the pure material associated with this part + const HFMMaterial& material = _hfmMaterials.value(childID); + for (int j = 0; j < partMaterialTextures.size(); j++) { + if (partMaterialTextures.at(j).first == materialIndex) { + hfm::Shape& shape = partShapes[j]; + shape.material = materialNameToID[material.materialID.toStdString()]; + } } - } - materialIndex++; - } else if (_textureFilenames.contains(childID)) { - // NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale") - // I'm leaving the second 
parameter blank right now as this code may never be used. - HFMTexture texture = getTexture(childID, ""); - for (int j = 0; j < partMaterialTextures.size(); j++) { - int partTexture = partMaterialTextures.at(j).second; - if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) { - // TODO: DO something here that replaces this legacy code - // Maybe create a material just for this part with the correct textures? - // material.albedoTexture = texture; - // partShapes[j].material = materialIndex; + materialIndex++; + } else if (_textureFilenames.contains(childID)) { + // NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale") + // I'm leaving the second parameter blank right now as this code may never be used. + HFMTexture texture = getTexture(childID, ""); + for (int j = 0; j < partMaterialTextures.size(); j++) { + int partTexture = partMaterialTextures.at(j).second; + if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) { + // TODO: DO something here that replaces this legacy code + // Maybe create a material just for this part with the correct textures? 
+ // material.albedoTexture = texture; + // partShapes[j].material = materialIndex; + } } + textureIndex++; } - textureIndex++; } } // For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index a89be38fe3..f19cd7c526 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -369,6 +369,11 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me } } + if (dracoMeshNodeVersion >= 2) { + // Define the materialIDs now + data.extracted.materialIDPerMeshPart = dracoMaterialList; + } + // load the draco mesh from the FBX and create a draco::Mesh draco::Decoder decoder; draco::DecoderBuffer decodedBuffer; @@ -467,8 +472,6 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me } } - ExtractedMesh& extracted = data.extracted; - extracted.materialIDPerMeshPart.resize(dracoMaterialList.size()); for (uint32_t i = 0; i < dracoMesh->num_faces(); ++i) { // grab the material ID and texture ID for this face, if we have it auto& dracoFace = dracoMesh->face(draco::FaceIndex(i)); @@ -489,18 +492,14 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me int& partIndexPlusOne = materialTextureParts[materialTexture]; if (partIndexPlusOne == 0) { data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1); - HFMMeshPart& part = extracted.mesh.parts.back(); + HFMMeshPart& part = data.extracted.mesh.parts.back(); - // Figure out what material this part is - if (dracoMeshNodeVersion >= 2) { - // Define the materialID now - if (materialID < dracoMaterialList.size()) { - extracted.materialIDPerMeshPart[materialID] = dracoMaterialList[materialID]; - } - } else { + // Figure out if this is the older way of defining the per-part material for baked FBX + if (dracoMeshNodeVersion < 2) { // Define the materialID 
later, based on the order of first appearance of the materials in the _connectionChildMap data.extracted.partMaterialTextures.append(materialTexture); } + // in dracoMeshNodeVersion >= 2, fbx meshes have their per-part materials already defined in data.extracted.materialIDPerMeshPart partIndexPlusOne = (int)data.extracted.mesh.parts.size(); } From 4a8cdee38ab0485fc399b2f5ccadc660b173fff8 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 3 Oct 2019 14:30:03 -0700 Subject: [PATCH 4/7] Update HFM format and deprecate hfm::TransformNode --- libraries/hfm/src/hfm/HFM.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/libraries/hfm/src/hfm/HFM.h b/libraries/hfm/src/hfm/HFM.h index 29c4af9ec9..08410f17f2 100644 --- a/libraries/hfm/src/hfm/HFM.h +++ b/libraries/hfm/src/hfm/HFM.h @@ -118,6 +118,9 @@ public: glm::vec3 geometricTranslation; glm::quat geometricRotation; glm::vec3 geometricScaling; + + // globalTransform is the transform of the joint with all parent transforms applied, plus the geometric offset + glm::mat4 globalTransform; }; @@ -245,7 +248,7 @@ public: QVector clusters; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::clusters) Extents meshExtents; // DEPRECATED (see hfm::Shape::transformedExtents) - glm::mat4 modelTransform; // DEPRECATED (see hfm::Shape::transform, hfm::TransformNode, hfm::Model::transforms) + glm::mat4 modelTransform; // DEPRECATED (see hfm::Joint::globalTransform, hfm::Shape::transform, hfm::Model::joints) QVector blendshapes; @@ -289,6 +292,7 @@ public: bool shouldInitCollisions() const { return _collisionsConfig.size() > 0; } }; +// DEPRECATED in favor of using hfm::Joint class TransformNode { public: uint32_t parent { 0 }; @@ -316,9 +320,9 @@ public: uint32_t mesh { UNDEFINED_KEY }; uint32_t meshPart { UNDEFINED_KEY }; uint32_t material { UNDEFINED_KEY }; - uint32_t transform { UNDEFINED_KEY }; // The static transform node when not taking into account rigging/skinning + uint32_t 
transform { UNDEFINED_KEY }; // The hfm::Joint associated with this shape, containing transform information // TODO: Have all serializers calculate hfm::Shape::transformedExtents in world space where they previously calculated hfm::Mesh::meshExtents. Change all code that uses hfm::Mesh::meshExtents to use this instead. - Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after the transform node and parent transform nodes are applied, while not taking into account rigging/skinning + Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after transform information is applied, while not taking into account rigging/skinning uint32_t dynamicTransform { UNDEFINED_KEY }; }; From e8d421fa3549330b0972f46d8c5f1d578eb9d4d2 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 3 Oct 2019 14:30:20 -0700 Subject: [PATCH 5/7] Fix transforms and other issues with FBXSerializer --- libraries/fbx/src/FBXSerializer.cpp | 83 ++++++++++++----------------- 1 file changed, 33 insertions(+), 50 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index 424c06b1c4..e7d6000c28 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1264,9 +1264,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const hfmModel.hasSkeletonJoints = false; bool needMixamoHack = hfmModel.applicationName == "mixamo.com"; - hfmModel.transforms.reserve(modelIDs.size()); - std::vector globalTransforms; - globalTransforms.reserve(modelIDs.size()); for (const QString& modelID : modelIDs) { const FBXModel& fbxModel = fbxModels[modelID]; @@ -1341,41 +1338,28 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } } - hfmModel.joints.push_back(joint); // Now that we've initialized the joint, we can define the transform // modelIDs is ordered from parent to children, so we can safely get parent 
transforms from earlier joints as we iterate - glm::mat4 localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; + joint.globalTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform; + if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) { + hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; + joint.globalTransform = joint.globalTransform * parentJoint.globalTransform; + if (parentJoint.hasGeometricOffset) { + // Per the FBX standard, geometric offset should not propagate to children. + // However, we must be careful when modifying the behavior of FBXSerializer. + // So, we leave this here, as a breakpoint for debugging, or stub for implementation. + // qCDebug(modelformat) << "Geometric offset encountered on non-leaf node. jointIndex: " << jointIndex << ", modelURL: " << url; + // glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); + // globalTransform = globalTransform * glm::inverse(geometricOffset); + } + } if (joint.hasGeometricOffset) { glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation); - localTransform = localTransform * geometricOffset; + joint.globalTransform = joint.globalTransform * geometricOffset; } - glm::mat4 globalTransform; - if (joint.parentIndex != -1 && joint.parentIndex < jointIndex) { - hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex]; - glm::mat4& parentGlobalTransform = globalTransforms[joint.parentIndex]; - if (needMixamoHack) { - // there's something weird about the models from Mixamo Fuse; they don't skin right with the full transform - globalTransform = localTransform; - 
localTransform = globalTransform * glm::inverse(parentGlobalTransform); - } else { - if (parentJoint.hasGeometricOffset) { - // Per the FBX standard, geometric offsets should not propagate to children - glm::mat4 parentGeometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation); - globalTransform = localTransform * parentGlobalTransform * glm::inverse(parentGeometricOffset); - localTransform = globalTransform * glm::inverse(parentGlobalTransform); - } else { - globalTransform = localTransform * parentGlobalTransform; - } - } - } else { - globalTransform = localTransform; - } - hfm::TransformNode transformNode; - transformNode.parent = joint.parentIndex == -1 ? hfm::UNDEFINED_KEY : joint.parentIndex; - transformNode.transform = Transform(localTransform); - globalTransforms.push_back(globalTransform); - hfmModel.transforms.push_back(transformNode); + + hfmModel.joints.push_back(joint); } hfmModel.bindExtents.reset(); @@ -1418,7 +1402,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const std::unordered_map materialNameToID; for (auto materialIt = _hfmMaterials.cbegin(); materialIt != _hfmMaterials.cend(); ++materialIt) { - materialNameToID[materialIt.key().toStdString()] = hfmModel.materials.size(); + materialNameToID[materialIt.key().toStdString()] = (uint32_t)hfmModel.materials.size(); hfmModel.materials.push_back(materialIt.value()); } @@ -1445,13 +1429,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size()); for (const QString& modelID : instanceModelIDs) { // The transform node has the same indexing order as the joints - const uint32_t transformNodeIndex = (uint32_t)modelIDs.indexOf(modelID); + const uint32_t transformIndex = (uint32_t)modelIDs.indexOf(modelID); // accumulate local transforms - glm::mat4 modelTransform = globalTransforms[transformNodeIndex]; 
+ glm::mat4 globalTransform = hfmModel.joints[transformIndex].globalTransform; // compute the mesh extents from the transformed vertices for (const glm::vec3& vertex : mesh.vertices) { - glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f)); + glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f)); hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex); hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex); } @@ -1462,13 +1446,12 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const hfm::Shape& shape = partShapes[i]; shape.mesh = meshIndex; shape.meshPart = i; - shape.transform = transformNodeIndex; - glm::mat4 shapeGlobalTransform = globalTransforms[transformNodeIndex]; + shape.transform = transformIndex; shape.transformedExtents.reset(); // compute the shape extents from the transformed vertices for (const glm::vec3& vertex : mesh.vertices) { - glm::vec3 transformedVertex = glm::vec3(shapeGlobalTransform * glm::vec4(vertex, 1.0f)); + glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f)); shape.transformedExtents.minimum = glm::min(shape.transformedExtents.minimum, transformedVertex); shape.transformedExtents.maximum = glm::max(shape.transformedExtents.maximum, transformedVertex); } @@ -1555,7 +1538,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const hfmCluster.jointIndex = 0; } - hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * modelTransform; + hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * globalTransform; // slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and // sometimes floating point fuzz can be introduced after the inverse. 
@@ -1577,13 +1560,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // update the bind pose extents glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform); hfmModel.bindExtents.addPoint(bindTranslation); - - // the last cluster is the root cluster - HFMCluster cluster; - cluster.jointIndex = rootJointIndex; - clusters.push_back(cluster); } + // the last cluster is the root cluster + HFMCluster cluster; + cluster.jointIndex = rootJointIndex; + clusters.push_back(cluster); + // Skinned mesh instances have a dynamic transform dynamicTransform.deformers.reserve(clusterIDs.size()); clusters.reserve(clusterIDs.size()); @@ -1595,7 +1578,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size()); deformer.indices.reserve(indexWeightPairs); deformer.weights.reserve(indexWeightPairs); - for (size_t i = 0; i < indexWeightPairs; i++) { + for (int i = 0; i < (int)indexWeightPairs; i++) { int oldIndex = fbxCluster.indices[i]; uint32_t newIndex = (uint32_t)extracted.newIndices.value(oldIndex); deformer.indices.push_back(newIndex); @@ -1655,12 +1638,12 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const if (applyUpAxisZRotation) { hfmModelPtr->meshExtents.transform(glm::mat4_cast(upAxisZRotation)); hfmModelPtr->bindExtents.transform(glm::mat4_cast(upAxisZRotation)); - for (auto &shape : hfmModelPtr->shapes) { - auto transformIndex = shape.transform; - auto& transformNode = hfmModelPtr->transforms[transformIndex]; - transformNode.transform.postRotate(upAxisZRotation); + for (auto& shape : hfmModelPtr->shapes) { shape.transformedExtents.transform(glm::mat4_cast(upAxisZRotation)); } + for (auto& joint : hfmModelPtr->joints) { + joint.globalTransform = joint.globalTransform * glm::mat4_cast(upAxisZRotation); + } } return hfmModelPtr; } From 
41de373570d4587635c18b188ed93ffc49b8b7d7 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 3 Oct 2019 15:17:26 -0700 Subject: [PATCH 6/7] Fix not allocating shapeVerticesPerJoint in CollectShapeVerticesTask.cpp --- .../model-baker/src/model-baker/CollectShapeVerticesTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp index 8aeb0145d5..36c2aa04a6 100644 --- a/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp +++ b/libraries/model-baker/src/model-baker/CollectShapeVerticesTask.cpp @@ -33,7 +33,7 @@ void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, con const auto& reweightedDeformers = input.get4(); auto& shapeVerticesPerJoint = output; - shapeVerticesPerJoint.reserve(joints.size()); + shapeVerticesPerJoint.resize(joints.size()); std::vector> vertexSourcesPerJoint; vertexSourcesPerJoint.resize(joints.size()); for (size_t i = 0; i < shapes.size(); ++i) { From b15771e9fefb32306a1d697569514925ba13d121 Mon Sep 17 00:00:00 2001 From: sabrina-shanman Date: Thu, 3 Oct 2019 16:47:41 -0700 Subject: [PATCH 7/7] Fix build warnings and wrong use of indexOf --- libraries/fbx/src/FBXSerializer.cpp | 5 ++--- libraries/fbx/src/FBXSerializer_Mesh.cpp | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/libraries/fbx/src/FBXSerializer.cpp b/libraries/fbx/src/FBXSerializer.cpp index e7d6000c28..78bc1836c3 100644 --- a/libraries/fbx/src/FBXSerializer.cpp +++ b/libraries/fbx/src/FBXSerializer.cpp @@ -1416,7 +1416,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const const QString& meshID = it.key(); const ExtractedMesh& extracted = it.value(); const auto& partMaterialTextures = extracted.partMaterialTextures; - const auto& newIndices = extracted.newIndices; uint32_t meshIndex = (uint32_t)hfmModel.meshes.size(); 
meshIDsToMeshIndices.insert(it.key(), meshIndex); @@ -1515,7 +1514,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const } auto rootJointIndex = modelIDs.indexOf(modelID); - if (rootJointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { + if (rootJointIndex == -1) { qCDebug(modelformat) << "Model not in model list: " << modelID; rootJointIndex = 0; } @@ -1533,7 +1532,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const // of skinning information in FBX QString jointID = _connectionChildMap.value(clusterID); hfmCluster.jointIndex = modelIDs.indexOf(jointID); - if (hfmCluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) { + if (hfmCluster.jointIndex == -1) { qCDebug(modelformat) << "Joint not in model list: " << jointID; hfmCluster.jointIndex = 0; } diff --git a/libraries/fbx/src/FBXSerializer_Mesh.cpp b/libraries/fbx/src/FBXSerializer_Mesh.cpp index f19cd7c526..7c6be5740a 100644 --- a/libraries/fbx/src/FBXSerializer_Mesh.cpp +++ b/libraries/fbx/src/FBXSerializer_Mesh.cpp @@ -492,7 +492,6 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me int& partIndexPlusOne = materialTextureParts[materialTexture]; if (partIndexPlusOne == 0) { data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1); - HFMMeshPart& part = data.extracted.mesh.parts.back(); // Figure out if this is the older way of defining the per-part material for baked FBX if (dracoMeshNodeVersion < 2) {