Update FBXSerializer to reference shapes, support instancing (deformers WIP)
This commit is contained in:
sabrina-shanman 2019-09-12 10:58:52 -07:00
parent bac22c69c1
commit ff5fef9c3a
3 changed files with 276 additions and 262 deletions

View file

@ -145,40 +145,19 @@ public:
bool isLimbNode; // is this FBXModel transform is a "LimbNode" i.e. a joint
};
glm::mat4 getGlobalTransform(const QMultiMap<QString, QString>& _connectionParentMap,
const QHash<QString, FBXModel>& fbxModels, QString nodeID, bool mixamoHack, const QString& url) {
glm::mat4 globalTransform;
QVector<QString> visitedNodes; // Used to prevent following a cycle
while (!nodeID.isNull()) {
visitedNodes.append(nodeID); // Append each node we visit
const FBXModel& fbxModel = fbxModels.value(nodeID);
globalTransform = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(fbxModel.preRotation *
fbxModel.rotation * fbxModel.postRotation) * fbxModel.postTransform * globalTransform;
if (fbxModel.hasGeometricOffset) {
glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation);
globalTransform = globalTransform * geometricOffset;
}
if (mixamoHack) {
// there's something weird about the models from Mixamo Fuse; they don't skin right with the full transform
return globalTransform;
}
QList<QString> parentIDs = _connectionParentMap.values(nodeID);
nodeID = QString();
foreach (const QString& parentID, parentIDs) {
if (visitedNodes.contains(parentID)) {
qCWarning(modelformat) << "Ignoring loop detected in FBX connection map for" << url;
continue;
}
// Returns the IDs of all FBX model nodes that instance the given mesh.
// If meshID is itself a model node, it is the sole instance; otherwise,
// every parent model found in the connection map is treated as a distinct
// instance of the mesh (each potentially carrying its own material and
// transform).
std::vector<QString> getModelIDsForMeshID(const QString& meshID, const QHash<QString, FBXModel>& fbxModels, const QMultiMap<QString, QString>& _connectionParentMap) {
    std::vector<QString> modelsForMesh;
    if (fbxModels.contains(meshID)) {
        // The mesh node is itself a model: a single, direct instance.
        modelsForMesh.push_back(meshID);
    } else {
        // This mesh may have more than one parent model, with different material and transform, representing a different instance of the mesh
        for (const auto& parentID : _connectionParentMap.values(meshID)) {
            if (fbxModels.contains(parentID)) {
                modelsForMesh.push_back(parentID);
            }
        }
    }
    return modelsForMesh;
}
class ExtractedBlendshape {
@ -404,7 +383,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
QVector<ExtractedBlendshape> blendshapes;
QHash<QString, FBXModel> fbxModels;
QHash<QString, Cluster> clusters;
QHash<QString, Cluster> fbxClusters;
QHash<QString, AnimationCurve> animationCurves;
QHash<QString, QString> typeFlags;
@ -1058,9 +1037,9 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
}
}
// skip empty clusters
// skip empty fbxClusters
if (cluster.indices.size() > 0 && cluster.weights.size() > 0) {
clusters.insert(getID(object.properties), cluster);
fbxClusters.insert(getID(object.properties), cluster);
}
} else if (object.properties.last() == "BlendShapeChannel") {
@ -1233,13 +1212,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
QVector<QString> modelIDs;
QSet<QString> remainingFBXModels;
for (QHash<QString, FBXModel>::const_iterator fbxModel = fbxModels.constBegin(); fbxModel != fbxModels.constEnd(); fbxModel++) {
// models with clusters must be parented to the cluster top
// models with fbxClusters must be parented to the cluster top
// Unless the model is a root node.
bool isARootNode = !modelIDs.contains(_connectionParentMap.value(fbxModel.key()));
if (!isARootNode) {
foreach(const QString& deformerID, _connectionChildMap.values(fbxModel.key())) {
foreach(const QString& clusterID, _connectionChildMap.values(deformerID)) {
if (!clusters.contains(clusterID)) {
if (!fbxClusters.contains(clusterID)) {
continue;
}
QString topID = getTopModelID(_connectionParentMap, fbxModels, _connectionChildMap.value(clusterID), url);
@ -1283,8 +1262,15 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
// convert the models to joints
hfmModel.hasSkeletonJoints = false;
// Note that these transform nodes are initially defined in world space
bool needMixamoHack = hfmModel.applicationName == "mixamo.com";
hfmModel.transforms.reserve(modelIDs.size());
std::vector<glm::mat4> globalTransforms;
globalTransforms.reserve(modelIDs.size());
foreach (const QString& modelID, modelIDs) {
int jointIndex = 0;
for (const QString& modelID : modelIDs) {
const FBXModel& fbxModel = fbxModels[modelID];
HFMJoint joint;
joint.parentIndex = fbxModel.parentIndex;
@ -1358,6 +1344,42 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
}
hfmModel.joints.push_back(joint);
// Now that we've initialized the joint, we can define the transform
// modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate
glm::mat4 localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform;
if (joint.hasGeometricOffset) {
glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation);
localTransform = localTransform * geometricOffset;
}
glm::mat4 globalTransform;
if (joint.parentIndex != -1 && joint.parentIndex < jointIndex) {
hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex];
glm::mat4& parentGlobalTransform = globalTransforms[joint.parentIndex];
if (needMixamoHack) {
// there's something weird about the models from Mixamo Fuse; they don't skin right with the full transform
globalTransform = localTransform;
localTransform = globalTransform * glm::inverse(parentGlobalTransform);
} else {
if (parentJoint.hasGeometricOffset) {
// Per the FBX standard, geometric offsets should not propagate to children
glm::mat4 parentGeometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation);
globalTransform = localTransform * parentGlobalTransform * glm::inverse(parentGeometricOffset);
localTransform = globalTransform * glm::inverse(parentGlobalTransform);
} else {
globalTransform = localTransform * parentGlobalTransform;
}
}
} else {
globalTransform = localTransform;
}
hfm::TransformNode transformNode;
transformNode.parent = joint.parentIndex == -1 ? hfm::UNDEFINED_KEY : joint.parentIndex;
transformNode.transform = Transform(localTransform);
globalTransforms.push_back(globalTransform);
hfmModel.transforms.push_back(transformNode);
++jointIndex;
}
// NOTE: shapeVertices are in joint-frame
@ -1401,235 +1423,222 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
}
#endif
std::unordered_map<std::string, uint32_t> materialNameToID;
for (auto materialIt = _hfmMaterials.cbegin(); materialIt != _hfmMaterials.cend(); ++materialIt) {
materialNameToID[materialIt.key().toStdString()] = hfmModel.materials.size();
hfmModel.materials.push_back(materialIt.value());
}
// see if any materials have texture children
bool materialsHaveTextures = checkMaterialsHaveTextures(_hfmMaterials, _textureFilenames, _connectionChildMap);
// Note that the transforms in the TransformNodes are initially in world-space, and need to be converted to parent-space
std::vector<hfm::TransformNode> transformNodes;
for (QMap<QString, ExtractedMesh>::iterator it = meshes.begin(); it != meshes.end(); it++) {
ExtractedMesh& extracted = it.value();
const QString& meshID = it.key();
const ExtractedMesh& extracted = it.value();
const auto& partMaterialTextures = extracted.partMaterialTextures;
const auto& newIndices = extracted.newIndices;
extracted.mesh.meshExtents.reset();
// accumulate local transforms
QString modelID = fbxModels.contains(it.key()) ? it.key() : _connectionParentMap.value(it.key());
glm::mat4 modelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url);
// compute the mesh extents from the transformed vertices
foreach (const glm::vec3& vertex, extracted.mesh.vertices) {
glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f));
hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex);
hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex);
extracted.mesh.meshExtents.minimum = glm::min(extracted.mesh.meshExtents.minimum, transformedVertex);
extracted.mesh.meshExtents.maximum = glm::max(extracted.mesh.meshExtents.maximum, transformedVertex);
extracted.mesh.modelTransform = modelTransform;
}
// look for textures, material properties
// allocate the Part material library
// NOTE: extracted.partMaterialTextures is empty for FBX_DRACO_MESH_VERSION >= 2. In that case, the mesh part's materialID string is already defined.
int materialIndex = 0;
int textureIndex = 0;
QList<QString> children = _connectionChildMap.values(modelID);
for (int i = children.size() - 1; i >= 0; i--) {
const QString& childID = children.at(i);
if (_hfmMaterials.contains(childID)) {
// the pure material associated with this part
HFMMaterial material = _hfmMaterials.value(childID);
for (int j = 0; j < extracted.partMaterialTextures.size(); j++) {
if (extracted.partMaterialTextures.at(j).first == materialIndex) {
HFMMeshPart& part = extracted.mesh.parts[j];
part.materialID = material.materialID;
}
}
materialIndex++;
} else if (_textureFilenames.contains(childID)) {
// NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale")
// I'm leaving the second parameter blank right now as this code may never be used.
HFMTexture texture = getTexture(childID, "");
for (int j = 0; j < extracted.partMaterialTextures.size(); j++) {
int partTexture = extracted.partMaterialTextures.at(j).second;
if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) {
// TODO: DO something here that replaces this legacy code
// Maybe create a material just for this part with the correct textures?
// extracted.mesh.parts[j].diffuseTexture = texture;
}
}
textureIndex++;
}
}
// find the clusters with which the mesh is associated
QVector<QString> clusterIDs;
foreach (const QString& childID, _connectionChildMap.values(it.key())) {
foreach (const QString& clusterID, _connectionChildMap.values(childID)) {
if (!clusters.contains(clusterID)) {
continue;
}
HFMCluster hfmCluster;
const Cluster& cluster = clusters[clusterID];
clusterIDs.append(clusterID);
// see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion
// of skinning information in FBX
QString jointID = _connectionChildMap.value(clusterID);
hfmCluster.jointIndex = modelIDs.indexOf(jointID);
if (hfmCluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) {
qCDebug(modelformat) << "Joint not in model list: " << jointID;
hfmCluster.jointIndex = 0;
}
hfmCluster.inverseBindMatrix = glm::inverse(cluster.transformLink) * modelTransform;
// slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and
// sometimes floating point fuzz can be introduced after the inverse.
hfmCluster.inverseBindMatrix[0][3] = 0.0f;
hfmCluster.inverseBindMatrix[1][3] = 0.0f;
hfmCluster.inverseBindMatrix[2][3] = 0.0f;
hfmCluster.inverseBindMatrix[3][3] = 1.0f;
hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix);
extracted.mesh.clusters.append(hfmCluster);
// override the bind rotation with the transform link
HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex];
joint.inverseBindRotation = glm::inverse(extractRotation(cluster.transformLink));
joint.bindTransform = cluster.transformLink;
joint.bindTransformFoundInCluster = true;
// update the bind pose extents
glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform);
hfmModel.bindExtents.addPoint(bindTranslation);
}
}
// the last cluster is the root cluster
{
HFMCluster cluster;
cluster.jointIndex = modelIDs.indexOf(modelID);
if (cluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) {
qCDebug(modelformat) << "Model not in model list: " << modelID;
cluster.jointIndex = 0;
}
extracted.mesh.clusters.append(cluster);
}
// whether we're skinned depends on how many clusters are attached
if (clusterIDs.size() > 1) {
// this is a multi-mesh joint
const int WEIGHTS_PER_VERTEX = 4;
int numClusterIndices = extracted.mesh.vertices.size() * WEIGHTS_PER_VERTEX;
extracted.mesh.clusterIndices.fill(extracted.mesh.clusters.size() - 1, numClusterIndices);
QVector<float> weightAccumulators;
weightAccumulators.fill(0.0f, numClusterIndices);
for (int i = 0; i < clusterIDs.size(); i++) {
QString clusterID = clusterIDs.at(i);
const Cluster& cluster = clusters[clusterID];
const HFMCluster& hfmCluster = extracted.mesh.clusters.at(i);
int jointIndex = hfmCluster.jointIndex;
HFMJoint& joint = hfmModel.joints[jointIndex];
glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform;
ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex);
for (int j = 0; j < cluster.indices.size(); j++) {
int oldIndex = cluster.indices.at(j);
float weight = cluster.weights.at(j);
for (QMultiHash<int, int>::const_iterator it = extracted.newIndices.constFind(oldIndex);
it != extracted.newIndices.end() && it.key() == oldIndex; it++) {
int newIndex = it.value();
// remember vertices with at least 1/4 weight
// FIXME: vertices with no weightpainting won't get recorded here
const float EXPANSION_WEIGHT_THRESHOLD = 0.25f;
if (weight >= EXPANSION_WEIGHT_THRESHOLD) {
// transform to joint-frame and save for later
const glm::mat4 vertexTransform = meshToJoint * glm::translate(extracted.mesh.vertices.at(newIndex));
points.push_back(extractTranslation(vertexTransform));
}
// look for an unused slot in the weights vector
int weightIndex = newIndex * WEIGHTS_PER_VERTEX;
int lowestIndex = -1;
float lowestWeight = FLT_MAX;
int k = 0;
for (; k < WEIGHTS_PER_VERTEX; k++) {
if (weightAccumulators[weightIndex + k] == 0.0f) {
extracted.mesh.clusterIndices[weightIndex + k] = i;
weightAccumulators[weightIndex + k] = weight;
break;
}
if (weightAccumulators[weightIndex + k] < lowestWeight) {
lowestIndex = k;
lowestWeight = weightAccumulators[weightIndex + k];
}
}
if (k == WEIGHTS_PER_VERTEX && weight > lowestWeight) {
// no space for an additional weight; we must replace the lowest
weightAccumulators[weightIndex + lowestIndex] = weight;
extracted.mesh.clusterIndices[weightIndex + lowestIndex] = i;
}
}
}
}
// now that we've accumulated the most relevant weights for each vertex
// normalize and compress to 16-bits
extracted.mesh.clusterWeights.fill(0, numClusterIndices);
int numVertices = extracted.mesh.vertices.size();
for (int i = 0; i < numVertices; ++i) {
int j = i * WEIGHTS_PER_VERTEX;
// normalize weights into uint16_t
float totalWeight = 0.0f;
for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) {
totalWeight += weightAccumulators[k];
}
const float ALMOST_HALF = 0.499f;
if (totalWeight > 0.0f) {
float weightScalingFactor = (float)(UINT16_MAX) / totalWeight;
for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) {
extracted.mesh.clusterWeights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF);
}
} else {
extracted.mesh.clusterWeights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF);
}
}
} else {
// this is a single-joint mesh
const HFMCluster& firstHFMCluster = extracted.mesh.clusters.at(0);
int jointIndex = firstHFMCluster.jointIndex;
HFMJoint& joint = hfmModel.joints[jointIndex];
// transform cluster vertices to joint-frame and save for later
glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform;
ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex);
for (const glm::vec3& vertex : extracted.mesh.vertices) {
const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertex);
points.push_back(extractTranslation(vertexTransform));
}
// Apply geometric offset, if present, by transforming the vertices directly
if (joint.hasGeometricOffset) {
glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation);
for (int i = 0; i < extracted.mesh.vertices.size(); i++) {
extracted.mesh.vertices[i] = transformPoint(geometricOffset, extracted.mesh.vertices[i]);
}
}
}
hfmModel.meshes.push_back(extracted.mesh);
uint32_t meshIndex = (uint32_t)hfmModel.meshes.size() - 1;
uint32_t meshIndex = (uint32_t)hfmModel.meshes.size();
meshIDsToMeshIndices.insert(it.key(), meshIndex);
hfmModel.meshes.push_back(extracted.mesh);
hfm::Mesh& mesh = hfmModel.meshes.back();
std::vector<QString> instanceModelIDs = getModelIDsForMeshID(meshID, fbxModels, _connectionParentMap);
// meshShapes will be added to hfmModel at the very end
std::vector<hfm::Shape> meshShapes;
meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size());
for (const QString& modelID : instanceModelIDs) {
// The transform node has the same indexing order as the joints
const uint32_t transformNodeIndex = (uint32_t)modelIDs.indexOf(modelID);
// accumulate local transforms
glm::mat4 modelTransform = globalTransforms[transformNodeIndex];
// compute the mesh extents from the transformed vertices
for (const glm::vec3& vertex : mesh.vertices) {
glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f));
hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex);
hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex);
}
// partShapes will be added to meshShapes at the very end
std::vector<hfm::Shape> partShapes { mesh.parts.size() };
for (uint32_t i = 0; i < (uint32_t)partShapes.size(); ++i) {
hfm::Shape& shape = partShapes[i];
shape.mesh = meshIndex;
shape.meshPart = i;
shape.transform = transformNodeIndex;
glm::mat4 shapeGlobalTransform = globalTransforms[transformNodeIndex];
shape.transformedExtents.reset();
// compute the shape extents from the transformed vertices
for (const glm::vec3& vertex : mesh.vertices) {
glm::vec3 transformedVertex = glm::vec3(shapeGlobalTransform * glm::vec4(vertex, 1.0f));
shape.transformedExtents.minimum = glm::min(shape.transformedExtents.minimum, transformedVertex);
shape.transformedExtents.maximum = glm::max(shape.transformedExtents.maximum, transformedVertex);
}
}
// For FBX_DRACO_MESH_VERSION < 2, or unbaked models, get materials from the partMaterialTextures
int materialIndex = 0;
int textureIndex = 0;
QList<QString> children = _connectionChildMap.values(modelID);
for (int i = children.size() - 1; i >= 0; i--) {
const QString& childID = children.at(i);
if (_hfmMaterials.contains(childID)) {
// the pure material associated with this part
const HFMMaterial& material = _hfmMaterials.value(childID);
for (int j = 0; j < partMaterialTextures.size(); j++) {
if (partMaterialTextures.at(j).first == materialIndex) {
hfm::Shape& shape = partShapes[j];
shape.material = materialNameToID[material.materialID.toStdString()];
}
}
materialIndex++;
} else if (_textureFilenames.contains(childID)) {
// NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale")
// I'm leaving the second parameter blank right now as this code may never be used.
HFMTexture texture = getTexture(childID, "");
for (int j = 0; j < partMaterialTextures.size(); j++) {
int partTexture = partMaterialTextures.at(j).second;
if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) {
// TODO: DO something here that replaces this legacy code
// Maybe create a material just for this part with the correct textures?
// material.albedoTexture = texture;
// partShapes[j].material = materialIndex;
}
}
textureIndex++;
}
}
// For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart
if (!extracted.materialIDPerMeshPart.empty()) {
for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) {
hfm::Shape& shape = partShapes[i];
const std::string& materialID = extracted.materialIDPerMeshPart[i];
auto materialIt = materialNameToID.find(materialID);
if (materialIt != materialNameToID.end()) {
shape.material = materialIt->second;
}
}
}
// find the clusters with which the mesh is associated
QVector<QString> clusterIDs;
for (const QString& childID : _connectionChildMap.values(meshID)) {
for (const QString& clusterID : _connectionChildMap.values(childID)) {
if (!fbxClusters.contains(clusterID)) {
continue;
}
clusterIDs.append(clusterID);
}
}
auto rootJointIndex = modelIDs.indexOf(modelID);
if (rootJointIndex == hfm::Cluster::INVALID_JOINT_INDEX) {
qCDebug(modelformat) << "Model not in model list: " << modelID;
rootJointIndex = 0;
}
// whether we're skinned depends on how many clusters are attached
if (clusterIDs.size() > 1) {
hfm::DynamicTransform dynamicTransform;
auto& clusters = dynamicTransform.clusters;
std::vector<hfm::Deformer> deformers;
for (const auto& clusterID : clusterIDs) {
HFMCluster hfmCluster;
const Cluster& fbxCluster = fbxClusters[clusterID];
// see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion
// of skinning information in FBX
QString jointID = _connectionChildMap.value(clusterID);
hfmCluster.jointIndex = modelIDs.indexOf(jointID);
if (hfmCluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) {
qCDebug(modelformat) << "Joint not in model list: " << jointID;
hfmCluster.jointIndex = 0;
}
hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * modelTransform;
// slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and
// sometimes floating point fuzz can be introduced after the inverse.
hfmCluster.inverseBindMatrix[0][3] = 0.0f;
hfmCluster.inverseBindMatrix[1][3] = 0.0f;
hfmCluster.inverseBindMatrix[2][3] = 0.0f;
hfmCluster.inverseBindMatrix[3][3] = 1.0f;
hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix);
clusters.push_back(hfmCluster);
// override the bind rotation with the transform link
HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex];
joint.inverseBindRotation = glm::inverse(extractRotation(fbxCluster.transformLink));
joint.bindTransform = fbxCluster.transformLink;
joint.bindTransformFoundInCluster = true;
// update the bind pose extents
glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform);
hfmModel.bindExtents.addPoint(bindTranslation);
// the last cluster is the root cluster
HFMCluster cluster;
cluster.jointIndex = rootJointIndex;
clusters.push_back(cluster);
}
// Skinned mesh instances have a dynamic transform
dynamicTransform.deformers.reserve(clusterIDs.size());
clusters.reserve(clusterIDs.size());
for (const auto& clusterID : clusterIDs) {
const Cluster& fbxCluster = fbxClusters[clusterID];
dynamicTransform.deformers.emplace_back();
deformers.emplace_back();
hfm::Deformer& deformer = deformers.back();
size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size());
deformer.indices.reserve(indexWeightPairs);
deformer.weights.reserve(indexWeightPairs);
for (size_t i = 0; i < indexWeightPairs; i++) {
int oldIndex = fbxCluster.indices[i];
uint32_t newIndex = (uint32_t)extracted.newIndices.value(oldIndex);
deformer.indices.push_back(newIndex);
deformer.indices.push_back((float)fbxCluster.weights[i]);
}
}
// Store this model's deformers, this dynamic transform's deformer IDs
uint32_t deformerMinID = (uint32_t)hfmModel.deformers.size();
hfmModel.deformers.insert(hfmModel.deformers.end(), deformers.cbegin(), deformers.cend());
dynamicTransform.deformers.resize(deformers.size());
std::iota(dynamicTransform.deformers.begin(), dynamicTransform.deformers.end(), deformerMinID);
// Store the model's dynamic transform, and put its ID in the shapes
hfmModel.dynamicTransforms.push_back(dynamicTransform);
uint32_t dynamicTransformID = (uint32_t)(hfmModel.dynamicTransforms.size() - 1);
for (hfm::Shape& shape : partShapes) {
shape.dynamicTransform = dynamicTransformID;
}
} else {
// this is a single-joint mesh
HFMJoint& joint = hfmModel.joints[rootJointIndex];
// Apply geometric offset, if present, by transforming the vertices directly
if (joint.hasGeometricOffset) {
glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation);
for (int i = 0; i < mesh.vertices.size(); i++) {
mesh.vertices[i] = transformPoint(geometricOffset, mesh.vertices[i]);
}
}
}
// Store the parts for this mesh (or instance of this mesh, as the case may be)
meshShapes.insert(meshShapes.cend(), partShapes.cbegin(), partShapes.cend());
}
// Store the shapes for the mesh (or multiple instances of the mesh, as the case may be)
hfmModel.shapes.insert(hfmModel.shapes.cend(), meshShapes.cbegin(), meshShapes.cend());
}
// attempt to map any meshes to a named model
@ -1651,9 +1660,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
if (applyUpAxisZRotation) {
hfmModelPtr->meshExtents.transform(glm::mat4_cast(upAxisZRotation));
hfmModelPtr->bindExtents.transform(glm::mat4_cast(upAxisZRotation));
for (auto &mesh : hfmModelPtr->meshes) {
mesh.modelTransform *= glm::mat4_cast(upAxisZRotation);
mesh.meshExtents.transform(glm::mat4_cast(upAxisZRotation));
for (auto &shape : hfmModelPtr->shapes) {
auto transformIndex = shape.transform;
auto& transformNode = hfmModelPtr->transforms[transformIndex];
transformNode.transform.postRotate(upAxisZRotation);
shape.transformedExtents.transform(glm::mat4_cast(upAxisZRotation));
}
}
return hfmModelPtr;

View file

@ -103,6 +103,7 @@ public:
class ExtractedMesh {
public:
hfm::Mesh mesh;
std::vector<std::string> materialIDPerMeshPart;
QMultiHash<int, int> newIndices;
QVector<QHash<int, int> > blendshapeIndexMaps;
QVector<QPair<int, int> > partMaterialTextures;

View file

@ -355,7 +355,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
// Check for additional metadata
unsigned int dracoMeshNodeVersion = 1;
std::vector<QString> dracoMaterialList;
std::vector<std::string> dracoMaterialList;
for (const auto& dracoChild : child.children) {
if (dracoChild.name == "FBXDracoMeshVersion") {
if (!dracoChild.properties.isEmpty()) {
@ -364,7 +364,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
} else if (dracoChild.name == "MaterialList") {
dracoMaterialList.reserve(dracoChild.properties.size());
for (const auto& materialID : dracoChild.properties) {
dracoMaterialList.push_back(materialID.toString());
dracoMaterialList.push_back(materialID.toString().toStdString());
}
}
}
@ -467,6 +467,8 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
}
}
ExtractedMesh& extracted = data.extracted;
extracted.materialIDPerMeshPart.resize(dracoMaterialList.size());
for (uint32_t i = 0; i < dracoMesh->num_faces(); ++i) {
// grab the material ID and texture ID for this face, if we have it
auto& dracoFace = dracoMesh->face(draco::FaceIndex(i));
@ -487,13 +489,13 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
int& partIndexPlusOne = materialTextureParts[materialTexture];
if (partIndexPlusOne == 0) {
data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1);
HFMMeshPart& part = data.extracted.mesh.parts.back();
HFMMeshPart& part = extracted.mesh.parts.back();
// Figure out what material this part is
if (dracoMeshNodeVersion >= 2) {
// Define the materialID now
if (materialID < dracoMaterialList.size()) {
part.materialID = dracoMaterialList[materialID];
extracted.materialIDPerMeshPart[materialID] = dracoMaterialList[materialID];
}
} else {
// Define the materialID later, based on the order of first appearance of the materials in the _connectionChildMap