Merge branch 'instancing' of github.com:highfidelity/hifi into instancing_gltf

This commit is contained in:
sabrina-shanman 2019-10-03 16:49:31 -07:00
commit 30680e027b
5 changed files with 276 additions and 285 deletions

View file

@ -145,40 +145,19 @@ public:
bool isLimbNode; // is this FBXModel transform a "LimbNode", i.e. a joint
};
glm::mat4 getGlobalTransform(const QMultiMap<QString, QString>& _connectionParentMap,
const QHash<QString, FBXModel>& fbxModels, QString nodeID, bool mixamoHack, const QString& url) {
glm::mat4 globalTransform;
QVector<QString> visitedNodes; // Used to prevent following a cycle
while (!nodeID.isNull()) {
visitedNodes.append(nodeID); // Append each node we visit
const FBXModel& fbxModel = fbxModels.value(nodeID);
globalTransform = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(fbxModel.preRotation *
fbxModel.rotation * fbxModel.postRotation) * fbxModel.postTransform * globalTransform;
if (fbxModel.hasGeometricOffset) {
glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation);
globalTransform = globalTransform * geometricOffset;
}
if (mixamoHack) {
// there's something weird about the models from Mixamo Fuse; they don't skin right with the full transform
return globalTransform;
}
QList<QString> parentIDs = _connectionParentMap.values(nodeID);
nodeID = QString();
foreach (const QString& parentID, parentIDs) {
if (visitedNodes.contains(parentID)) {
qCWarning(modelformat) << "Ignoring loop detected in FBX connection map for" << url;
continue;
}
std::vector<QString> getModelIDsForMeshID(const QString& meshID, const QHash<QString, FBXModel>& fbxModels, const QMultiMap<QString, QString>& _connectionParentMap) {
std::vector<QString> modelsForMesh;
if (fbxModels.contains(meshID)) {
modelsForMesh.push_back(meshID);
} else {
// This mesh may have more than one parent model, with different material and transform, representing a different instance of the mesh
for (const auto& parentID : _connectionParentMap.values(meshID)) {
if (fbxModels.contains(parentID)) {
nodeID = parentID;
break;
modelsForMesh.push_back(parentID);
}
}
}
return globalTransform;
return modelsForMesh;
}
class ExtractedBlendshape {
@ -404,7 +383,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
QVector<ExtractedBlendshape> blendshapes;
QHash<QString, FBXModel> fbxModels;
QHash<QString, Cluster> clusters;
QHash<QString, Cluster> fbxClusters;
QHash<QString, AnimationCurve> animationCurves;
QHash<QString, QString> typeFlags;
@ -1058,9 +1037,9 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
}
}
// skip empty clusters
// skip empty fbxClusters
if (cluster.indices.size() > 0 && cluster.weights.size() > 0) {
clusters.insert(getID(object.properties), cluster);
fbxClusters.insert(getID(object.properties), cluster);
}
} else if (object.properties.last() == "BlendShapeChannel") {
@ -1233,13 +1212,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
QVector<QString> modelIDs;
QSet<QString> remainingFBXModels;
for (QHash<QString, FBXModel>::const_iterator fbxModel = fbxModels.constBegin(); fbxModel != fbxModels.constEnd(); fbxModel++) {
// models with clusters must be parented to the cluster top
// models with fbxClusters must be parented to the cluster top
// Unless the model is a root node.
bool isARootNode = !modelIDs.contains(_connectionParentMap.value(fbxModel.key()));
if (!isARootNode) {
foreach(const QString& deformerID, _connectionChildMap.values(fbxModel.key())) {
foreach(const QString& clusterID, _connectionChildMap.values(deformerID)) {
if (!clusters.contains(clusterID)) {
if (!fbxClusters.contains(clusterID)) {
continue;
}
QString topID = getTopModelID(_connectionParentMap, fbxModels, _connectionChildMap.value(clusterID), url);
@ -1283,8 +1262,10 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
// convert the models to joints
hfmModel.hasSkeletonJoints = false;
bool needMixamoHack = hfmModel.applicationName == "mixamo.com";
foreach (const QString& modelID, modelIDs) {
for (const QString& modelID : modelIDs) {
const FBXModel& fbxModel = fbxModels[modelID];
HFMJoint joint;
joint.parentIndex = fbxModel.parentIndex;
@ -1357,12 +1338,30 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
}
}
// Now that we've initialized the joint, we can define the transform
// modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate
joint.globalTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation * joint.postRotation) * joint.postTransform;
if (joint.parentIndex != -1 && joint.parentIndex < (int)jointIndex && !needMixamoHack) {
hfm::Joint& parentJoint = hfmModel.joints[joint.parentIndex];
joint.globalTransform = joint.globalTransform * parentJoint.globalTransform;
if (parentJoint.hasGeometricOffset) {
// Per the FBX standard, geometric offset should not propagate to children.
// However, we must be careful when modifying the behavior of FBXSerializer.
// So, we leave this here, as a breakpoint for debugging, or stub for implementation.
// qCDebug(modelformat) << "Geometric offset encountered on non-leaf node. jointIndex: " << jointIndex << ", modelURL: " << url;
// glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(parentJoint.geometricScaling, parentJoint.geometricRotation, parentJoint.geometricTranslation);
// globalTransform = globalTransform * glm::inverse(geometricOffset);
}
}
if (joint.hasGeometricOffset) {
glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation);
joint.globalTransform = joint.globalTransform * geometricOffset;
}
hfmModel.joints.push_back(joint);
}
// NOTE: shapeVertices are in joint-frame
hfmModel.shapeVertices.resize(std::max((size_t)1, hfmModel.joints.size()) );
hfmModel.bindExtents.reset();
hfmModel.meshExtents.reset();
@ -1401,235 +1400,222 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
}
#endif
std::unordered_map<std::string, uint32_t> materialNameToID;
for (auto materialIt = _hfmMaterials.cbegin(); materialIt != _hfmMaterials.cend(); ++materialIt) {
materialNameToID[materialIt.key().toStdString()] = (uint32_t)hfmModel.materials.size();
hfmModel.materials.push_back(materialIt.value());
}
// see if any materials have texture children
bool materialsHaveTextures = checkMaterialsHaveTextures(_hfmMaterials, _textureFilenames, _connectionChildMap);
// Note that the transforms in the TransformNodes are initially in world-space, and need to be converted to parent-space
std::vector<hfm::TransformNode> transformNodes;
for (QMap<QString, ExtractedMesh>::iterator it = meshes.begin(); it != meshes.end(); it++) {
ExtractedMesh& extracted = it.value();
const QString& meshID = it.key();
const ExtractedMesh& extracted = it.value();
const auto& partMaterialTextures = extracted.partMaterialTextures;
extracted.mesh.meshExtents.reset();
// accumulate local transforms
QString modelID = fbxModels.contains(it.key()) ? it.key() : _connectionParentMap.value(it.key());
glm::mat4 modelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url);
// compute the mesh extents from the transformed vertices
foreach (const glm::vec3& vertex, extracted.mesh.vertices) {
glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f));
hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex);
hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex);
extracted.mesh.meshExtents.minimum = glm::min(extracted.mesh.meshExtents.minimum, transformedVertex);
extracted.mesh.meshExtents.maximum = glm::max(extracted.mesh.meshExtents.maximum, transformedVertex);
extracted.mesh.modelTransform = modelTransform;
}
// look for textures, material properties
// allocate the Part material library
// NOTE: extracted.partMaterialTextures is empty for FBX_DRACO_MESH_VERSION >= 2. In that case, the mesh part's materialID string is already defined.
int materialIndex = 0;
int textureIndex = 0;
QList<QString> children = _connectionChildMap.values(modelID);
for (int i = children.size() - 1; i >= 0; i--) {
const QString& childID = children.at(i);
if (_hfmMaterials.contains(childID)) {
// the pure material associated with this part
HFMMaterial material = _hfmMaterials.value(childID);
for (int j = 0; j < extracted.partMaterialTextures.size(); j++) {
if (extracted.partMaterialTextures.at(j).first == materialIndex) {
HFMMeshPart& part = extracted.mesh.parts[j];
part.materialID = material.materialID;
}
}
materialIndex++;
} else if (_textureFilenames.contains(childID)) {
// NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale")
// I'm leaving the second parameter blank right now as this code may never be used.
HFMTexture texture = getTexture(childID, "");
for (int j = 0; j < extracted.partMaterialTextures.size(); j++) {
int partTexture = extracted.partMaterialTextures.at(j).second;
if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) {
// TODO: DO something here that replaces this legacy code
// Maybe create a material just for this part with the correct textures?
// extracted.mesh.parts[j].diffuseTexture = texture;
}
}
textureIndex++;
}
}
// find the clusters with which the mesh is associated
QVector<QString> clusterIDs;
foreach (const QString& childID, _connectionChildMap.values(it.key())) {
foreach (const QString& clusterID, _connectionChildMap.values(childID)) {
if (!clusters.contains(clusterID)) {
continue;
}
HFMCluster hfmCluster;
const Cluster& cluster = clusters[clusterID];
clusterIDs.append(clusterID);
// see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion
// of skinning information in FBX
QString jointID = _connectionChildMap.value(clusterID);
hfmCluster.jointIndex = modelIDs.indexOf(jointID);
if (hfmCluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) {
qCDebug(modelformat) << "Joint not in model list: " << jointID;
hfmCluster.jointIndex = 0;
}
hfmCluster.inverseBindMatrix = glm::inverse(cluster.transformLink) * modelTransform;
// slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and
// sometimes floating point fuzz can be introduced after the inverse.
hfmCluster.inverseBindMatrix[0][3] = 0.0f;
hfmCluster.inverseBindMatrix[1][3] = 0.0f;
hfmCluster.inverseBindMatrix[2][3] = 0.0f;
hfmCluster.inverseBindMatrix[3][3] = 1.0f;
hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix);
extracted.mesh.clusters.append(hfmCluster);
// override the bind rotation with the transform link
HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex];
joint.inverseBindRotation = glm::inverse(extractRotation(cluster.transformLink));
joint.bindTransform = cluster.transformLink;
joint.bindTransformFoundInCluster = true;
// update the bind pose extents
glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform);
hfmModel.bindExtents.addPoint(bindTranslation);
}
}
// the last cluster is the root cluster
{
HFMCluster cluster;
cluster.jointIndex = modelIDs.indexOf(modelID);
if (cluster.jointIndex == hfm::Cluster::INVALID_JOINT_INDEX) {
qCDebug(modelformat) << "Model not in model list: " << modelID;
cluster.jointIndex = 0;
}
extracted.mesh.clusters.append(cluster);
}
// whether we're skinned depends on how many clusters are attached
if (clusterIDs.size() > 1) {
// this is a multi-mesh joint
const int WEIGHTS_PER_VERTEX = 4;
int numClusterIndices = extracted.mesh.vertices.size() * WEIGHTS_PER_VERTEX;
extracted.mesh.clusterIndices.fill(extracted.mesh.clusters.size() - 1, numClusterIndices);
QVector<float> weightAccumulators;
weightAccumulators.fill(0.0f, numClusterIndices);
for (int i = 0; i < clusterIDs.size(); i++) {
QString clusterID = clusterIDs.at(i);
const Cluster& cluster = clusters[clusterID];
const HFMCluster& hfmCluster = extracted.mesh.clusters.at(i);
int jointIndex = hfmCluster.jointIndex;
HFMJoint& joint = hfmModel.joints[jointIndex];
glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform;
ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex);
for (int j = 0; j < cluster.indices.size(); j++) {
int oldIndex = cluster.indices.at(j);
float weight = cluster.weights.at(j);
for (QMultiHash<int, int>::const_iterator it = extracted.newIndices.constFind(oldIndex);
it != extracted.newIndices.end() && it.key() == oldIndex; it++) {
int newIndex = it.value();
// remember vertices with at least 1/4 weight
// FIXME: vertices with no weight painting won't get recorded here
const float EXPANSION_WEIGHT_THRESHOLD = 0.25f;
if (weight >= EXPANSION_WEIGHT_THRESHOLD) {
// transform to joint-frame and save for later
const glm::mat4 vertexTransform = meshToJoint * glm::translate(extracted.mesh.vertices.at(newIndex));
points.push_back(extractTranslation(vertexTransform));
}
// look for an unused slot in the weights vector
int weightIndex = newIndex * WEIGHTS_PER_VERTEX;
int lowestIndex = -1;
float lowestWeight = FLT_MAX;
int k = 0;
for (; k < WEIGHTS_PER_VERTEX; k++) {
if (weightAccumulators[weightIndex + k] == 0.0f) {
extracted.mesh.clusterIndices[weightIndex + k] = i;
weightAccumulators[weightIndex + k] = weight;
break;
}
if (weightAccumulators[weightIndex + k] < lowestWeight) {
lowestIndex = k;
lowestWeight = weightAccumulators[weightIndex + k];
}
}
if (k == WEIGHTS_PER_VERTEX && weight > lowestWeight) {
// no space for an additional weight; we must replace the lowest
weightAccumulators[weightIndex + lowestIndex] = weight;
extracted.mesh.clusterIndices[weightIndex + lowestIndex] = i;
}
}
}
}
// now that we've accumulated the most relevant weights for each vertex
// normalize and compress to 16-bits
extracted.mesh.clusterWeights.fill(0, numClusterIndices);
int numVertices = extracted.mesh.vertices.size();
for (int i = 0; i < numVertices; ++i) {
int j = i * WEIGHTS_PER_VERTEX;
// normalize weights into uint16_t
float totalWeight = 0.0f;
for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) {
totalWeight += weightAccumulators[k];
}
const float ALMOST_HALF = 0.499f;
if (totalWeight > 0.0f) {
float weightScalingFactor = (float)(UINT16_MAX) / totalWeight;
for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) {
extracted.mesh.clusterWeights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF);
}
} else {
extracted.mesh.clusterWeights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF);
}
}
} else {
// this is a single-joint mesh
const HFMCluster& firstHFMCluster = extracted.mesh.clusters.at(0);
int jointIndex = firstHFMCluster.jointIndex;
HFMJoint& joint = hfmModel.joints[jointIndex];
// transform cluster vertices to joint-frame and save for later
glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform;
ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex);
for (const glm::vec3& vertex : extracted.mesh.vertices) {
const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertex);
points.push_back(extractTranslation(vertexTransform));
}
// Apply geometric offset, if present, by transforming the vertices directly
if (joint.hasGeometricOffset) {
glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation);
for (int i = 0; i < extracted.mesh.vertices.size(); i++) {
extracted.mesh.vertices[i] = transformPoint(geometricOffset, extracted.mesh.vertices[i]);
}
}
}
hfmModel.meshes.push_back(extracted.mesh);
uint32_t meshIndex = (uint32_t)hfmModel.meshes.size() - 1;
uint32_t meshIndex = (uint32_t)hfmModel.meshes.size();
meshIDsToMeshIndices.insert(it.key(), meshIndex);
hfmModel.meshes.push_back(extracted.mesh);
hfm::Mesh& mesh = hfmModel.meshes.back();
std::vector<QString> instanceModelIDs = getModelIDsForMeshID(meshID, fbxModels, _connectionParentMap);
// meshShapes will be added to hfmModel at the very end
std::vector<hfm::Shape> meshShapes;
meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size());
for (const QString& modelID : instanceModelIDs) {
// The transform node has the same indexing order as the joints
const uint32_t transformIndex = (uint32_t)modelIDs.indexOf(modelID);
// accumulate local transforms
glm::mat4 globalTransform = hfmModel.joints[transformIndex].globalTransform;
// compute the mesh extents from the transformed vertices
for (const glm::vec3& vertex : mesh.vertices) {
glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f));
hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex);
hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex);
}
// partShapes will be added to meshShapes at the very end
std::vector<hfm::Shape> partShapes { mesh.parts.size() };
for (uint32_t i = 0; i < (uint32_t)partShapes.size(); ++i) {
hfm::Shape& shape = partShapes[i];
shape.mesh = meshIndex;
shape.meshPart = i;
shape.transform = transformIndex;
shape.transformedExtents.reset();
// compute the shape extents from the transformed vertices
for (const glm::vec3& vertex : mesh.vertices) {
glm::vec3 transformedVertex = glm::vec3(globalTransform * glm::vec4(vertex, 1.0f));
shape.transformedExtents.minimum = glm::min(shape.transformedExtents.minimum, transformedVertex);
shape.transformedExtents.maximum = glm::max(shape.transformedExtents.maximum, transformedVertex);
}
}
// For FBX_DRACO_MESH_VERSION < 2, or unbaked models, get materials from the partMaterialTextures
if (!partMaterialTextures.empty()) {
int materialIndex = 0;
int textureIndex = 0;
QList<QString> children = _connectionChildMap.values(modelID);
for (int i = children.size() - 1; i >= 0; i--) {
const QString& childID = children.at(i);
if (_hfmMaterials.contains(childID)) {
// the pure material associated with this part
const HFMMaterial& material = _hfmMaterials.value(childID);
for (int j = 0; j < partMaterialTextures.size(); j++) {
if (partMaterialTextures.at(j).first == materialIndex) {
hfm::Shape& shape = partShapes[j];
shape.material = materialNameToID[material.materialID.toStdString()];
}
}
materialIndex++;
} else if (_textureFilenames.contains(childID)) {
// NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale")
// I'm leaving the second parameter blank right now as this code may never be used.
HFMTexture texture = getTexture(childID, "");
for (int j = 0; j < partMaterialTextures.size(); j++) {
int partTexture = partMaterialTextures.at(j).second;
if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) {
// TODO: DO something here that replaces this legacy code
// Maybe create a material just for this part with the correct textures?
// material.albedoTexture = texture;
// partShapes[j].material = materialIndex;
}
}
textureIndex++;
}
}
}
// For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart
if (!extracted.materialIDPerMeshPart.empty()) {
for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) {
hfm::Shape& shape = partShapes[i];
const std::string& materialID = extracted.materialIDPerMeshPart[i];
auto materialIt = materialNameToID.find(materialID);
if (materialIt != materialNameToID.end()) {
shape.material = materialIt->second;
}
}
}
// find the clusters with which the mesh is associated
QVector<QString> clusterIDs;
for (const QString& childID : _connectionChildMap.values(meshID)) {
for (const QString& clusterID : _connectionChildMap.values(childID)) {
if (!fbxClusters.contains(clusterID)) {
continue;
}
clusterIDs.append(clusterID);
}
}
auto rootJointIndex = modelIDs.indexOf(modelID);
if (rootJointIndex == -1) {
qCDebug(modelformat) << "Model not in model list: " << modelID;
rootJointIndex = 0;
}
// whether we're skinned depends on how many clusters are attached
if (clusterIDs.size() > 1) {
hfm::DynamicTransform dynamicTransform;
auto& clusters = dynamicTransform.clusters;
std::vector<hfm::Deformer> deformers;
for (const auto& clusterID : clusterIDs) {
HFMCluster hfmCluster;
const Cluster& fbxCluster = fbxClusters[clusterID];
// see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion
// of skinning information in FBX
QString jointID = _connectionChildMap.value(clusterID);
hfmCluster.jointIndex = modelIDs.indexOf(jointID);
if (hfmCluster.jointIndex == -1) {
qCDebug(modelformat) << "Joint not in model list: " << jointID;
hfmCluster.jointIndex = 0;
}
hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * globalTransform;
// slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and
// sometimes floating point fuzz can be introduced after the inverse.
hfmCluster.inverseBindMatrix[0][3] = 0.0f;
hfmCluster.inverseBindMatrix[1][3] = 0.0f;
hfmCluster.inverseBindMatrix[2][3] = 0.0f;
hfmCluster.inverseBindMatrix[3][3] = 1.0f;
hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix);
clusters.push_back(hfmCluster);
// override the bind rotation with the transform link
HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex];
joint.inverseBindRotation = glm::inverse(extractRotation(fbxCluster.transformLink));
joint.bindTransform = fbxCluster.transformLink;
joint.bindTransformFoundInCluster = true;
// update the bind pose extents
glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform);
hfmModel.bindExtents.addPoint(bindTranslation);
}
// the last cluster is the root cluster
HFMCluster cluster;
cluster.jointIndex = rootJointIndex;
clusters.push_back(cluster);
// Skinned mesh instances have a dynamic transform
dynamicTransform.deformers.reserve(clusterIDs.size());
clusters.reserve(clusterIDs.size());
for (const auto& clusterID : clusterIDs) {
const Cluster& fbxCluster = fbxClusters[clusterID];
dynamicTransform.deformers.emplace_back();
deformers.emplace_back();
hfm::Deformer& deformer = deformers.back();
size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size());
deformer.indices.reserve(indexWeightPairs);
deformer.weights.reserve(indexWeightPairs);
for (int i = 0; i < (int)indexWeightPairs; i++) {
int oldIndex = fbxCluster.indices[i];
uint32_t newIndex = (uint32_t)extracted.newIndices.value(oldIndex);
deformer.indices.push_back(newIndex);
deformer.indices.push_back((float)fbxCluster.weights[i]);
}
}
// Store this model's deformers, this dynamic transform's deformer IDs
uint32_t deformerMinID = (uint32_t)hfmModel.deformers.size();
hfmModel.deformers.insert(hfmModel.deformers.end(), deformers.cbegin(), deformers.cend());
dynamicTransform.deformers.resize(deformers.size());
std::iota(dynamicTransform.deformers.begin(), dynamicTransform.deformers.end(), deformerMinID);
// Store the model's dynamic transform, and put its ID in the shapes
hfmModel.dynamicTransforms.push_back(dynamicTransform);
uint32_t dynamicTransformID = (uint32_t)(hfmModel.dynamicTransforms.size() - 1);
for (hfm::Shape& shape : partShapes) {
shape.dynamicTransform = dynamicTransformID;
}
} else {
// this is a single-joint mesh
HFMJoint& joint = hfmModel.joints[rootJointIndex];
// Apply geometric offset, if present, by transforming the vertices directly
if (joint.hasGeometricOffset) {
glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation);
for (int i = 0; i < mesh.vertices.size(); i++) {
mesh.vertices[i] = transformPoint(geometricOffset, mesh.vertices[i]);
}
}
}
// Store the parts for this mesh (or instance of this mesh, as the case may be)
meshShapes.insert(meshShapes.cend(), partShapes.cbegin(), partShapes.cend());
}
// Store the shapes for the mesh (or multiple instances of the mesh, as the case may be)
hfmModel.shapes.insert(hfmModel.shapes.cend(), meshShapes.cbegin(), meshShapes.cend());
}
// attempt to map any meshes to a named model
@ -1651,9 +1637,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
if (applyUpAxisZRotation) {
hfmModelPtr->meshExtents.transform(glm::mat4_cast(upAxisZRotation));
hfmModelPtr->bindExtents.transform(glm::mat4_cast(upAxisZRotation));
for (auto &mesh : hfmModelPtr->meshes) {
mesh.modelTransform *= glm::mat4_cast(upAxisZRotation);
mesh.meshExtents.transform(glm::mat4_cast(upAxisZRotation));
for (auto& shape : hfmModelPtr->shapes) {
shape.transformedExtents.transform(glm::mat4_cast(upAxisZRotation));
}
for (auto& joint : hfmModelPtr->joints) {
joint.globalTransform = joint.globalTransform * glm::mat4_cast(upAxisZRotation);
}
}
return hfmModelPtr;

View file

@ -100,7 +100,15 @@ public:
{}
};
class ExtractedMesh;
// Mesh data extracted from an FBX geometry node, plus the bookkeeping needed to
// map the FBX file's original vertex indices and materials onto the extracted hfm::Mesh.
class ExtractedMesh {
public:
hfm::Mesh mesh; // the extracted mesh geometry (vertices, parts, blendshape data, ...)
// Per-mesh-part material IDs, populated from the draco "MaterialList" child node when
// FBX_DRACO_MESH_VERSION >= 2; left empty otherwise (see FBXSerializer::extractMesh)
std::vector<std::string> materialIDPerMeshPart;
// Maps an original FBX vertex index to the one or more indices it became in `mesh`
// (a single source vertex can be duplicated during extraction, hence the multi-hash)
QMultiHash<int, int> newIndices;
// Per-blendshape index remapping — presumably original-to-extracted indices; confirm against blendshape extraction
QVector<QHash<int, int> > blendshapeIndexMaps;
// Per-part (materialIndex, textureIndex) pairs in order of first appearance;
// empty for FBX_DRACO_MESH_VERSION >= 2, where materialIDPerMeshPart is used instead
QVector<QPair<int, int> > partMaterialTextures;
// Maps a texcoord set name to its index — assumed; usage not visible in this view
QHash<QString, size_t> texcoordSetMap;
};
class FBXSerializer : public HFMSerializer {
public:

View file

@ -355,7 +355,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
// Check for additional metadata
unsigned int dracoMeshNodeVersion = 1;
std::vector<QString> dracoMaterialList;
std::vector<std::string> dracoMaterialList;
for (const auto& dracoChild : child.children) {
if (dracoChild.name == "FBXDracoMeshVersion") {
if (!dracoChild.properties.isEmpty()) {
@ -364,11 +364,16 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
} else if (dracoChild.name == "MaterialList") {
dracoMaterialList.reserve(dracoChild.properties.size());
for (const auto& materialID : dracoChild.properties) {
dracoMaterialList.push_back(materialID.toString());
dracoMaterialList.push_back(materialID.toString().toStdString());
}
}
}
if (dracoMeshNodeVersion >= 2) {
// Define the materialIDs now
data.extracted.materialIDPerMeshPart = dracoMaterialList;
}
// load the draco mesh from the FBX and create a draco::Mesh
draco::Decoder decoder;
draco::DecoderBuffer decodedBuffer;
@ -487,18 +492,13 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
int& partIndexPlusOne = materialTextureParts[materialTexture];
if (partIndexPlusOne == 0) {
data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1);
HFMMeshPart& part = data.extracted.mesh.parts.back();
// Figure out what material this part is
if (dracoMeshNodeVersion >= 2) {
// Define the materialID now
if (materialID < dracoMaterialList.size()) {
part.materialID = dracoMaterialList[materialID];
}
} else {
// Figure out if this is the older way of defining the per-part material for baked FBX
if (dracoMeshNodeVersion < 2) {
// Define the materialID later, based on the order of first appearance of the materials in the _connectionChildMap
data.extracted.partMaterialTextures.append(materialTexture);
}
// in dracoMeshNodeVersion >= 2, fbx meshes have their per-part materials already defined in data.extracted.materialIDPerMeshPart
partIndexPlusOne = (int)data.extracted.mesh.parts.size();
}

View file

@ -118,6 +118,9 @@ public:
glm::vec3 geometricTranslation;
glm::quat geometricRotation;
glm::vec3 geometricScaling;
// globalTransform is the transform of the joint with all parent transforms applied, plus the geometric offset
glm::mat4 globalTransform;
};
@ -245,7 +248,7 @@ public:
QVector<Cluster> clusters; // DEPRECATED (see hfm::Shape::dynamicTransform, hfm::DynamicTransform::clusters)
Extents meshExtents; // DEPRECATED (see hfm::Shape::transformedExtents)
glm::mat4 modelTransform; // DEPRECATED (see hfm::Shape::transform, hfm::TransformNode, hfm::Model::transforms)
glm::mat4 modelTransform; // DEPRECATED (see hfm::Joint::globalTransform, hfm::Shape::transform, hfm::Model::joints)
QVector<Blendshape> blendshapes;
@ -289,6 +292,7 @@ public:
bool shouldInitCollisions() const { return _collisionsConfig.size() > 0; }
};
// DEPRECATED in favor of using hfm::Joint
class TransformNode {
public:
static const uint32_t INVALID_PARENT_INDEX{ (uint32_t)-1 };
@ -317,9 +321,9 @@ public:
uint32_t mesh { UNDEFINED_KEY };
uint32_t meshPart { UNDEFINED_KEY };
uint32_t material { UNDEFINED_KEY };
uint32_t transform { UNDEFINED_KEY }; // The static transform node when not taking into account rigging/skinning
uint32_t transform { UNDEFINED_KEY }; // The hfm::Joint associated with this shape, containing transform information
// TODO: Have all serializers calculate hfm::Shape::transformedExtents in world space where they previously calculated hfm::Mesh::meshExtents. Change all code that uses hfm::Mesh::meshExtents to use this instead.
Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after the transform node and parent transform nodes are applied, while not taking into account rigging/skinning
Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after transform information is applied, while not taking into account rigging/skinning
uint32_t dynamicTransform { UNDEFINED_KEY };
};
@ -382,15 +386,6 @@ public:
};
class ExtractedMesh {
public:
hfm::Mesh mesh;
QMultiHash<int, int> newIndices;
QVector<QHash<int, int> > blendshapeIndexMaps;
QVector<QPair<int, int> > partMaterialTextures;
QHash<QString, size_t> texcoordSetMap;
};
typedef hfm::Blendshape HFMBlendshape;
typedef hfm::JointShapeInfo HFMJointShapeInfo;
typedef hfm::Joint HFMJoint;

View file

@ -33,7 +33,7 @@ void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, con
const auto& reweightedDeformers = input.get4();
auto& shapeVerticesPerJoint = output;
shapeVerticesPerJoint.reserve(joints.size());
shapeVerticesPerJoint.resize(joints.size());
std::vector<std::vector<VertexSource>> vertexSourcesPerJoint;
vertexSourcesPerJoint.resize(joints.size());
for (size_t i = 0; i < shapes.size(); ++i) {