Merge pull request #16197 from highfidelity/instancing

(DEV-430) Instancing: Introduce hfm::Shape to enable Instancing support
Sabrina Shanman 2019-11-18 16:40:10 -08:00 committed by GitHub
commit 359248829c
68 changed files with 2927 additions and 2659 deletions
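Based on how the new type is used throughout the hunks below (shape.mesh, shape.joint, shape.skinDeformer, shape.transformedExtents, and the hfm::UNDEFINED_KEY sentinel), hfm::Shape appears to be a small record that binds one mesh part to a transform node and an optional skin deformer, with one record per drawn instance. The following is a hypothetical reconstruction with simplified stand-in types, not the actual declaration from the hfm headers; the meshPart and material members in particular are assumptions.

#include <cfloat>
#include <cstdint>
#include <glm/glm.hpp>

namespace hfm_sketch {

constexpr uint32_t UNDEFINED_KEY = UINT32_MAX; // assumed "no reference" sentinel

struct Extents {                    // stand-in for the engine's Extents type
    glm::vec3 minimum { FLT_MAX };
    glm::vec3 maximum { -FLT_MAX };
};

struct Shape {
    uint32_t mesh { UNDEFINED_KEY };         // index into hfm::Model::meshes
    uint32_t meshPart { UNDEFINED_KEY };     // assumed: which part of that mesh
    uint32_t material { UNDEFINED_KEY };     // assumed: index into hfm::Model::materials
    uint32_t joint { UNDEFINED_KEY };        // transform node driving this instance
    uint32_t skinDeformer { UNDEFINED_KEY }; // index into hfm::Model::skinDeformers
    Extents transformedExtents;              // extents after the instance transform
};

} // namespace hfm_sketch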


@ -220,6 +220,7 @@ find_package( Threads )
add_definitions(-DGLM_FORCE_RADIANS)
add_definitions(-DGLM_ENABLE_EXPERIMENTAL)
add_definitions(-DGLM_FORCE_CTOR_INIT)
add_definitions(-DGLM_LANG_STL11_FORCED) # Workaround for GLM not detecting support for C++11 templates on Android
if (WIN32)
# Deal with fakakta Visual Studio 2017 bug


@ -144,10 +144,10 @@ void ScriptableAvatar::update(float deltatime) {
}
_animationDetails.currentFrame = currentFrame;
const QVector<HFMJoint>& modelJoints = _bind->getHFMModel().joints;
const std::vector<HFMJoint>& modelJoints = _bind->getHFMModel().joints;
QStringList animationJointNames = _animation->getJointNames();
const int nJoints = modelJoints.size();
const auto nJoints = (int)modelJoints.size();
if (_jointData.size() != nJoints) {
_jointData.resize(nJoints);
}


@ -80,7 +80,7 @@ QVariantHash ModelPropertiesDialog::getMapping() const {
// update the joint indices
QVariantHash jointIndices;
for (int i = 0; i < _hfmModel.joints.size(); i++) {
for (size_t i = 0; i < _hfmModel.joints.size(); i++) {
jointIndices.insert(_hfmModel.joints.at(i).name, QString::number(i));
}
mapping.insert(JOINT_INDEX_FIELD, jointIndices);


@ -79,7 +79,7 @@ void AvatarDoctor::startDiagnosing() {
_missingTextureCount = 0;
_unsupportedTextureCount = 0;
const auto resource = DependencyManager::get<ModelCache>()->getGeometryResource(_avatarFSTFileUrl);
const auto resource = DependencyManager::get<ModelCache>()->getModelResource(_avatarFSTFileUrl);
resource->refresh();
const auto resourceLoaded = [this, resource](bool success) {
@ -99,12 +99,12 @@ void AvatarDoctor::startDiagnosing() {
}
// RIG
if (avatarModel.joints.isEmpty()) {
if (avatarModel.joints.empty()) {
addError("Avatar has no rig.", "no-rig");
} else {
auto jointNames = avatarModel.getJointNames();
if (avatarModel.joints.length() > NETWORKED_JOINTS_LIMIT) {
if (avatarModel.joints.size() > NETWORKED_JOINTS_LIMIT) {
addError(tr( "Avatar has over %n bones.", "", NETWORKED_JOINTS_LIMIT), "maximum-bone-limit");
}
// Avatar does not have Hips bone mapped
@ -297,7 +297,7 @@ void AvatarDoctor::startDiagnosing() {
if (resource->isLoaded()) {
resourceLoaded(!resource->isFailed());
} else {
connect(resource.data(), &GeometryResource::finished, this, resourceLoaded);
connect(resource.data(), &ModelResource::finished, this, resourceLoaded);
}
} else {
addError("Model file cannot be opened", "missing-file");


@ -53,7 +53,7 @@ private:
int _materialMappingCount = 0;
int _materialMappingLoadedCount = 0;
GeometryResource::Pointer _model;
ModelResource::Pointer _model;
bool _isDiagnosing = false;
};


@ -2443,7 +2443,7 @@ void MyAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) {
if (_fullAvatarModelName.isEmpty()) {
// Store the FST file name into preferences
const auto& mapping = _skeletonModel->getGeometry()->getMapping();
const auto& mapping = _skeletonModel->getNetworkModel()->getMapping();
if (mapping.value("name").isValid()) {
_fullAvatarModelName = mapping.value("name").toString();
}
@ -2451,7 +2451,7 @@ void MyAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) {
initHeadBones();
_skeletonModel->setCauterizeBoneSet(_headBoneSet);
_fstAnimGraphOverrideUrl = _skeletonModel->getGeometry()->getAnimGraphOverrideUrl();
_fstAnimGraphOverrideUrl = _skeletonModel->getNetworkModel()->getAnimGraphOverrideUrl();
initAnimGraph();
initFlowFromFST();
}


@ -121,8 +121,9 @@ bool CollisionPick::isLoaded() const {
bool CollisionPick::getShapeInfoReady(const CollisionRegion& pick) {
if (_mathPick.shouldComputeShapeInfo()) {
if (_cachedResource && _cachedResource->isLoaded()) {
computeShapeInfo(pick, *_mathPick.shapeInfo, _cachedResource);
_mathPick.loaded = true;
// TODO: Model CollisionPick support
//computeShapeInfo(pick, *_mathPick.shapeInfo, _cachedResource);
//_mathPick.loaded = true;
} else {
_mathPick.loaded = false;
}
@ -134,7 +135,7 @@ bool CollisionPick::getShapeInfoReady(const CollisionRegion& pick) {
return _mathPick.loaded;
}
void CollisionPick::computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<GeometryResource> resource) {
void CollisionPick::computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<ModelResource> resource) {
ShapeType type = shapeInfo.getType();
glm::vec3 dimensions = pick.transform.getScale();
QString modelURL = (resource ? resource->getURL().toString() : "");
@ -147,241 +148,12 @@ void CollisionPick::computeShapeInfoDimensionsOnly(const CollisionRegion& pick,
}
}
void CollisionPick::computeShapeInfo(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<GeometryResource> resource) {
// This code was copied and modified from RenderableModelEntityItem::computeShapeInfo
// TODO: Move to some shared code area (in entities-renderer? model-networking?)
// after we verify this is working and do a diff comparison with RenderableModelEntityItem::computeShapeInfo
// to consolidate the code.
// We may also want to make computeShapeInfo always abstract away from the gpu model mesh, like it does here.
const uint32_t TRIANGLE_STRIDE = 3;
const uint32_t QUAD_STRIDE = 4;
ShapeType type = shapeInfo.getType();
glm::vec3 dimensions = pick.transform.getScale();
if (type == SHAPE_TYPE_COMPOUND) {
// should never fall in here when collision model not fully loaded
// TODO: assert that all geometries exist and are loaded
//assert(_model && _model->isLoaded() && _compoundShapeResource && _compoundShapeResource->isLoaded());
const HFMModel& collisionModel = resource->getHFMModel();
ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection();
pointCollection.clear();
uint32_t i = 0;
// the way OBJ files get read, each section under a "g" line is its own meshPart. We only expect
// to find one actual "mesh" (with one or more meshParts in it), but we loop over the meshes, just in case.
foreach (const HFMMesh& mesh, collisionModel.meshes) {
// each meshPart is a convex hull
foreach (const HFMMeshPart &meshPart, mesh.parts) {
pointCollection.push_back(QVector<glm::vec3>());
ShapeInfo::PointList& pointsInPart = pointCollection[i];
// run through all the triangles and (uniquely) add each point to the hull
uint32_t numIndices = (uint32_t)meshPart.triangleIndices.size();
// TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
//assert(numIndices % TRIANGLE_STRIDE == 0);
numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
for (uint32_t j = 0; j < numIndices; j += TRIANGLE_STRIDE) {
glm::vec3 p0 = mesh.vertices[meshPart.triangleIndices[j]];
glm::vec3 p1 = mesh.vertices[meshPart.triangleIndices[j + 1]];
glm::vec3 p2 = mesh.vertices[meshPart.triangleIndices[j + 2]];
if (!pointsInPart.contains(p0)) {
pointsInPart << p0;
}
if (!pointsInPart.contains(p1)) {
pointsInPart << p1;
}
if (!pointsInPart.contains(p2)) {
pointsInPart << p2;
}
}
// run through all the quads and (uniquely) add each point to the hull
numIndices = (uint32_t)meshPart.quadIndices.size();
// TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
//assert(numIndices % QUAD_STRIDE == 0);
numIndices -= numIndices % QUAD_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
for (uint32_t j = 0; j < numIndices; j += QUAD_STRIDE) {
glm::vec3 p0 = mesh.vertices[meshPart.quadIndices[j]];
glm::vec3 p1 = mesh.vertices[meshPart.quadIndices[j + 1]];
glm::vec3 p2 = mesh.vertices[meshPart.quadIndices[j + 2]];
glm::vec3 p3 = mesh.vertices[meshPart.quadIndices[j + 3]];
if (!pointsInPart.contains(p0)) {
pointsInPart << p0;
}
if (!pointsInPart.contains(p1)) {
pointsInPart << p1;
}
if (!pointsInPart.contains(p2)) {
pointsInPart << p2;
}
if (!pointsInPart.contains(p3)) {
pointsInPart << p3;
}
}
if (pointsInPart.size() == 0) {
qCDebug(scriptengine) << "Warning -- meshPart has no faces";
pointCollection.pop_back();
continue;
}
++i;
}
}
// We expect that the collision model will have the same units and will be displaced
// from its origin in the same way the visual model is. The visual model has
// been centered and probably scaled. We take the scaling and offset which were applied
// to the visual model and apply them to the collision model (without regard for the
// collision model's extents).
glm::vec3 scaleToFit = dimensions / resource->getHFMModel().getUnscaledMeshExtents().size();
// multiply each point by scale
for (int32_t i = 0; i < pointCollection.size(); i++) {
for (int32_t j = 0; j < pointCollection[i].size(); j++) {
// back compensate for registration so we can apply that offset to the shapeInfo later
pointCollection[i][j] = scaleToFit * pointCollection[i][j];
}
}
shapeInfo.setParams(type, dimensions, resource->getURL().toString());
} else if (type >= SHAPE_TYPE_SIMPLE_HULL && type <= SHAPE_TYPE_STATIC_MESH) {
const HFMModel& hfmModel = resource->getHFMModel();
int numHFMMeshes = hfmModel.meshes.size();
int totalNumVertices = 0;
for (int i = 0; i < numHFMMeshes; i++) {
const HFMMesh& mesh = hfmModel.meshes.at(i);
totalNumVertices += mesh.vertices.size();
}
const int32_t MAX_VERTICES_PER_STATIC_MESH = 1e6;
if (totalNumVertices > MAX_VERTICES_PER_STATIC_MESH) {
qWarning() << "model" << "has too many vertices" << totalNumVertices << "and will collide as a box.";
shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions);
return;
}
auto& meshes = resource->getHFMModel().meshes;
int32_t numMeshes = (int32_t)(meshes.size());
const int MAX_ALLOWED_MESH_COUNT = 1000;
if (numMeshes > MAX_ALLOWED_MESH_COUNT) {
// too many will cause the deadlock timer to throw...
shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions);
return;
}
ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection();
pointCollection.clear();
if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
pointCollection.resize(numMeshes);
} else {
pointCollection.resize(1);
}
ShapeInfo::TriangleIndices& triangleIndices = shapeInfo.getTriangleIndices();
triangleIndices.clear();
Extents extents;
int32_t meshCount = 0;
int32_t pointListIndex = 0;
for (auto& mesh : meshes) {
if (!mesh.vertices.size()) {
continue;
}
QVector<glm::vec3> vertices = mesh.vertices;
ShapeInfo::PointList& points = pointCollection[pointListIndex];
// reserve room
int32_t sizeToReserve = (int32_t)(vertices.count());
if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
// a list of points for each mesh
pointListIndex++;
} else {
// only one list of points
sizeToReserve += (int32_t)points.size();
}
points.reserve(sizeToReserve);
// copy points
const glm::vec3* vertexItr = vertices.cbegin();
while (vertexItr != vertices.cend()) {
glm::vec3 point = *vertexItr;
points.push_back(point);
extents.addPoint(point);
++vertexItr;
}
if (type == SHAPE_TYPE_STATIC_MESH) {
// copy into triangleIndices
size_t triangleIndicesCount = 0;
for (const HFMMeshPart& meshPart : mesh.parts) {
triangleIndicesCount += meshPart.triangleIndices.count();
}
triangleIndices.reserve((int)triangleIndicesCount);
for (const HFMMeshPart& meshPart : mesh.parts) {
const int* indexItr = meshPart.triangleIndices.cbegin();
while (indexItr != meshPart.triangleIndices.cend()) {
triangleIndices.push_back(*indexItr);
++indexItr;
}
}
} else if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
// for each mesh copy unique part indices, separated by special bogus (flag) index values
for (const HFMMeshPart& meshPart : mesh.parts) {
// collect unique list of indices for this part
std::set<int32_t> uniqueIndices;
auto numIndices = meshPart.triangleIndices.count();
// TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
//assert(numIndices% TRIANGLE_STRIDE == 0);
numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
auto indexItr = meshPart.triangleIndices.cbegin();
while (indexItr != meshPart.triangleIndices.cend()) {
uniqueIndices.insert(*indexItr);
++indexItr;
}
// store uniqueIndices in triangleIndices
triangleIndices.reserve(triangleIndices.size() + (int32_t)uniqueIndices.size());
for (auto index : uniqueIndices) {
triangleIndices.push_back(index);
}
// flag end of part
triangleIndices.push_back(END_OF_MESH_PART);
}
// flag end of mesh
triangleIndices.push_back(END_OF_MESH);
}
++meshCount;
}
// scale and shift
glm::vec3 extentsSize = extents.size();
glm::vec3 scaleToFit = dimensions / extentsSize;
for (int32_t i = 0; i < 3; ++i) {
if (extentsSize[i] < 1.0e-6f) {
scaleToFit[i] = 1.0f;
}
}
for (auto points : pointCollection) {
for (int32_t i = 0; i < points.size(); ++i) {
points[i] = (points[i] * scaleToFit);
}
}
shapeInfo.setParams(type, 0.5f * dimensions, resource->getURL().toString());
}
}
CollisionPick::CollisionPick(const PickFilter& filter, float maxDistance, bool enabled, bool scaleWithParent, CollisionRegion collisionRegion, PhysicsEnginePointer physicsEngine) :
Pick(collisionRegion, filter, maxDistance, enabled),
_scaleWithParent(scaleWithParent),
_physicsEngine(physicsEngine) {
if (collisionRegion.shouldComputeShapeInfo()) {
_cachedResource = DependencyManager::get<ModelCache>()->getCollisionGeometryResource(collisionRegion.modelURL);
_cachedResource = DependencyManager::get<ModelCache>()->getCollisionModelResource(collisionRegion.modelURL);
}
_mathPick.loaded = isLoaded();
}


@ -63,14 +63,13 @@ protected:
bool isLoaded() const;
// Returns true if _mathPick.shapeInfo is valid. Otherwise, attempts to get the _mathPick ready for use.
bool getShapeInfoReady(const CollisionRegion& pick);
void computeShapeInfo(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<GeometryResource> resource);
void computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<GeometryResource> resource);
void computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<ModelResource> resource);
void filterIntersections(std::vector<ContactTestResult>& intersections) const;
bool _scaleWithParent;
PhysicsEnginePointer _physicsEngine;
QSharedPointer<GeometryResource> _cachedResource;
QSharedPointer<ModelResource> _cachedResource;
// Options for what information to get from collision results
bool _includeNormals;


@ -20,24 +20,17 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) {
_geometryOffset = hfmModel.offset;
// convert to std::vector of joints
std::vector<HFMJoint> joints;
joints.reserve(hfmModel.joints.size());
for (auto& joint : hfmModel.joints) {
joints.push_back(joint);
}
buildSkeletonFromJoints(joints, hfmModel.jointRotationOffsets);
buildSkeletonFromJoints(hfmModel.joints, hfmModel.jointRotationOffsets);
// we make a copy of the inverseBindMatrices in order to prevent mutating the model bind pose
// when we are dealing with a joint offset in the model
for (int i = 0; i < (int)hfmModel.meshes.size(); i++) {
const HFMMesh& mesh = hfmModel.meshes.at(i);
for (uint32_t i = 0; i < (uint32_t)hfmModel.skinDeformers.size(); i++) {
const auto& deformer = hfmModel.skinDeformers[i];
std::vector<HFMCluster> dummyClustersList;
for (int j = 0; j < mesh.clusters.size(); j++) {
std::vector<glm::mat4> bindMatrices;
for (uint32_t j = 0; j < (uint32_t)deformer.clusters.size(); j++) {
// cast into a non-const reference, so we can mutate the FBXCluster
HFMCluster& cluster = const_cast<HFMCluster&>(mesh.clusters.at(j));
HFMCluster& cluster = const_cast<HFMCluster&>(deformer.clusters.at(j));
HFMCluster localCluster;
localCluster.jointIndex = cluster.jointIndex;

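The hunk above moves skinning data from per-mesh clusters to a model-level skinDeformers list, which the collision and animation code later index through hfm::Shape::skinDeformer. A minimal, assumed sketch of the structures implied by that usage (field names mirror the code above; the real hfm declarations may differ):

#include <vector>
#include <glm/glm.hpp>

struct SketchCluster {                      // stand-in for HFMCluster
    int jointIndex { -1 };                  // joint this cluster skins to
    glm::mat4 inverseBindMatrix { 1.0f };   // model space -> joint bind space
};

struct SketchSkinDeformer {                 // stand-in for hfm::SkinDeformer
    std::vector<SketchCluster> clusters;    // AnimSkeleton copies these bind matrices per deformer
};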

@ -68,7 +68,7 @@ public:
void dump(const AnimPoseVec& poses) const;
std::vector<int> lookUpJointIndices(const std::vector<QString>& jointNames) const;
const HFMCluster getClusterBindMatricesOriginalValues(const int meshIndex, const int clusterIndex) const { return _clusterBindMatrixOriginalValues[meshIndex][clusterIndex]; }
const HFMCluster getClusterBindMatricesOriginalValues(int skinDeformerIndex, int clusterIndex) const { return _clusterBindMatrixOriginalValues[skinDeformerIndex][clusterIndex]; }
protected:
void buildSkeletonFromJoints(const std::vector<HFMJoint>& joints, const QMap<int, glm::quat> jointOffsets);


@ -943,7 +943,7 @@ void Avatar::simulateAttachments(float deltaTime) {
bool texturesLoaded = _attachmentModelsTexturesLoaded.at(i);
// Watch for texture loading
if (!texturesLoaded && model->getGeometry() && model->getGeometry()->areTexturesLoaded()) {
if (!texturesLoaded && model->getNetworkModel() && model->getNetworkModel()->areTexturesLoaded()) {
_attachmentModelsTexturesLoaded[i] = true;
model->updateRenderItems();
}


@ -171,7 +171,7 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
// FIXME: This texture loading logic should probably live in Avatar, to mirror RenderableModelEntityItem,
// but Avatars don't get updates in the same way
if (!_texturesLoaded && getGeometry() && getGeometry()->areTexturesLoaded()) {
if (!_texturesLoaded && getNetworkModel() && getNetworkModel()->areTexturesLoaded()) {
_texturesLoaded = true;
updateRenderItems();
}
@ -326,7 +326,7 @@ void SkeletonModel::computeBoundingShape() {
}
const HFMModel& hfmModel = getHFMModel();
if (hfmModel.joints.isEmpty() || _rig.indexOfJoint("Hips") == -1) {
if (hfmModel.joints.empty() || _rig.indexOfJoint("Hips") == -1) {
// rootJointIndex == -1 if the avatar model has no skeleton
return;
}


@ -90,11 +90,11 @@ void FBXBaker::replaceMeshNodeWithDraco(FBXNode& meshNode, const QByteArray& dra
}
}
void FBXBaker::rewriteAndBakeSceneModels(const QVector<hfm::Mesh>& meshes, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists) {
void FBXBaker::rewriteAndBakeSceneModels(const std::vector<hfm::Mesh>& meshes, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists) {
std::vector<int> meshIndexToRuntimeOrder;
auto meshCount = (int)meshes.size();
auto meshCount = (uint32_t)meshes.size();
meshIndexToRuntimeOrder.resize(meshCount);
for (int i = 0; i < meshCount; i++) {
for (uint32_t i = 0; i < meshCount; i++) {
meshIndexToRuntimeOrder[meshes[i].meshIndex] = i;
}


@ -33,7 +33,7 @@ protected:
virtual void bakeProcessedSource(const hfm::Model::Pointer& hfmModel, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists) override;
private:
void rewriteAndBakeSceneModels(const QVector<hfm::Mesh>& meshes, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists);
void rewriteAndBakeSceneModels(const std::vector<hfm::Mesh>& meshes, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists);
void replaceMeshNodeWithDraco(FBXNode& meshNode, const QByteArray& dracoMeshBytes, const std::vector<hifi::ByteArray>& dracoMaterialList);
};


@ -258,9 +258,9 @@ void MaterialBaker::addTexture(const QString& materialName, image::TextureUsage:
}
};
void MaterialBaker::setMaterials(const QHash<QString, hfm::Material>& materials, const QString& baseURL) {
void MaterialBaker::setMaterials(const std::vector<hfm::Material>& materials, const QString& baseURL) {
_materialResource = NetworkMaterialResourcePointer(new NetworkMaterialResource(), [](NetworkMaterialResource* ptr) { ptr->deleteLater(); });
for (auto& material : materials) {
for (const auto& material : materials) {
_materialResource->parsedMaterials.names.push_back(material.name.toStdString());
_materialResource->parsedMaterials.networkMaterials[material.name.toStdString()] = std::make_shared<NetworkMaterial>(material, baseURL);


@ -32,7 +32,7 @@ public:
bool isURL() const { return _isURL; }
QString getBakedMaterialData() const { return _bakedMaterialData; }
void setMaterials(const QHash<QString, hfm::Material>& materials, const QString& baseURL);
void setMaterials(const std::vector<hfm::Material>& materials, const QString& baseURL);
void setMaterials(const NetworkMaterialResourcePointer& materialResource);
NetworkMaterialResourcePointer getNetworkMaterialResource() const { return _materialResource; }


@ -265,7 +265,7 @@ void ModelBaker::bakeSourceCopy() {
return;
}
if (!_hfmModel->materials.isEmpty()) {
if (!_hfmModel->materials.empty()) {
_materialBaker = QSharedPointer<MaterialBaker>(
new MaterialBaker(_modelURL.fileName(), true, _bakedOutputDir),
&MaterialBaker::deleteLater


@ -37,10 +37,10 @@ const QByteArray MESH = "Mesh";
void OBJBaker::bakeProcessedSource(const hfm::Model::Pointer& hfmModel, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists) {
// Write OBJ Data as FBX tree nodes
createFBXNodeTree(_rootNode, hfmModel, dracoMeshes[0]);
createFBXNodeTree(_rootNode, hfmModel, dracoMeshes[0], dracoMaterialLists[0]);
}
void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh) {
void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh, const std::vector<hifi::ByteArray>& dracoMaterialList) {
// Make all generated nodes children of rootNode
rootNode.children = { FBXNode(), FBXNode(), FBXNode() };
FBXNode& globalSettingsNode = rootNode.children[0];
@ -100,19 +100,22 @@ void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& h
}
// Generating Objects node's child - Material node
auto& meshParts = hfmModel->meshes[0].parts;
for (auto& meshPart : meshParts) {
// Each material ID should only appear once thanks to deduplication in BuildDracoMeshTask, but we want to make sure they are created in the right order
std::unordered_map<QString, uint32_t> materialIDToIndex;
for (uint32_t materialIndex = 0; materialIndex < hfmModel->materials.size(); ++materialIndex) {
const auto& material = hfmModel->materials[materialIndex];
materialIDToIndex[material.materialID] = materialIndex;
}
// Create nodes for each material in the material list
for (const auto& dracoMaterial : dracoMaterialList) {
const QString materialID = QString(dracoMaterial);
const uint32_t materialIndex = materialIDToIndex[materialID];
const auto& material = hfmModel->materials[materialIndex];
FBXNode materialNode;
materialNode.name = MATERIAL_NODE_NAME;
if (hfmModel->materials.size() == 1) {
// case when no material information is provided, OBJSerializer considers it as a single default material
for (auto& materialID : hfmModel->materials.keys()) {
setMaterialNodeProperties(materialNode, materialID, hfmModel);
}
} else {
setMaterialNodeProperties(materialNode, meshPart.materialID, hfmModel);
}
setMaterialNodeProperties(materialNode, material.materialID, material, hfmModel);
objectNode.children.append(materialNode);
}
@ -153,12 +156,10 @@ void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& h
}
// Set properties for material nodes
void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material, const hfm::Model::Pointer& hfmModel) {
void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, const QString& materialName, const hfm::Material& material, const hfm::Model::Pointer& hfmModel) {
auto materialID = nextNodeID();
_materialIDs.push_back(materialID);
materialNode.properties = { materialID, material, MESH };
HFMMaterial currentMaterial = hfmModel->materials[material];
materialNode.properties = { materialID, materialName, MESH };
// Setting the hierarchy: Material -> Properties70 -> P -> Properties
FBXNode properties70Node;
@ -170,7 +171,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material
pNodeDiffuseColor.name = P_NODE_NAME;
pNodeDiffuseColor.properties.append({
"DiffuseColor", "Color", "", "A",
currentMaterial.diffuseColor[0], currentMaterial.diffuseColor[1], currentMaterial.diffuseColor[2]
material.diffuseColor[0], material.diffuseColor[1], material.diffuseColor[2]
});
}
properties70Node.children.append(pNodeDiffuseColor);
@ -181,7 +182,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material
pNodeSpecularColor.name = P_NODE_NAME;
pNodeSpecularColor.properties.append({
"SpecularColor", "Color", "", "A",
currentMaterial.specularColor[0], currentMaterial.specularColor[1], currentMaterial.specularColor[2]
material.specularColor[0], material.specularColor[1], material.specularColor[2]
});
}
properties70Node.children.append(pNodeSpecularColor);
@ -192,7 +193,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material
pNodeShininess.name = P_NODE_NAME;
pNodeShininess.properties.append({
"Shininess", "Number", "", "A",
currentMaterial.shininess
material.shininess
});
}
properties70Node.children.append(pNodeShininess);
@ -203,7 +204,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material
pNodeOpacity.name = P_NODE_NAME;
pNodeOpacity.properties.append({
"Opacity", "Number", "", "A",
currentMaterial.opacity
material.opacity
});
}
properties70Node.children.append(pNodeOpacity);


@ -27,8 +27,8 @@ protected:
virtual void bakeProcessedSource(const hfm::Model::Pointer& hfmModel, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists) override;
private:
void createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh);
void setMaterialNodeProperties(FBXNode& materialNode, QString material, const hfm::Model::Pointer& hfmModel);
void createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh, const std::vector<hifi::ByteArray>& dracoMaterialList);
void setMaterialNodeProperties(FBXNode& materialNode, const QString& materialName, const hfm::Material& material, const hfm::Model::Pointer& hfmModel);
NodeID nextNodeID() { return _nodeID++; }
NodeID _nodeID { 0 };


@ -282,7 +282,7 @@ bool RenderableModelEntityItem::findDetailedParabolaIntersection(const glm::vec3
}
void RenderableModelEntityItem::fetchCollisionGeometryResource() {
_collisionGeometryResource = DependencyManager::get<ModelCache>()->getCollisionGeometryResource(getCollisionShapeURL());
_collisionGeometryResource = DependencyManager::get<ModelCache>()->getCollisionModelResource(getCollisionShapeURL());
}
bool RenderableModelEntityItem::unableToLoadCollisionShape() {
@ -357,7 +357,6 @@ bool RenderableModelEntityItem::isReadyToComputeShape() const {
void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
const uint32_t TRIANGLE_STRIDE = 3;
const uint32_t QUAD_STRIDE = 4;
ShapeType type = getShapeType();
@ -380,59 +379,35 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection();
pointCollection.clear();
uint32_t i = 0;
size_t numParts = 0;
for (const HFMMesh& mesh : collisionGeometry.meshes) {
numParts += mesh.triangleListMesh.parts.size();
}
pointCollection.reserve(numParts);
// the way OBJ files get read, each section under a "g" line is its own meshPart. We only expect
// to find one actual "mesh" (with one or more meshParts in it), but we loop over the meshes, just in case.
foreach (const HFMMesh& mesh, collisionGeometry.meshes) {
for (const HFMMesh& mesh : collisionGeometry.meshes) {
const hfm::TriangleListMesh& triangleListMesh = mesh.triangleListMesh;
// each meshPart is a convex hull
foreach (const HFMMeshPart &meshPart, mesh.parts) {
pointCollection.push_back(QVector<glm::vec3>());
ShapeInfo::PointList& pointsInPart = pointCollection[i];
for (const glm::ivec2& part : triangleListMesh.parts) {
// run through all the triangles and (uniquely) add each point to the hull
uint32_t numIndices = (uint32_t)meshPart.triangleIndices.size();
pointCollection.emplace_back();
ShapeInfo::PointList& pointsInPart = pointCollection.back();
uint32_t numIndices = (uint32_t)part.y;
// TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
//assert(numIndices % TRIANGLE_STRIDE == 0);
numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
for (uint32_t j = 0; j < numIndices; j += TRIANGLE_STRIDE) {
glm::vec3 p0 = mesh.vertices[meshPart.triangleIndices[j]];
glm::vec3 p1 = mesh.vertices[meshPart.triangleIndices[j + 1]];
glm::vec3 p2 = mesh.vertices[meshPart.triangleIndices[j + 2]];
if (!pointsInPart.contains(p0)) {
pointsInPart << p0;
}
if (!pointsInPart.contains(p1)) {
pointsInPart << p1;
}
if (!pointsInPart.contains(p2)) {
pointsInPart << p2;
}
}
// run through all the quads and (uniquely) add each point to the hull
numIndices = (uint32_t)meshPart.quadIndices.size();
// TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
//assert(numIndices % QUAD_STRIDE == 0);
numIndices -= numIndices % QUAD_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
for (uint32_t j = 0; j < numIndices; j += QUAD_STRIDE) {
glm::vec3 p0 = mesh.vertices[meshPart.quadIndices[j]];
glm::vec3 p1 = mesh.vertices[meshPart.quadIndices[j + 1]];
glm::vec3 p2 = mesh.vertices[meshPart.quadIndices[j + 2]];
glm::vec3 p3 = mesh.vertices[meshPart.quadIndices[j + 3]];
if (!pointsInPart.contains(p0)) {
pointsInPart << p0;
}
if (!pointsInPart.contains(p1)) {
pointsInPart << p1;
}
if (!pointsInPart.contains(p2)) {
pointsInPart << p2;
}
if (!pointsInPart.contains(p3)) {
pointsInPart << p3;
uint32_t indexStart = (uint32_t)part.x;
uint32_t indexEnd = indexStart + numIndices;
for (uint32_t j = indexStart; j < indexEnd; ++j) {
// NOTE: It seems odd to skip vertices when initializing a btConvexHullShape, but let's keep the behavior similar to the old behavior for now
glm::vec3 point = triangleListMesh.vertices[triangleListMesh.indices[j]];
if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), point) == pointsInPart.cend()) {
pointsInPart.push_back(point);
}
}
@ -441,7 +416,6 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
pointCollection.pop_back();
continue;
}
++i;
}
}
@ -456,8 +430,8 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
// multiply each point by scale before handing the point-set off to the physics engine.
// also determine the extents of the collision model.
glm::vec3 registrationOffset = dimensions * (ENTITY_ITEM_DEFAULT_REGISTRATION_POINT - getRegistrationPoint());
for (int32_t i = 0; i < pointCollection.size(); i++) {
for (int32_t j = 0; j < pointCollection[i].size(); j++) {
for (size_t i = 0; i < pointCollection.size(); i++) {
for (size_t j = 0; j < pointCollection[i].size(); j++) {
// back compensate for registration so we can apply that offset to the shapeInfo later
pointCollection[i][j] = scaleToFit * (pointCollection[i][j] + model->getOffset()) - registrationOffset;
}
@ -471,46 +445,63 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
model->updateGeometry();
// compute meshPart local transforms
QVector<glm::mat4> localTransforms;
const HFMModel& hfmModel = model->getHFMModel();
int numHFMMeshes = hfmModel.meshes.size();
int totalNumVertices = 0;
glm::vec3 dimensions = getScaledDimensions();
glm::mat4 invRegistraionOffset = glm::translate(dimensions * (getRegistrationPoint() - ENTITY_ITEM_DEFAULT_REGISTRATION_POINT));
for (int i = 0; i < numHFMMeshes; i++) {
const HFMMesh& mesh = hfmModel.meshes.at(i);
if (mesh.clusters.size() > 0) {
const HFMCluster& cluster = mesh.clusters.at(0);
auto jointMatrix = model->getRig().getJointTransform(cluster.jointIndex);
// we backtranslate by the registration offset so we can apply that offset to the shapeInfo later
localTransforms.push_back(invRegistraionOffset * jointMatrix * cluster.inverseBindMatrix);
} else {
localTransforms.push_back(invRegistraionOffset);
}
totalNumVertices += mesh.vertices.size();
ShapeInfo::TriangleIndices& triangleIndices = shapeInfo.getTriangleIndices();
triangleIndices.clear();
Extents extents;
int32_t shapeCount = 0;
int32_t instanceIndex = 0;
// NOTE: Each pointCollection corresponds to a mesh. Therefore, we should have one pointCollection per mesh instance
// A mesh instance is a unique combination of mesh/transform. For every mesh instance, there are as many shapes as there are parts for that mesh.
// We assume the shapes are grouped by mesh instance, and the group contains one of each mesh part.
uint32_t numInstances = 0;
std::vector<std::vector<std::vector<uint32_t>>> shapesPerInstancePerMesh;
shapesPerInstancePerMesh.resize(hfmModel.meshes.size());
for (uint32_t shapeIndex = 0; shapeIndex < hfmModel.shapes.size();) {
const auto& shape = hfmModel.shapes[shapeIndex];
uint32_t meshIndex = shape.mesh;
const auto& mesh = hfmModel.meshes[meshIndex];
uint32_t numMeshParts = (uint32_t)mesh.parts.size();
assert(numMeshParts != 0);
auto& shapesPerInstance = shapesPerInstancePerMesh[meshIndex];
shapesPerInstance.emplace_back();
auto& shapes = shapesPerInstance.back();
shapes.resize(numMeshParts);
std::iota(shapes.begin(), shapes.end(), shapeIndex);
shapeIndex += numMeshParts;
++numInstances;
}
const int32_t MAX_VERTICES_PER_STATIC_MESH = 1e6;
if (totalNumVertices > MAX_VERTICES_PER_STATIC_MESH) {
qWarning() << "model" << getModelURL() << "has too many vertices" << totalNumVertices << "and will collide as a box.";
const uint32_t MAX_ALLOWED_MESH_COUNT = 1000;
if (numInstances > MAX_ALLOWED_MESH_COUNT) {
// too many will cause the deadlock timer to throw...
qWarning() << "model" << getModelURL() << "has too many collision meshes" << numInstances << "and will collide as a box.";
shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions);
return;
}
std::vector<std::shared_ptr<const graphics::Mesh>> meshes;
if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
auto& hfmMeshes = _collisionGeometryResource->getHFMModel().meshes;
meshes.reserve(hfmMeshes.size());
for (auto& hfmMesh : hfmMeshes) {
meshes.push_back(hfmMesh._mesh);
size_t totalNumVertices = 0;
for (const auto& shapesPerInstance : shapesPerInstancePerMesh) {
for (const auto& instanceShapes : shapesPerInstance) {
const uint32_t firstShapeIndex = instanceShapes.front();
const auto& firstShape = hfmModel.shapes[firstShapeIndex];
const auto& mesh = hfmModel.meshes[firstShape.mesh];
const auto& triangleListMesh = mesh.triangleListMesh;
// Added once per instance per mesh
totalNumVertices += triangleListMesh.vertices.size();
}
} else {
meshes = model->getGeometry()->getMeshes();
}
int32_t numMeshes = (int32_t)(meshes.size());
const int MAX_ALLOWED_MESH_COUNT = 1000;
if (numMeshes > MAX_ALLOWED_MESH_COUNT) {
// too many will cause the deadlock timer to throw...
const size_t MAX_VERTICES_PER_STATIC_MESH = 1e6;
if (totalNumVertices > MAX_VERTICES_PER_STATIC_MESH) {
qWarning() << "model" << getModelURL() << "has too many vertices" << totalNumVertices << "and will collide as a box.";
shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions);
return;
}
@ -518,169 +509,118 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection();
pointCollection.clear();
if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
pointCollection.resize(numMeshes);
pointCollection.resize(numInstances);
} else {
pointCollection.resize(1);
}
ShapeInfo::TriangleIndices& triangleIndices = shapeInfo.getTriangleIndices();
triangleIndices.clear();
for (uint32_t meshIndex = 0; meshIndex < hfmModel.meshes.size(); ++meshIndex) {
const auto& mesh = hfmModel.meshes[meshIndex];
const auto& triangleListMesh = mesh.triangleListMesh;
const auto& vertices = triangleListMesh.vertices;
const auto& indices = triangleListMesh.indices;
const std::vector<glm::ivec2>& parts = triangleListMesh.parts;
Extents extents;
int32_t meshCount = 0;
int32_t pointListIndex = 0;
for (auto& mesh : meshes) {
if (!mesh) {
continue;
}
const gpu::BufferView& vertices = mesh->getVertexBuffer();
const gpu::BufferView& indices = mesh->getIndexBuffer();
const gpu::BufferView& parts = mesh->getPartBuffer();
const auto& shapesPerInstance = shapesPerInstancePerMesh[meshIndex];
for (const std::vector<uint32_t>& instanceShapes : shapesPerInstance) {
ShapeInfo::PointList& points = pointCollection[instanceIndex];
ShapeInfo::PointList& points = pointCollection[pointListIndex];
// reserve room
int32_t sizeToReserve = (int32_t)(vertices.size());
if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
// a list of points for each instance
instanceIndex++;
} else {
// only one list of points
sizeToReserve += (int32_t)((gpu::Size)points.size());
}
points.reserve(sizeToReserve);
// get mesh instance transform
const uint32_t meshIndexOffset = (uint32_t)points.size();
const uint32_t instanceShapeIndexForTransform = instanceShapes.front();
const auto& instanceShapeForTransform = hfmModel.shapes[instanceShapeIndexForTransform];
glm::mat4 localTransform;
if (instanceShapeForTransform.joint != hfm::UNDEFINED_KEY) {
auto jointMatrix = model->getRig().getJointTransform(instanceShapeForTransform.joint);
// we backtranslate by the registration offset so we can apply that offset to the shapeInfo later
if (instanceShapeForTransform.skinDeformer != hfm::UNDEFINED_KEY) {
const auto& skinDeformer = hfmModel.skinDeformers[instanceShapeForTransform.skinDeformer];
glm::mat4 inverseBindMatrix;
if (!skinDeformer.clusters.empty()) {
const auto& cluster = skinDeformer.clusters.back();
inverseBindMatrix = cluster.inverseBindMatrix;
}
localTransform = invRegistraionOffset * jointMatrix * inverseBindMatrix;
} else {
localTransform = invRegistraionOffset * jointMatrix;
}
} else {
localTransform = invRegistraionOffset;
}
// reserve room
int32_t sizeToReserve = (int32_t)(vertices.getNumElements());
if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
// a list of points for each mesh
pointListIndex++;
} else {
// only one list of points
sizeToReserve += (int32_t)((gpu::Size)points.size());
}
points.reserve(sizeToReserve);
// copy points
auto vertexItr = vertices.cbegin();
while (vertexItr != vertices.cend()) {
glm::vec3 point = extractTranslation(localTransform * glm::translate(*vertexItr));
points.push_back(point);
++vertexItr;
}
for (const auto& instanceShapeIndex : instanceShapes) {
const auto& instanceShape = hfmModel.shapes[instanceShapeIndex];
extents.addExtents(instanceShape.transformedExtents);
}
// copy points
uint32_t meshIndexOffset = (uint32_t)points.size();
const glm::mat4& localTransform = localTransforms[meshCount];
gpu::BufferView::Iterator<const glm::vec3> vertexItr = vertices.cbegin<const glm::vec3>();
while (vertexItr != vertices.cend<const glm::vec3>()) {
glm::vec3 point = extractTranslation(localTransform * glm::translate(*vertexItr));
points.push_back(point);
extents.addPoint(point);
++vertexItr;
}
if (type == SHAPE_TYPE_STATIC_MESH) {
// copy into triangleIndices
triangleIndices.reserve((int32_t)((gpu::Size)(triangleIndices.size()) + indices.getNumElements()));
gpu::BufferView::Iterator<const graphics::Mesh::Part> partItr = parts.cbegin<const graphics::Mesh::Part>();
while (partItr != parts.cend<const graphics::Mesh::Part>()) {
auto numIndices = partItr->_numIndices;
if (partItr->_topology == graphics::Mesh::TRIANGLES) {
if (type == SHAPE_TYPE_STATIC_MESH) {
// copy into triangleIndices
triangleIndices.reserve((int32_t)((gpu::Size)(triangleIndices.size()) + indices.size()));
auto partItr = parts.cbegin();
while (partItr != parts.cend()) {
auto numIndices = partItr->y;
// TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
//assert(numIndices % TRIANGLE_STRIDE == 0);
numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
auto indexItr = indices.cbegin<const gpu::BufferView::Index>() + partItr->_startIndex;
auto indexItr = indices.cbegin() + partItr->x;
auto indexEnd = indexItr + numIndices;
while (indexItr != indexEnd) {
triangleIndices.push_back(*indexItr + meshIndexOffset);
++indexItr;
}
} else if (partItr->_topology == graphics::Mesh::TRIANGLE_STRIP) {
// TODO: resurrect assert after we start sanitizing HFMMesh higher up
//assert(numIndices > 2);
uint32_t approxNumIndices = TRIANGLE_STRIDE * numIndices;
if (approxNumIndices > (uint32_t)(triangleIndices.capacity() - triangleIndices.size())) {
// we underestimated the final size of triangleIndices so we pre-emptively expand it
triangleIndices.reserve(triangleIndices.size() + approxNumIndices);
}
auto indexItr = indices.cbegin<const gpu::BufferView::Index>() + partItr->_startIndex;
auto indexEnd = indexItr + (numIndices - 2);
// first triangle uses the first three indices
triangleIndices.push_back(*(indexItr++) + meshIndexOffset);
triangleIndices.push_back(*(indexItr++) + meshIndexOffset);
triangleIndices.push_back(*(indexItr++) + meshIndexOffset);
// the rest use previous and next index
uint32_t triangleCount = 1;
while (indexItr != indexEnd) {
if ((*indexItr) != graphics::Mesh::PRIMITIVE_RESTART_INDEX) {
if (triangleCount % 2 == 0) {
// even triangles use first two indices in order
triangleIndices.push_back(*(indexItr - 2) + meshIndexOffset);
triangleIndices.push_back(*(indexItr - 1) + meshIndexOffset);
} else {
// odd triangles swap order of first two indices
triangleIndices.push_back(*(indexItr - 1) + meshIndexOffset);
triangleIndices.push_back(*(indexItr - 2) + meshIndexOffset);
}
triangleIndices.push_back(*indexItr + meshIndexOffset);
++triangleCount;
}
++indexItr;
}
++partItr;
}
++partItr;
}
} else if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
// for each mesh copy unique part indices, separated by special bogus (flag) index values
gpu::BufferView::Iterator<const graphics::Mesh::Part> partItr = parts.cbegin<const graphics::Mesh::Part>();
while (partItr != parts.cend<const graphics::Mesh::Part>()) {
// collect unique list of indices for this part
std::set<int32_t> uniqueIndices;
auto numIndices = partItr->_numIndices;
if (partItr->_topology == graphics::Mesh::TRIANGLES) {
} else if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
// for each mesh copy unique part indices, separated by special bogus (flag) index values
auto partItr = parts.cbegin();
while (partItr != parts.cend()) {
// collect unique list of indices for this part
std::set<int32_t> uniqueIndices;
auto numIndices = partItr->y;
// TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
//assert(numIndices% TRIANGLE_STRIDE == 0);
numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
auto indexItr = indices.cbegin<const gpu::BufferView::Index>() + partItr->_startIndex;
auto indexItr = indices.cbegin() + partItr->x;
auto indexEnd = indexItr + numIndices;
while (indexItr != indexEnd) {
uniqueIndices.insert(*indexItr);
++indexItr;
}
} else if (partItr->_topology == graphics::Mesh::TRIANGLE_STRIP) {
// TODO: resurrect assert after we start sanitizing HFMMesh higher up
//assert(numIndices > TRIANGLE_STRIDE - 1);
auto indexItr = indices.cbegin<const gpu::BufferView::Index>() + partItr->_startIndex;
auto indexEnd = indexItr + (numIndices - 2);
// first triangle uses the first three indices
uniqueIndices.insert(*(indexItr++));
uniqueIndices.insert(*(indexItr++));
uniqueIndices.insert(*(indexItr++));
// the rest use previous and next index
uint32_t triangleCount = 1;
while (indexItr != indexEnd) {
if ((*indexItr) != graphics::Mesh::PRIMITIVE_RESTART_INDEX) {
if (triangleCount % 2 == 0) {
// EVEN triangles use first two indices in order
uniqueIndices.insert(*(indexItr - 2));
uniqueIndices.insert(*(indexItr - 1));
} else {
// ODD triangles swap order of first two indices
uniqueIndices.insert(*(indexItr - 1));
uniqueIndices.insert(*(indexItr - 2));
}
uniqueIndices.insert(*indexItr);
++triangleCount;
}
++indexItr;
// store uniqueIndices in triangleIndices
triangleIndices.reserve(triangleIndices.size() + (int32_t)uniqueIndices.size());
for (auto index : uniqueIndices) {
triangleIndices.push_back(index);
}
}
// flag end of part
triangleIndices.push_back(END_OF_MESH_PART);
// store uniqueIndices in triangleIndices
triangleIndices.reserve(triangleIndices.size() + (int32_t)uniqueIndices.size());
for (auto index : uniqueIndices) {
triangleIndices.push_back(index);
++partItr;
}
// flag end of part
triangleIndices.push_back(END_OF_MESH_PART);
++partItr;
// flag end of mesh
triangleIndices.push_back(END_OF_MESH);
}
// flag end of mesh
triangleIndices.push_back(END_OF_MESH);
}
++meshCount;
++shapeCount;
}
// scale and shift
@ -692,7 +632,7 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
}
}
for (auto points : pointCollection) {
for (int32_t i = 0; i < points.size(); ++i) {
for (size_t i = 0; i < points.size(); ++i) {
points[i] = (points[i] * scaleToFit);
}
}
@ -1431,7 +1371,7 @@ void ModelEntityRenderer::doRenderUpdateSynchronousTyped(const ScenePointer& sce
}
}
if (!_texturesLoaded && model->getGeometry() && model->getGeometry()->areTexturesLoaded()) {
if (!_texturesLoaded && model->getNetworkModel() && model->getNetworkModel()->areTexturesLoaded()) {
withWriteLock([&] {
_texturesLoaded = true;
});

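The NOTE near the top of the computeShapeInfo() rewrite above assumes that hfmModel.shapes lists one shape per mesh-instance part, with all parts of a given instance stored contiguously. A condensed, self-contained illustration of that grouping convention, using simplified stand-in types (MiniMesh and MiniShape are not the real hfm classes):

#include <cassert>
#include <cstdint>
#include <numeric>
#include <vector>

struct MiniMesh { uint32_t numParts; };
struct MiniShape { uint32_t mesh; };

// For each mesh, returns a list of instances; each instance is the list of shape
// indices covering that instance's parts, in order.
std::vector<std::vector<std::vector<uint32_t>>> groupShapesByMeshInstance(
        const std::vector<MiniMesh>& meshes, const std::vector<MiniShape>& shapes) {
    std::vector<std::vector<std::vector<uint32_t>>> shapesPerInstancePerMesh(meshes.size());
    for (uint32_t shapeIndex = 0; shapeIndex < (uint32_t)shapes.size();) {
        const uint32_t meshIndex = shapes[shapeIndex].mesh;
        const uint32_t numMeshParts = meshes[meshIndex].numParts;
        assert(numMeshParts != 0); // same precondition the hunk above asserts
        auto& shapesPerInstance = shapesPerInstancePerMesh[meshIndex];
        shapesPerInstance.emplace_back(numMeshParts);   // one slot per part of this instance
        std::iota(shapesPerInstance.back().begin(), shapesPerInstance.back().end(), shapeIndex);
        shapeIndex += numMeshParts; // the next instance (possibly of another mesh) starts here
    }
    return shapesPerInstancePerMesh;
}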

@ -120,7 +120,7 @@ private:
bool readyToAnimate() const;
void fetchCollisionGeometryResource();
GeometryResource::Pointer _collisionGeometryResource;
ModelResource::Pointer _collisionGeometryResource;
std::vector<int> _jointMap;
QVariantMap _originalTextures;
bool _jointMapCompleted { false };


@ -194,7 +194,7 @@ float importanceSample3DDimension(float startDim) {
}
ParticleEffectEntityRenderer::CpuParticle ParticleEffectEntityRenderer::createParticle(uint64_t now, const Transform& baseTransform, const particle::Properties& particleProperties,
const ShapeType& shapeType, const GeometryResource::Pointer& geometryResource,
const ShapeType& shapeType, const ModelResource::Pointer& geometryResource,
const TriangleInfo& triangleInfo) {
CpuParticle particle;
@ -379,7 +379,7 @@ void ParticleEffectEntityRenderer::stepSimulation() {
particle::Properties particleProperties;
ShapeType shapeType;
GeometryResource::Pointer geometryResource;
ModelResource::Pointer geometryResource;
withReadLock([&] {
particleProperties = _particleProperties;
shapeType = _shapeType;
@ -482,7 +482,7 @@ void ParticleEffectEntityRenderer::fetchGeometryResource() {
if (hullURL.isEmpty()) {
_geometryResource.reset();
} else {
_geometryResource = DependencyManager::get<ModelCache>()->getCollisionGeometryResource(hullURL);
_geometryResource = DependencyManager::get<ModelCache>()->getCollisionModelResource(hullURL);
}
}
@ -490,7 +490,7 @@ void ParticleEffectEntityRenderer::fetchGeometryResource() {
void ParticleEffectEntityRenderer::computeTriangles(const hfm::Model& hfmModel) {
PROFILE_RANGE(render, __FUNCTION__);
int numberOfMeshes = hfmModel.meshes.size();
uint32_t numberOfMeshes = (uint32_t)hfmModel.meshes.size();
_hasComputedTriangles = true;
_triangleInfo.triangles.clear();
@ -500,11 +500,11 @@ void ParticleEffectEntityRenderer::computeTriangles(const hfm::Model& hfmModel)
float minArea = FLT_MAX;
AABox bounds;
for (int i = 0; i < numberOfMeshes; i++) {
for (uint32_t i = 0; i < numberOfMeshes; i++) {
const HFMMesh& mesh = hfmModel.meshes.at(i);
const int numberOfParts = mesh.parts.size();
for (int j = 0; j < numberOfParts; j++) {
const uint32_t numberOfParts = (uint32_t)mesh.parts.size();
for (uint32_t j = 0; j < numberOfParts; j++) {
const HFMMeshPart& part = mesh.parts.at(j);
const int INDICES_PER_TRIANGLE = 3;


@ -89,7 +89,7 @@ private:
} _triangleInfo;
static CpuParticle createParticle(uint64_t now, const Transform& baseTransform, const particle::Properties& particleProperties,
const ShapeType& shapeType, const GeometryResource::Pointer& geometryResource,
const ShapeType& shapeType, const ModelResource::Pointer& geometryResource,
const TriangleInfo& triangleInfo);
void stepSimulation();
@ -108,7 +108,7 @@ private:
QString _compoundShapeURL;
void fetchGeometryResource();
GeometryResource::Pointer _geometryResource;
ModelResource::Pointer _geometryResource;
NetworkTexturePointer _networkTexture;
ScenePointer _scene;


@ -1429,14 +1429,13 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() {
QtConcurrent::run([entity, voxelSurfaceStyle, voxelVolumeSize, mesh] {
auto polyVoxEntity = std::static_pointer_cast<RenderablePolyVoxEntityItem>(entity);
QVector<QVector<glm::vec3>> pointCollection;
ShapeInfo::PointCollection pointCollection;
AABox box;
glm::mat4 vtoM = std::static_pointer_cast<RenderablePolyVoxEntityItem>(entity)->voxelToLocalMatrix();
if (voxelSurfaceStyle == PolyVoxEntityItem::SURFACE_MARCHING_CUBES ||
voxelSurfaceStyle == PolyVoxEntityItem::SURFACE_EDGED_MARCHING_CUBES) {
// pull each triangle in the mesh into a polyhedron which can be collided with
unsigned int i = 0;
const gpu::BufferView& vertexBufferView = mesh->getVertexBuffer();
const gpu::BufferView& indexBufferView = mesh->getIndexBuffer();
@ -1465,19 +1464,16 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() {
box += p2Model;
box += p3Model;
QVector<glm::vec3> pointsInPart;
pointsInPart << p0Model;
pointsInPart << p1Model;
pointsInPart << p2Model;
pointsInPart << p3Model;
// add next convex hull
QVector<glm::vec3> newMeshPoints;
pointCollection << newMeshPoints;
// add points to the new convex hull
pointCollection[i++] << pointsInPart;
ShapeInfo::PointList pointsInPart;
pointsInPart.push_back(p0Model);
pointsInPart.push_back(p1Model);
pointsInPart.push_back(p2Model);
pointsInPart.push_back(p3Model);
// add points to a new convex hull
pointCollection.push_back(pointsInPart);
}
} else {
unsigned int i = 0;
polyVoxEntity->forEachVoxelValue(voxelVolumeSize, [&](const ivec3& v, uint8_t value) {
if (value > 0) {
const auto& x = v.x;
@ -1496,7 +1492,7 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() {
return;
}
QVector<glm::vec3> pointsInPart;
ShapeInfo::PointList pointsInPart;
float offL = -0.5f;
float offH = 0.5f;
@ -1523,20 +1519,17 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() {
box += p110;
box += p111;
pointsInPart << p000;
pointsInPart << p001;
pointsInPart << p010;
pointsInPart << p011;
pointsInPart << p100;
pointsInPart << p101;
pointsInPart << p110;
pointsInPart << p111;
pointsInPart.push_back(p000);
pointsInPart.push_back(p001);
pointsInPart.push_back(p010);
pointsInPart.push_back(p011);
pointsInPart.push_back(p100);
pointsInPart.push_back(p101);
pointsInPart.push_back(p110);
pointsInPart.push_back(p111);
// add next convex hull
QVector<glm::vec3> newMeshPoints;
pointCollection << newMeshPoints;
// add points to the new convex hull
pointCollection[i++] << pointsInPart;
// add points to a new convex hull
pointCollection.push_back(pointsInPart);
}
});
}
@ -1546,7 +1539,7 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() {
void RenderablePolyVoxEntityItem::setCollisionPoints(ShapeInfo::PointCollection pointCollection, AABox box) {
// this catches the payload from computeShapeInfoWorker
if (pointCollection.isEmpty()) {
if (pointCollection.empty()) {
EntityItem::computeShapeInfo(_shapeInfo);
withWriteLock([&] {
_shapeReady = true;

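The PolyVox hunk above drops QVector in favor of ShapeInfo's own point containers, which this diff treats as std:: containers elsewhere (empty(), push_back(), size_t indexing). A plausible reading of those aliases, written as a standalone sketch rather than the actual ShapeInfo declaration:

#include <vector>
#include <glm/glm.hpp>

namespace shapeinfo_sketch {
using PointList = std::vector<glm::vec3>;        // points of one convex hull
using PointCollection = std::vector<PointList>;  // one PointList per hull / mesh instance
} // namespace shapeinfo_sketch

Under that reading, pointCollection.push_back(pointsInPart) above appends one hull per marching-cubes quad or per voxel box.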

@ -345,7 +345,7 @@ bool ZoneEntityItem::findDetailedParabolaIntersection(const glm::vec3& origin, c
}
bool ZoneEntityItem::contains(const glm::vec3& point) const {
GeometryResource::Pointer resource = _shapeResource;
ModelResource::Pointer resource = _shapeResource;
if (_shapeType == SHAPE_TYPE_COMPOUND && resource) {
if (resource->isLoaded()) {
const HFMModel& hfmModel = resource->getHFMModel();
@ -462,7 +462,7 @@ void ZoneEntityItem::fetchCollisionGeometryResource() {
if (hullURL.isEmpty()) {
_shapeResource.reset();
} else {
_shapeResource = DependencyManager::get<ModelCache>()->getCollisionGeometryResource(hullURL);
_shapeResource = DependencyManager::get<ModelCache>()->getCollisionModelResource(hullURL);
}
}


@ -167,7 +167,7 @@ protected:
static bool _zonesArePickable;
void fetchCollisionGeometryResource();
GeometryResource::Pointer _shapeResource;
ModelResource::Pointer _shapeResource;
};


@ -20,6 +20,7 @@
#include <BlendshapeConstants.h>
#include <hfm/ModelFormatLogging.h>
#include <hfm/HFMModelMath.h>
// TOOL: Uncomment the following line to enable the filtering of all the unknown fields of a node so we can break point easily while loading a model with problems...
//#define DEBUG_FBXSERIALIZER
@ -145,8 +146,9 @@ public:
bool isLimbNode; // is this FBXModel transform is a "LimbNode" i.e. a joint
};
glm::mat4 getGlobalTransform(const QMultiMap<QString, QString>& _connectionParentMap,
const QHash<QString, FBXModel>& fbxModels, QString nodeID, bool mixamoHack, const QString& url) {
const QHash<QString, FBXModel>& fbxModels, QString nodeID, bool mixamoHack, const QString& url) {
glm::mat4 globalTransform;
QVector<QString> visitedNodes; // Used to prevent following a cycle
while (!nodeID.isNull()) {
@ -166,12 +168,11 @@ glm::mat4 getGlobalTransform(const QMultiMap<QString, QString>& _connectionParen
}
QList<QString> parentIDs = _connectionParentMap.values(nodeID);
nodeID = QString();
foreach (const QString& parentID, parentIDs) {
foreach(const QString& parentID, parentIDs) {
if (visitedNodes.contains(parentID)) {
qCWarning(modelformat) << "Ignoring loop detected in FBX connection map for" << url;
continue;
}
if (fbxModels.contains(parentID)) {
nodeID = parentID;
break;
@ -181,6 +182,21 @@ glm::mat4 getGlobalTransform(const QMultiMap<QString, QString>& _connectionParen
return globalTransform;
}
std::vector<QString> getModelIDsForMeshID(const QString& meshID, const QHash<QString, FBXModel>& fbxModels, const QMultiMap<QString, QString>& _connectionParentMap) {
std::vector<QString> modelsForMesh;
if (fbxModels.contains(meshID)) {
modelsForMesh.push_back(meshID);
} else {
// This mesh may have more than one parent model, with different material and transform, representing a different instance of the mesh
for (const auto& parentID : _connectionParentMap.values(meshID)) {
if (fbxModels.contains(parentID)) {
modelsForMesh.push_back(parentID);
}
}
}
return modelsForMesh;
}
class ExtractedBlendshape {
public:
QString id;
@ -404,7 +420,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
QVector<ExtractedBlendshape> blendshapes;
QHash<QString, FBXModel> fbxModels;
QHash<QString, Cluster> clusters;
QHash<QString, Cluster> fbxClusters;
QHash<QString, AnimationCurve> animationCurves;
QHash<QString, QString> typeFlags;
@ -515,8 +531,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
if (object.properties.at(2) == "Mesh") {
meshes.insert(getID(object.properties), extractMesh(object, meshIndex, deduplicateIndices));
} else { // object.properties.at(2) == "Shape"
ExtractedBlendshape extracted = { getID(object.properties), extractBlendshape(object) };
blendshapes.append(extracted);
ExtractedBlendshape blendshape = { getID(object.properties), extractBlendshape(object) };
blendshapes.append(blendshape);
}
} else if (object.name == "Model") {
QString name = getModelName(object.properties);
@ -690,8 +706,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
// add the blendshapes included in the model, if any
if (mesh) {
foreach (const ExtractedBlendshape& extracted, blendshapes) {
addBlendshapes(extracted, blendshapeIndices.values(extracted.id.toLatin1()), *mesh);
foreach (const ExtractedBlendshape& blendshape, blendshapes) {
addBlendshapes(blendshape, blendshapeIndices.values(blendshape.id.toLatin1()), *mesh);
}
}
@ -1058,9 +1074,9 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
}
}
// skip empty clusters
// skip empty fbxClusters
if (cluster.indices.size() > 0 && cluster.weights.size() > 0) {
clusters.insert(getID(object.properties), cluster);
fbxClusters.insert(getID(object.properties), cluster);
}
} else if (object.properties.last() == "BlendShapeChannel") {
@ -1214,11 +1230,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
}
// assign the blendshapes to their corresponding meshes
foreach (const ExtractedBlendshape& extracted, blendshapes) {
QString blendshapeChannelID = _connectionParentMap.value(extracted.id);
foreach (const ExtractedBlendshape& blendshape, blendshapes) {
QString blendshapeChannelID = _connectionParentMap.value(blendshape.id);
QString blendshapeID = _connectionParentMap.value(blendshapeChannelID);
QString meshID = _connectionParentMap.value(blendshapeID);
addBlendshapes(extracted, blendshapeChannelIndices.values(blendshapeChannelID), meshes[meshID]);
addBlendshapes(blendshape, blendshapeChannelIndices.values(blendshapeChannelID), meshes[meshID]);
}
// get offset transform from mapping
@ -1233,13 +1249,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
QVector<QString> modelIDs;
QSet<QString> remainingFBXModels;
for (QHash<QString, FBXModel>::const_iterator fbxModel = fbxModels.constBegin(); fbxModel != fbxModels.constEnd(); fbxModel++) {
// models with clusters must be parented to the cluster top
// models with fbxClusters must be parented to the cluster top
// Unless the model is a root node.
bool isARootNode = !modelIDs.contains(_connectionParentMap.value(fbxModel.key()));
if (!isARootNode) {
foreach(const QString& deformerID, _connectionChildMap.values(fbxModel.key())) {
foreach(const QString& clusterID, _connectionChildMap.values(deformerID)) {
if (!clusters.contains(clusterID)) {
if (!fbxClusters.contains(clusterID)) {
continue;
}
QString topID = getTopModelID(_connectionParentMap, fbxModels, _connectionChildMap.value(clusterID), url);
@ -1283,12 +1299,18 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
// convert the models to joints
hfmModel.hasSkeletonJoints = false;
bool needMixamoHack = hfmModel.applicationName == "mixamo.com";
foreach (const QString& modelID, modelIDs) {
std::vector<glm::mat4> transformForClusters;
transformForClusters.reserve((size_t)modelIDs.size());
for (const QString& modelID : modelIDs) {
const FBXModel& fbxModel = fbxModels[modelID];
HFMJoint joint;
joint.parentIndex = fbxModel.parentIndex;
int jointIndex = hfmModel.joints.size();
uint32_t jointIndex = (uint32_t)hfmModel.joints.size();
// Copy default joint parameters from model
joint.translation = fbxModel.translation; // these are usually in centimeters
joint.preTransform = fbxModel.preTransform;
@ -1299,35 +1321,62 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
joint.rotationMin = fbxModel.rotationMin;
joint.rotationMax = fbxModel.rotationMax;
joint.hasGeometricOffset = fbxModel.hasGeometricOffset;
joint.geometricTranslation = fbxModel.geometricTranslation;
joint.geometricRotation = fbxModel.geometricRotation;
joint.geometricScaling = fbxModel.geometricScaling;
if (fbxModel.hasGeometricOffset) {
joint.geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation);
}
joint.isSkeletonJoint = fbxModel.isLimbNode;
hfmModel.hasSkeletonJoints = (hfmModel.hasSkeletonJoints || joint.isSkeletonJoint);
joint.name = fbxModel.name;
joint.bindTransformFoundInCluster = false;
// With the basic joint information, we can start to calculate compound transform information
// modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate
// Make adjustments to the static joint properties, and pre-calculate static transforms
if (applyUpAxisZRotation && joint.parentIndex == -1) {
joint.rotation *= upAxisZRotation;
joint.translation = upAxisZRotation * joint.translation;
}
glm::quat combinedRotation = joint.preRotation * joint.rotation * joint.postRotation;
joint.localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(combinedRotation) * joint.postTransform;
if (joint.parentIndex == -1) {
joint.transform = hfmModel.offset * glm::translate(joint.translation) * joint.preTransform *
glm::mat4_cast(combinedRotation) * joint.postTransform;
joint.transform = joint.localTransform;
joint.globalTransform = hfmModel.offset * joint.localTransform;
joint.inverseDefaultRotation = glm::inverse(combinedRotation);
joint.distanceToParent = 0.0f;
} else {
const HFMJoint& parentJoint = hfmModel.joints.at(joint.parentIndex);
joint.transform = parentJoint.transform * glm::translate(joint.translation) *
joint.preTransform * glm::mat4_cast(combinedRotation) * joint.postTransform;
joint.transform = parentJoint.transform * joint.localTransform;
joint.globalTransform = parentJoint.globalTransform * joint.localTransform;
joint.inverseDefaultRotation = glm::inverse(combinedRotation) * parentJoint.inverseDefaultRotation;
joint.distanceToParent = glm::distance(extractTranslation(parentJoint.transform),
extractTranslation(joint.transform));
joint.distanceToParent = glm::distance(extractTranslation(parentJoint.transform), extractTranslation(joint.transform));
}
joint.inverseBindRotation = joint.inverseDefaultRotation;
joint.name = fbxModel.name;
joint.bindTransformFoundInCluster = false;
// If needed, separately calculate the FBX-specific transform used for inverse bind transform calculations
glm::mat4 transformForCluster;
if (applyUpAxisZRotation) {
const glm::quat jointBindCombinedRotation = fbxModel.preRotation * fbxModel.rotation * fbxModel.postRotation;
const glm::mat4 localTransformForCluster = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(jointBindCombinedRotation) * fbxModel.postTransform;
if (fbxModel.parentIndex != -1 && fbxModel.parentIndex < (int)jointIndex && !needMixamoHack) {
const glm::mat4& parentTransformForCluster = transformForClusters[fbxModel.parentIndex];
transformForCluster = parentTransformForCluster * localTransformForCluster;
} else {
transformForCluster = localTransformForCluster;
}
} else {
transformForCluster = joint.transform;
}
transformForClusters.push_back(transformForCluster);
// Initialize animation information next
// And also get the joint poses from the first frame of the animation, if present
QString rotationID = localRotations.value(modelID);
AnimationCurve xRotCurve = animationCurves.value(xComponents.value(rotationID));
@ -1355,13 +1404,10 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
joint.translation = hfmModel.animationFrames[i].translations[jointIndex];
joint.rotation = hfmModel.animationFrames[i].rotations[jointIndex];
}
}
hfmModel.joints.append(joint);
}
// NOTE: shapeVertices are in joint-frame
hfmModel.shapeVertices.resize(std::max(1, hfmModel.joints.size()) );
hfmModel.joints.push_back(joint);
}
hfmModel.bindExtents.reset();
hfmModel.meshExtents.reset();
@ -1400,233 +1446,202 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
}
}
#endif
hfmModel.materials = _hfmMaterials;
std::unordered_map<std::string, uint32_t> materialNameToID;
for (auto materialIt = _hfmMaterials.cbegin(); materialIt != _hfmMaterials.cend(); ++materialIt) {
materialNameToID[materialIt.key().toStdString()] = (uint32_t)hfmModel.materials.size();
hfmModel.materials.push_back(materialIt.value());
}
// see if any materials have texture children
bool materialsHaveTextures = checkMaterialsHaveTextures(_hfmMaterials, _textureFilenames, _connectionChildMap);
for (QMap<QString, ExtractedMesh>::iterator it = meshes.begin(); it != meshes.end(); it++) {
ExtractedMesh& extracted = it.value();
const QString& meshID = it.key();
const ExtractedMesh& extracted = it.value();
const auto& partMaterialTextures = extracted.partMaterialTextures;
extracted.mesh.meshExtents.reset();
uint32_t meshIndex = (uint32_t)hfmModel.meshes.size();
meshIDsToMeshIndices.insert(meshID, meshIndex);
hfmModel.meshes.push_back(extracted.mesh);
hfm::Mesh& mesh = hfmModel.meshes.back();
// accumulate local transforms
QString modelID = fbxModels.contains(it.key()) ? it.key() : _connectionParentMap.value(it.key());
glm::mat4 modelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url);
// compute the mesh extents from the transformed vertices
foreach (const glm::vec3& vertex, extracted.mesh.vertices) {
glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f));
hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex);
hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex);
extracted.mesh.meshExtents.minimum = glm::min(extracted.mesh.meshExtents.minimum, transformedVertex);
extracted.mesh.meshExtents.maximum = glm::max(extracted.mesh.meshExtents.maximum, transformedVertex);
extracted.mesh.modelTransform = modelTransform;
}
// look for textures, material properties
// allocate the Part material library
// NOTE: extracted.partMaterialTextures is empty for FBX_DRACO_MESH_VERSION >= 2. In that case, the mesh part's materialID string is already defined.
int materialIndex = 0;
int textureIndex = 0;
QList<QString> children = _connectionChildMap.values(modelID);
for (int i = children.size() - 1; i >= 0; i--) {
const QString& childID = children.at(i);
if (_hfmMaterials.contains(childID)) {
// the pure material associated with this part
HFMMaterial material = _hfmMaterials.value(childID);
for (int j = 0; j < extracted.partMaterialTextures.size(); j++) {
if (extracted.partMaterialTextures.at(j).first == materialIndex) {
HFMMeshPart& part = extracted.mesh.parts[j];
part.materialID = material.materialID;
}
}
materialIndex++;
} else if (_textureFilenames.contains(childID)) {
// NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale")
// I'm leaving the second parameter blank right now as this code may never be used.
HFMTexture texture = getTexture(childID, "");
for (int j = 0; j < extracted.partMaterialTextures.size(); j++) {
int partTexture = extracted.partMaterialTextures.at(j).second;
if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) {
// TODO: DO something here that replaces this legacy code
// Maybe create a material just for this part with the correct textures?
// extracted.mesh.parts[j].diffuseTexture = texture;
}
}
textureIndex++;
}
}
// find the clusters with which the mesh is associated
QVector<QString> clusterIDs;
foreach (const QString& childID, _connectionChildMap.values(it.key())) {
foreach (const QString& clusterID, _connectionChildMap.values(childID)) {
if (!clusters.contains(clusterID)) {
continue;
}
HFMCluster hfmCluster;
const Cluster& cluster = clusters[clusterID];
clusterIDs.append(clusterID);
// see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion
// of skinning information in FBX
QString jointID = _connectionChildMap.value(clusterID);
hfmCluster.jointIndex = modelIDs.indexOf(jointID);
if (hfmCluster.jointIndex == -1) {
qCDebug(modelformat) << "Joint not in model list: " << jointID;
hfmCluster.jointIndex = 0;
}
hfmCluster.inverseBindMatrix = glm::inverse(cluster.transformLink) * modelTransform;
// slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and
// sometimes floating point fuzz can be introduced after the inverse.
hfmCluster.inverseBindMatrix[0][3] = 0.0f;
hfmCluster.inverseBindMatrix[1][3] = 0.0f;
hfmCluster.inverseBindMatrix[2][3] = 0.0f;
hfmCluster.inverseBindMatrix[3][3] = 1.0f;
hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix);
extracted.mesh.clusters.append(hfmCluster);
// override the bind rotation with the transform link
HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex];
joint.inverseBindRotation = glm::inverse(extractRotation(cluster.transformLink));
joint.bindTransform = cluster.transformLink;
joint.bindTransformFoundInCluster = true;
// update the bind pose extents
glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform);
hfmModel.bindExtents.addPoint(bindTranslation);
}
}
// the last cluster is the root cluster
{
HFMCluster cluster;
cluster.jointIndex = modelIDs.indexOf(modelID);
if (cluster.jointIndex == -1) {
std::vector<QString> instanceModelIDs = getModelIDsForMeshID(meshID, fbxModels, _connectionParentMap);
// meshShapes will be added to hfmModel at the very end
std::vector<hfm::Shape> meshShapes;
meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size());
for (const QString& modelID : instanceModelIDs) {
// The transform node has the same indexing order as the joints
int indexOfModelID = modelIDs.indexOf(modelID);
if (indexOfModelID == -1) {
qCDebug(modelformat) << "Model not in model list: " << modelID;
cluster.jointIndex = 0;
}
extracted.mesh.clusters.append(cluster);
}
const uint32_t transformIndex = (indexOfModelID == -1) ? 0 : (uint32_t)indexOfModelID;
// whether we're skinned depends on how many clusters are attached
if (clusterIDs.size() > 1) {
// this is a multi-mesh joint
const int WEIGHTS_PER_VERTEX = 4;
int numClusterIndices = extracted.mesh.vertices.size() * WEIGHTS_PER_VERTEX;
extracted.mesh.clusterIndices.fill(extracted.mesh.clusters.size() - 1, numClusterIndices);
QVector<float> weightAccumulators;
weightAccumulators.fill(0.0f, numClusterIndices);
// partShapes will be added to meshShapes at the very end
std::vector<hfm::Shape> partShapes { mesh.parts.size() };
for (uint32_t i = 0; i < (uint32_t)partShapes.size(); ++i) {
hfm::Shape& shape = partShapes[i];
shape.mesh = meshIndex;
shape.meshPart = i;
shape.joint = transformIndex;
}
for (int i = 0; i < clusterIDs.size(); i++) {
QString clusterID = clusterIDs.at(i);
const Cluster& cluster = clusters[clusterID];
const HFMCluster& hfmCluster = extracted.mesh.clusters.at(i);
int jointIndex = hfmCluster.jointIndex;
HFMJoint& joint = hfmModel.joints[jointIndex];
// For FBX_DRACO_MESH_VERSION < 2, or unbaked models, get materials from the partMaterialTextures
if (!partMaterialTextures.empty()) {
int materialIndex = 0;
int textureIndex = 0;
QList<QString> children = _connectionChildMap.values(modelID);
for (int i = children.size() - 1; i >= 0; i--) {
const QString& childID = children.at(i);
if (_hfmMaterials.contains(childID)) {
// the pure material associated with this part
const HFMMaterial& material = _hfmMaterials.value(childID);
for (int j = 0; j < partMaterialTextures.size(); j++) {
if (partMaterialTextures.at(j).first == materialIndex) {
hfm::Shape& shape = partShapes[j];
shape.material = materialNameToID[material.materialID.toStdString()];
}
}
materialIndex++;
} else if (_textureFilenames.contains(childID)) {
// NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale")
// I'm leaving the second parameter blank right now as this code may never be used.
HFMTexture texture = getTexture(childID, "");
for (int j = 0; j < partMaterialTextures.size(); j++) {
int partTexture = partMaterialTextures.at(j).second;
if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) {
// TODO: DO something here that replaces this legacy code
// Maybe create a material just for this part with the correct textures?
// material.albedoTexture = texture;
// partShapes[j].material = materialIndex;
}
}
textureIndex++;
}
}
}
// For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart
if (!extracted.materialIDPerMeshPart.empty()) {
assert(partShapes.size() == extracted.materialIDPerMeshPart.size());
for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) {
hfm::Shape& shape = partShapes[i];
const std::string& materialID = extracted.materialIDPerMeshPart[i];
auto materialIt = materialNameToID.find(materialID);
if (materialIt != materialNameToID.end()) {
shape.material = materialIt->second;
}
}
}
glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform;
ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex);
// find the clusters with which the mesh is associated
QVector<QString> clusterIDs;
for (const QString& childID : _connectionChildMap.values(meshID)) {
for (const QString& clusterID : _connectionChildMap.values(childID)) {
if (!fbxClusters.contains(clusterID)) {
continue;
}
clusterIDs.append(clusterID);
}
}
for (int j = 0; j < cluster.indices.size(); j++) {
int oldIndex = cluster.indices.at(j);
float weight = cluster.weights.at(j);
for (QMultiHash<int, int>::const_iterator it = extracted.newIndices.constFind(oldIndex);
// whether we're skinned depends on how many clusters are attached
if (clusterIDs.size() > 0) {
hfm::SkinDeformer skinDeformer;
auto& clusters = skinDeformer.clusters;
for (const auto& clusterID : clusterIDs) {
HFMCluster hfmCluster;
const Cluster& fbxCluster = fbxClusters[clusterID];
// see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion
// of skinning information in FBX
QString jointID = _connectionChildMap.value(clusterID);
int indexOfJointID = modelIDs.indexOf(jointID);
if (indexOfJointID == -1) {
qCDebug(modelformat) << "Joint not in model list: " << jointID;
hfmCluster.jointIndex = 0;
} else {
hfmCluster.jointIndex = (uint32_t)indexOfJointID;
}
const glm::mat4& transformForCluster = transformForClusters[transformIndex];
hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * transformForCluster;
// slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and
// sometimes floating point fuzz can be introduced after the inverse.
hfmCluster.inverseBindMatrix[0][3] = 0.0f;
hfmCluster.inverseBindMatrix[1][3] = 0.0f;
hfmCluster.inverseBindMatrix[2][3] = 0.0f;
hfmCluster.inverseBindMatrix[3][3] = 1.0f;
hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix);
clusters.push_back(hfmCluster);
// override the bind rotation with the transform link
HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex];
joint.inverseBindRotation = glm::inverse(extractRotation(fbxCluster.transformLink));
joint.bindTransform = fbxCluster.transformLink;
joint.bindTransformFoundInCluster = true;
// update the bind pose extents
glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform);
hfmModel.bindExtents.addPoint(bindTranslation);
}
// the last cluster is the root cluster
HFMCluster cluster;
cluster.jointIndex = transformIndex;
clusters.push_back(cluster);
// Skinned mesh instances have an hfm::SkinDeformer
std::vector<hfm::SkinCluster> skinClusters;
for (const auto& clusterID : clusterIDs) {
const Cluster& fbxCluster = fbxClusters[clusterID];
skinClusters.emplace_back();
hfm::SkinCluster& skinCluster = skinClusters.back();
size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size());
skinCluster.indices.reserve(indexWeightPairs);
skinCluster.weights.reserve(indexWeightPairs);
for (int j = 0; j < fbxCluster.indices.size(); j++) {
int oldIndex = fbxCluster.indices.at(j);
float weight = fbxCluster.weights.at(j);
for (QMultiHash<int, int>::const_iterator it = extracted.newIndices.constFind(oldIndex);
it != extracted.newIndices.end() && it.key() == oldIndex; it++) {
int newIndex = it.value();
int newIndex = it.value();
// remember vertices with at least 1/4 weight
// FIXME: vertices with no weightpainting won't get recorded here
const float EXPANSION_WEIGHT_THRESHOLD = 0.25f;
if (weight >= EXPANSION_WEIGHT_THRESHOLD) {
// transform to joint-frame and save for later
const glm::mat4 vertexTransform = meshToJoint * glm::translate(extracted.mesh.vertices.at(newIndex));
points.push_back(extractTranslation(vertexTransform));
}
// look for an unused slot in the weights vector
int weightIndex = newIndex * WEIGHTS_PER_VERTEX;
int lowestIndex = -1;
float lowestWeight = FLT_MAX;
int k = 0;
for (; k < WEIGHTS_PER_VERTEX; k++) {
if (weightAccumulators[weightIndex + k] == 0.0f) {
extracted.mesh.clusterIndices[weightIndex + k] = i;
weightAccumulators[weightIndex + k] = weight;
break;
}
if (weightAccumulators[weightIndex + k] < lowestWeight) {
lowestIndex = k;
lowestWeight = weightAccumulators[weightIndex + k];
}
}
if (k == WEIGHTS_PER_VERTEX && weight > lowestWeight) {
// no space for an additional weight; we must replace the lowest
weightAccumulators[weightIndex + lowestIndex] = weight;
extracted.mesh.clusterIndices[weightIndex + lowestIndex] = i;
skinCluster.indices.push_back(newIndex);
skinCluster.weights.push_back(weight);
}
}
}
}
// now that we've accumulated the most relevant weights for each vertex
// normalize and compress to 16-bits
extracted.mesh.clusterWeights.fill(0, numClusterIndices);
int numVertices = extracted.mesh.vertices.size();
for (int i = 0; i < numVertices; ++i) {
int j = i * WEIGHTS_PER_VERTEX;
// normalize weights into uint16_t
float totalWeight = 0.0f;
for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) {
totalWeight += weightAccumulators[k];
}
const float ALMOST_HALF = 0.499f;
if (totalWeight > 0.0f) {
float weightScalingFactor = (float)(UINT16_MAX) / totalWeight;
for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) {
extracted.mesh.clusterWeights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF);
// It seems odd that this mesh-related code should be inside the for loop over instanced model IDs.
// However, in practice, skinned FBX models appear not to be instanced, as the skinning includes both the weights and joints.
{
hfm::ReweightedDeformers reweightedDeformers = hfm::getReweightedDeformers(mesh.vertices.size(), skinClusters);
if (reweightedDeformers.trimmedToMatch) {
qDebug(modelformat) << "FBXSerializer -- The index and weight lists for a skinning deformer had different sizes and have been trimmed to match";
}
} else {
extracted.mesh.clusterWeights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF);
mesh.clusterIndices = std::move(reweightedDeformers.indices);
mesh.clusterWeights = std::move(reweightedDeformers.weights);
mesh.clusterWeightsPerVertex = reweightedDeformers.weightsPerVertex;
}
// Store the model's skin deformer, and put its ID in the shapes
uint32_t skinDeformerID = (uint32_t)hfmModel.skinDeformers.size();
hfmModel.skinDeformers.push_back(skinDeformer);
for (hfm::Shape& shape : partShapes) {
shape.skinDeformer = skinDeformerID;
}
}
} else {
// this is a single-joint mesh
const HFMCluster& firstHFMCluster = extracted.mesh.clusters.at(0);
int jointIndex = firstHFMCluster.jointIndex;
HFMJoint& joint = hfmModel.joints[jointIndex];
// transform cluster vertices to joint-frame and save for later
glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform;
ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex);
foreach (const glm::vec3& vertex, extracted.mesh.vertices) {
const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertex);
points.push_back(extractTranslation(vertexTransform));
}
// Apply geometric offset, if present, by transforming the vertices directly
if (joint.hasGeometricOffset) {
glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation);
for (int i = 0; i < extracted.mesh.vertices.size(); i++) {
extracted.mesh.vertices[i] = transformPoint(geometricOffset, extracted.mesh.vertices[i]);
}
}
// Store the parts for this mesh (or instance of this mesh, as the case may be)
meshShapes.insert(meshShapes.cend(), partShapes.cbegin(), partShapes.cend());
}
hfmModel.meshes.append(extracted.mesh);
int meshIndex = hfmModel.meshes.size() - 1;
meshIDsToMeshIndices.insert(it.key(), meshIndex);
// Store the shapes for the mesh (or multiple instances of the mesh, as the case may be)
hfmModel.shapes.insert(hfmModel.shapes.cend(), meshShapes.cbegin(), meshShapes.cend());
}
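As a hedged illustration of the bookkeeping above (counts and joint indices are hypothetical): a mesh with two parts that is instanced by two models ends up with four shapes, all referencing the same mesh index but different joints.
    uint32_t exampleMeshIndex = 0;
    std::vector<uint32_t> exampleInstanceJoints { 3, 7 };   // transform/joint index of each instancing model
    std::vector<hfm::Shape> exampleShapes;
    for (uint32_t exampleJoint : exampleInstanceJoints) {
        for (uint32_t examplePart = 0; examplePart < 2; ++examplePart) {  // two mesh parts
            hfm::Shape shape;
            shape.mesh = exampleMeshIndex;
            shape.meshPart = examplePart;
            shape.joint = exampleJoint;
            exampleShapes.push_back(shape);
        }
    }
    // exampleShapes.size() == 4: the geometry is stored once; only the placement differs per shape.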
// attempt to map any meshes to a named model
@ -1645,14 +1660,6 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
}
}
if (applyUpAxisZRotation) {
hfmModelPtr->meshExtents.transform(glm::mat4_cast(upAxisZRotation));
hfmModelPtr->bindExtents.transform(glm::mat4_cast(upAxisZRotation));
for (auto &mesh : hfmModelPtr->meshes) {
mesh.modelTransform *= glm::mat4_cast(upAxisZRotation);
mesh.meshExtents.transform(glm::mat4_cast(upAxisZRotation));
}
}
return hfmModelPtr;
}

View file

@ -100,7 +100,15 @@ public:
{}
};
class ExtractedMesh;
class ExtractedMesh {
public:
hfm::Mesh mesh;
std::vector<std::string> materialIDPerMeshPart;
QMultiHash<int, int> newIndices;
QVector<QHash<int, int> > blendshapeIndexMaps;
QVector<QPair<int, int> > partMaterialTextures;
QHash<QString, size_t> texcoordSetMap;
};
class FBXSerializer : public HFMSerializer {
public:

View file

@ -355,7 +355,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
// Check for additional metadata
unsigned int dracoMeshNodeVersion = 1;
std::vector<QString> dracoMaterialList;
std::vector<std::string> dracoMaterialList;
for (const auto& dracoChild : child.children) {
if (dracoChild.name == "FBXDracoMeshVersion") {
if (!dracoChild.properties.isEmpty()) {
@ -364,7 +364,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
} else if (dracoChild.name == "MaterialList") {
dracoMaterialList.reserve(dracoChild.properties.size());
for (const auto& materialID : dracoChild.properties) {
dracoMaterialList.push_back(materialID.toString());
dracoMaterialList.push_back(materialID.toString().toStdString());
}
}
}
@ -486,21 +486,20 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
// grab or setup the HFMMeshPart for the part this face belongs to
int& partIndexPlusOne = materialTextureParts[materialTexture];
if (partIndexPlusOne == 0) {
data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1);
HFMMeshPart& part = data.extracted.mesh.parts.back();
data.extracted.mesh.parts.emplace_back();
// Figure out what material this part is
// Figure out if this is the older way of defining the per-part material for baked FBX
if (dracoMeshNodeVersion >= 2) {
// Define the materialID now
if (materialID < dracoMaterialList.size()) {
part.materialID = dracoMaterialList[materialID];
}
// Define the materialID for this mesh part index
uint16_t safeMaterialID = materialID < dracoMaterialList.size() ? materialID : 0;
data.extracted.materialIDPerMeshPart.push_back(dracoMaterialList[safeMaterialID].c_str());
} else {
// Define the materialID later, based on the order of first appearance of the materials in the _connectionChildMap
data.extracted.partMaterialTextures.append(materialTexture);
}
// In dracoMeshNodeVersion >= 2, FBX meshes have their per-part materials already defined in data.extracted.materialIDPerMeshPart
partIndexPlusOne = data.extracted.mesh.parts.size();
partIndexPlusOne = (int)data.extracted.mesh.parts.size();
}
// give the mesh part this index
@ -535,7 +534,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
if (partIndex == 0) {
data.extracted.partMaterialTextures.append(materialTexture);
data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1);
partIndex = data.extracted.mesh.parts.size();
partIndex = (int)data.extracted.mesh.parts.size();
}
HFMMeshPart& part = data.extracted.mesh.parts[partIndex - 1];

View file

@ -77,7 +77,7 @@ FST* FST::createFSTFromModel(const QString& fstPath, const QString& modelFilePat
mapping.insert(JOINT_FIELD, joints);
QVariantHash jointIndices;
for (int i = 0; i < hfmModel.joints.size(); i++) {
for (size_t i = 0; i < (size_t)hfmModel.joints.size(); i++) {
jointIndices.insert(hfmModel.joints.at(i).name, QString::number(i));
}
mapping.insert(JOINT_INDEX_FIELD, jointIndices);

File diff suppressed because it is too large

View file

@ -38,15 +38,15 @@ struct GLTFAsset {
struct GLTFNode {
QString name;
int camera;
int mesh;
int camera{ -1 };
int mesh{ -1 };
QVector<int> children;
QVector<double> translation;
QVector<double> rotation;
QVector<double> scale;
QVector<double> matrix;
QVector<glm::mat4> transforms;
int skin;
glm::mat4 transform;
int skin { -1 };
QVector<int> skeletons;
QString jointName;
QMap<QString, bool> defined;
@ -85,6 +85,8 @@ struct GLTFNode {
qCDebug(modelformat) << "skeletons: " << skeletons;
}
}
void normalizeTransform();
};
// Meshes
@ -460,15 +462,56 @@ struct GLTFMaterial {
// Accesors
namespace GLTFAccessorType {
enum Values {
SCALAR = 0,
VEC2,
VEC3,
VEC4,
MAT2,
MAT3,
MAT4
enum Value {
SCALAR = 1,
VEC2 = 2,
VEC3 = 3,
VEC4 = 4,
MAT2 = 5,
MAT3 = 9,
MAT4 = 16
};
inline int count(Value value) {
if (value == MAT2) {
return 4;
}
return (int)value;
}
}
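Note that the enum values double as component counts for every type except MAT2, which has 4 components but uses the value 5 so it stays distinct from VEC4. A small hedged check, assuming the namespace above is in scope:
    // Number of scalars in a hypothetical accessor holding 10 MAT2 elements:
    int elementCount = 10;
    int scalarCount = elementCount * GLTFAccessorType::count(GLTFAccessorType::MAT2);  // 10 * 4 == 40
    // For all other types count(value) == (int)value, e.g. count(MAT3) == 9 and count(VEC3) == 3.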
namespace GLTFVertexAttribute {
enum Value {
UNKNOWN = -1,
POSITION = 0,
NORMAL,
TANGENT,
TEXCOORD_0,
TEXCOORD_1,
COLOR_0,
JOINTS_0,
WEIGHTS_0,
};
inline Value fromString(const QString& key) {
if (key == "POSITION") {
return POSITION;
} else if (key == "NORMAL") {
return NORMAL;
} else if (key == "TANGENT") {
return TANGENT;
} else if (key == "TEXCOORD_0") {
return TEXCOORD_0;
} else if (key == "TEXCOORD_1") {
return TEXCOORD_1;
} else if (key == "COLOR_0") {
return COLOR_0;
} else if (key == "JOINTS_0") {
return JOINTS_0;
} else if (key == "WEIGHTS_0") {
return WEIGHTS_0;
}
return UNKNOWN;
}
}
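A hedged sketch of how the name-to-attribute mapping might be used when walking a primitive's attribute dictionary; the container and accessor indices here are hypothetical:
    QMap<QString, int> exampleAttributes;                   // attribute name -> accessor index
    exampleAttributes["POSITION"] = 0;
    exampleAttributes["TEXCOORD_0"] = 1;
    for (auto it = exampleAttributes.constBegin(); it != exampleAttributes.constEnd(); ++it) {
        GLTFVertexAttribute::Value attribute = GLTFVertexAttribute::fromString(it.key());
        if (attribute == GLTFVertexAttribute::UNKNOWN) {
            continue;                                        // unhandled attribute name
        }
        // dispatch on 'attribute' to fill the matching mesh buffer from accessor it.value()
    }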
namespace GLTFAccessorComponentType {
enum Values {
@ -760,6 +803,13 @@ struct GLTFFile {
foreach(auto tex, textures) tex.dump();
}
}
void populateMaterialNames();
void sortNodes();
void normalizeNodeTransforms();
private:
void reorderNodes(const std::unordered_map<int, int>& reorderMap);
};
class GLTFSerializer : public QObject, public HFMSerializer {
@ -774,7 +824,7 @@ private:
hifi::URL _url;
hifi::ByteArray _glbBinary;
glm::mat4 getModelTransform(const GLTFNode& node);
const glm::mat4& getModelTransform(const GLTFNode& node);
void getSkinInverseBindMatrices(std::vector<std::vector<float>>& inverseBindMatrixValues);
void generateTargetData(int index, float weight, QVector<glm::vec3>& returnVector);
@ -843,6 +893,9 @@ private:
template <typename T>
bool addArrayFromAccessor(GLTFAccessor& accessor, QVector<T>& outarray);
template <typename T>
bool addArrayFromAttribute(GLTFVertexAttribute::Value vertexAttribute, GLTFAccessor& accessor, QVector<T>& outarray);
void retriangulate(const QVector<int>& in_indices, const QVector<glm::vec3>& in_vertices,
const QVector<glm::vec3>& in_normals, QVector<int>& out_indices,
QVector<glm::vec3>& out_vertices, QVector<glm::vec3>& out_normals);

View file

@ -174,11 +174,6 @@ glm::vec2 OBJTokenizer::getVec2() {
return v;
}
void setMeshPartDefaults(HFMMeshPart& meshPart, QString materialID) {
meshPart.materialID = materialID;
}
// OBJFace
// NOTE (trent, 7/20/17): The vertexColors vector being passed-in isn't necessary here, but I'm just
// pairing it with the vertices vector for consistency.
@ -492,8 +487,7 @@ bool OBJSerializer::parseOBJGroup(OBJTokenizer& tokenizer, const hifi::VariantHa
float& scaleGuess, bool combineParts) {
FaceGroup faces;
HFMMesh& mesh = hfmModel.meshes[0];
mesh.parts.append(HFMMeshPart());
HFMMeshPart& meshPart = mesh.parts.last();
mesh.parts.push_back(HFMMeshPart());
bool sawG = false;
bool result = true;
int originalFaceCountForDebugging = 0;
@ -501,8 +495,6 @@ bool OBJSerializer::parseOBJGroup(OBJTokenizer& tokenizer, const hifi::VariantHa
bool anyVertexColor { false };
int vertexCount { 0 };
setMeshPartDefaults(meshPart, QString("dontknow") + QString::number(mesh.parts.count()));
while (true) {
int tokenType = tokenizer.nextToken();
if (tokenType == OBJTokenizer::COMMENT_TOKEN) {
@ -675,17 +667,19 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
_url = url;
bool combineParts = mapping.value("combineParts").toBool();
hfmModel.meshExtents.reset();
hfmModel.meshes.append(HFMMesh());
hfmModel.meshes.push_back(HFMMesh());
std::vector<QString> materialNamePerShape;
try {
// call parseOBJGroup as long as it's returning true. Each successful call will
// add a new meshPart to the model's single mesh.
while (parseOBJGroup(tokenizer, mapping, hfmModel, scaleGuess, combineParts)) {}
HFMMesh& mesh = hfmModel.meshes[0];
mesh.meshIndex = 0;
uint32_t meshIndex = 0;
HFMMesh& mesh = hfmModel.meshes[meshIndex];
mesh.meshIndex = meshIndex;
uint32_t jointIndex = 0;
hfmModel.joints.resize(1);
hfmModel.joints[0].parentIndex = -1;
hfmModel.joints[0].distanceToParent = 0;
@ -697,19 +691,11 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
hfmModel.jointIndices["x"] = 1;
HFMCluster cluster;
cluster.jointIndex = 0;
cluster.inverseBindMatrix = glm::mat4(1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1);
mesh.clusters.append(cluster);
QMap<QString, int> materialMeshIdMap;
QVector<HFMMeshPart> hfmMeshParts;
for (int i = 0, meshPartCount = 0; i < mesh.parts.count(); i++, meshPartCount++) {
HFMMeshPart& meshPart = mesh.parts[i];
FaceGroup faceGroup = faceGroups[meshPartCount];
std::vector<HFMMeshPart> hfmMeshParts;
for (uint32_t meshPartIndex = 0; meshPartIndex < (uint32_t)mesh.parts.size(); ++meshPartIndex) {
HFMMeshPart& meshPart = mesh.parts[meshPartIndex];
FaceGroup faceGroup = faceGroups[meshPartIndex];
bool specifiesUV = false;
foreach(OBJFace face, faceGroup) {
// Go through all of the OBJ faces and determine the number of different materials necessary (each different material will be a unique mesh).
@ -718,12 +704,13 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
// Create a new HFMMesh for this material mapping.
materialMeshIdMap.insert(face.materialName, materialMeshIdMap.count());
hfmMeshParts.append(HFMMeshPart());
HFMMeshPart& meshPartNew = hfmMeshParts.last();
uint32_t partIndex = (int)hfmMeshParts.size();
hfmMeshParts.push_back(HFMMeshPart());
HFMMeshPart& meshPartNew = hfmMeshParts.back();
meshPartNew.quadIndices = QVector<int>(meshPart.quadIndices); // Copy over quad indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway].
meshPartNew.quadTrianglesIndices = QVector<int>(meshPart.quadTrianglesIndices); // Copy over quad triangulated indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway].
meshPartNew.triangleIndices = QVector<int>(meshPart.triangleIndices); // Copy over triangle indices.
// Do some of the material logic (which previously lived below) now.
// All the faces in the same group will have the same name and material.
QString groupMaterialName = face.materialName;
@ -745,19 +732,26 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
needsMaterialLibrary = groupMaterialName != SMART_DEFAULT_MATERIAL_NAME;
}
materials[groupMaterialName] = material;
meshPartNew.materialID = groupMaterialName;
}
materialNamePerShape.push_back(groupMaterialName);
hfm::Shape shape;
shape.mesh = meshIndex;
shape.joint = jointIndex;
shape.meshPart = partIndex;
hfmModel.shapes.push_back(shape);
}
}
}
// clean up old mesh parts.
int unmodifiedMeshPartCount = mesh.parts.count();
auto unmodifiedMeshPartCount = (uint32_t)mesh.parts.size();
mesh.parts.clear();
mesh.parts = QVector<HFMMeshPart>(hfmMeshParts);
mesh.parts = hfmMeshParts;
for (int i = 0, meshPartCount = 0; i < unmodifiedMeshPartCount; i++, meshPartCount++) {
FaceGroup faceGroup = faceGroups[meshPartCount];
for (uint32_t meshPartIndex = 0; meshPartIndex < unmodifiedMeshPartCount; meshPartIndex++) {
FaceGroup faceGroup = faceGroups[meshPartIndex];
// Now that each mesh has been created with its own unique material mappings, fill them with data (vertex data is duplicated, face data is not).
foreach(OBJFace face, faceGroup) {
@ -823,18 +817,13 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
}
}
}
mesh.meshExtents.reset();
foreach(const glm::vec3& vertex, mesh.vertices) {
mesh.meshExtents.addPoint(vertex);
hfmModel.meshExtents.addPoint(vertex);
}
// hfmDebugDump(hfmModel);
} catch(const std::exception& e) {
qCDebug(modelformat) << "OBJSerializer fail: " << e.what();
}
// At this point, the hfmModel joints, meshes, parts and shapes have been defined,
// but no materials have been assigned yet
QString queryPart = _url.query();
bool suppressMaterialsHack = queryPart.contains("hifiusemat"); // If this appears in query string, don't fetch mtl even if used.
OBJMaterial& preDefinedMaterial = materials[SMART_DEFAULT_MATERIAL_NAME];
@ -886,17 +875,23 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
}
}
// As we populate the material list in the hfmModel, let's also create the reverse map (from materialName to index)
QMap<QString, uint32_t> materialNameToIndex;
foreach (QString materialID, materials.keys()) {
OBJMaterial& objMaterial = materials[materialID];
if (!objMaterial.used) {
continue;
}
HFMMaterial& hfmMaterial = hfmModel.materials[materialID] = HFMMaterial(objMaterial.diffuseColor,
objMaterial.specularColor,
objMaterial.emissiveColor,
objMaterial.shininess,
objMaterial.opacity);
// capture the name to index map
materialNameToIndex[materialID] = (uint32_t) hfmModel.materials.size();
hfmModel.materials.emplace_back(objMaterial.diffuseColor,
objMaterial.specularColor,
objMaterial.emissiveColor,
objMaterial.shininess,
objMaterial.opacity);
HFMMaterial& hfmMaterial = hfmModel.materials.back();
hfmMaterial.name = materialID;
hfmMaterial.materialID = materialID;
@ -996,77 +991,16 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
modelMaterial->setOpacity(hfmMaterial.opacity);
}
// Go over the shapes once more to assign the material index correctly
for (uint32_t i = 0; i < (uint32_t)hfmModel.shapes.size(); ++i) {
const auto& materialName = materialNamePerShape[i];
if (!materialName.isEmpty()) {
auto foundMaterialIndex = materialNameToIndex.find(materialName);
if (foundMaterialIndex != materialNameToIndex.end()) {
hfmModel.shapes[i].material = foundMaterialIndex.value();
}
}
}
return hfmModelPtr;
}
void hfmDebugDump(const HFMModel& hfmModel) {
qCDebug(modelformat) << "---------------- hfmModel ----------------";
qCDebug(modelformat) << " hasSkeletonJoints =" << hfmModel.hasSkeletonJoints;
qCDebug(modelformat) << " offset =" << hfmModel.offset;
qCDebug(modelformat) << " meshes.count() =" << hfmModel.meshes.count();
foreach (HFMMesh mesh, hfmModel.meshes) {
qCDebug(modelformat) << " vertices.count() =" << mesh.vertices.count();
qCDebug(modelformat) << " colors.count() =" << mesh.colors.count();
qCDebug(modelformat) << " normals.count() =" << mesh.normals.count();
/*if (mesh.normals.count() == mesh.vertices.count()) {
for (int i = 0; i < mesh.normals.count(); i++) {
qCDebug(modelformat) << " " << mesh.vertices[ i ] << mesh.normals[ i ];
}
}*/
qCDebug(modelformat) << " tangents.count() =" << mesh.tangents.count();
qCDebug(modelformat) << " colors.count() =" << mesh.colors.count();
qCDebug(modelformat) << " texCoords.count() =" << mesh.texCoords.count();
qCDebug(modelformat) << " texCoords1.count() =" << mesh.texCoords1.count();
qCDebug(modelformat) << " clusterIndices.count() =" << mesh.clusterIndices.count();
qCDebug(modelformat) << " clusterWeights.count() =" << mesh.clusterWeights.count();
qCDebug(modelformat) << " meshExtents =" << mesh.meshExtents;
qCDebug(modelformat) << " modelTransform =" << mesh.modelTransform;
qCDebug(modelformat) << " parts.count() =" << mesh.parts.count();
foreach (HFMMeshPart meshPart, mesh.parts) {
qCDebug(modelformat) << " quadIndices.count() =" << meshPart.quadIndices.count();
qCDebug(modelformat) << " triangleIndices.count() =" << meshPart.triangleIndices.count();
/*
qCDebug(modelformat) << " diffuseColor =" << meshPart.diffuseColor << "mat =" << meshPart._material->getDiffuse();
qCDebug(modelformat) << " specularColor =" << meshPart.specularColor << "mat =" << meshPart._material->getMetallic();
qCDebug(modelformat) << " emissiveColor =" << meshPart.emissiveColor << "mat =" << meshPart._material->getEmissive();
qCDebug(modelformat) << " emissiveParams =" << meshPart.emissiveParams;
qCDebug(modelformat) << " gloss =" << meshPart.shininess << "mat =" << meshPart._material->getRoughness();
qCDebug(modelformat) << " opacity =" << meshPart.opacity << "mat =" << meshPart._material->getOpacity();
*/
qCDebug(modelformat) << " materialID =" << meshPart.materialID;
/* qCDebug(modelformat) << " diffuse texture =" << meshPart.diffuseTexture.filename;
qCDebug(modelformat) << " specular texture =" << meshPart.specularTexture.filename;
*/
}
qCDebug(modelformat) << " clusters.count() =" << mesh.clusters.count();
foreach (HFMCluster cluster, mesh.clusters) {
qCDebug(modelformat) << " jointIndex =" << cluster.jointIndex;
qCDebug(modelformat) << " inverseBindMatrix =" << cluster.inverseBindMatrix;
}
}
qCDebug(modelformat) << " jointIndices =" << hfmModel.jointIndices;
qCDebug(modelformat) << " joints.count() =" << hfmModel.joints.count();
foreach (HFMJoint joint, hfmModel.joints) {
qCDebug(modelformat) << " parentIndex" << joint.parentIndex;
qCDebug(modelformat) << " distanceToParent" << joint.distanceToParent;
qCDebug(modelformat) << " translation" << joint.translation;
qCDebug(modelformat) << " preTransform" << joint.preTransform;
qCDebug(modelformat) << " preRotation" << joint.preRotation;
qCDebug(modelformat) << " rotation" << joint.rotation;
qCDebug(modelformat) << " postRotation" << joint.postRotation;
qCDebug(modelformat) << " postTransform" << joint.postTransform;
qCDebug(modelformat) << " transform" << joint.transform;
qCDebug(modelformat) << " rotationMin" << joint.rotationMin;
qCDebug(modelformat) << " rotationMax" << joint.rotationMax;
qCDebug(modelformat) << " inverseDefaultRotation" << joint.inverseDefaultRotation;
qCDebug(modelformat) << " inverseBindRotation" << joint.inverseBindRotation;
qCDebug(modelformat) << " bindTransform" << joint.bindTransform;
qCDebug(modelformat) << " name" << joint.name;
qCDebug(modelformat) << " isSkeletonJoint" << joint.isSkeletonJoint;
}
qCDebug(modelformat) << "\n";
}

View file

@ -120,6 +120,5 @@ private:
// What are these utilities doing here? One is used by fbx loading code in VHACD Utils, and the other a general debugging utility.
void setMeshPartDefaults(HFMMeshPart& meshPart, QString materialID);
void hfmDebugDump(const HFMModel& hfmModel);
#endif // hifi_OBJSerializer_h

View file

@ -76,7 +76,7 @@ QStringList HFMModel::getJointNames() const {
}
bool HFMModel::hasBlendedMeshes() const {
if (!meshes.isEmpty()) {
if (!meshes.empty()) {
foreach (const HFMMesh& mesh, meshes) {
if (!mesh.blendshapes.isEmpty()) {
return true;
@ -166,16 +166,16 @@ void HFMModel::computeKdops() {
glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3),
glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3)
};
if (joints.size() != (int)shapeVertices.size()) {
if (joints.size() != shapeVertices.size()) {
return;
}
// now that all joints have been scanned compute a k-Dop bounding volume of mesh
for (int i = 0; i < joints.size(); ++i) {
for (size_t i = 0; i < joints.size(); ++i) {
HFMJoint& joint = joints[i];
// NOTE: points are in joint-frame
ShapeVertices& points = shapeVertices.at(i);
glm::quat rotOffset = jointRotationOffsets.contains(i) ? glm::inverse(jointRotationOffsets[i]) : quat();
glm::quat rotOffset = jointRotationOffsets.contains((int)i) ? glm::inverse(jointRotationOffsets[(int)i]) : quat();
if (points.size() > 0) {
// compute average point
glm::vec3 avgPoint = glm::vec3(0.0f);
@ -208,3 +208,164 @@ void HFMModel::computeKdops() {
}
}
}
void hfm::Model::debugDump() const {
qCDebug(modelformat) << "---------------- hfmModel ----------------";
qCDebug(modelformat) << " hasSkeletonJoints =" << hasSkeletonJoints;
qCDebug(modelformat) << " offset =" << offset;
qCDebug(modelformat) << " neckPivot = " << neckPivot;
qCDebug(modelformat) << " bindExtents.size() = " << bindExtents.size();
qCDebug(modelformat) << " meshExtents.size() = " << meshExtents.size();
qCDebug(modelformat) << "---------------- Shapes ----------------";
qCDebug(modelformat) << " shapes.size() =" << shapes.size();
for (const hfm::Shape& shape : shapes) {
qCDebug(modelformat) << "\n";
qCDebug(modelformat) << " mesh =" << shape.mesh;
qCDebug(modelformat) << " meshPart =" << shape.meshPart;
qCDebug(modelformat) << " material =" << shape.material;
qCDebug(modelformat) << " joint =" << shape.joint;
qCDebug(modelformat) << " transformedExtents =" << shape.transformedExtents;
qCDebug(modelformat) << " skinDeformer =" << shape.skinDeformer;
}
qCDebug(modelformat) << " jointIndices.size() =" << jointIndices.size();
qCDebug(modelformat) << " joints.size() =" << joints.size();
qCDebug(modelformat) << "---------------- Meshes ----------------";
qCDebug(modelformat) << " meshes.size() =" << meshes.size();
qCDebug(modelformat) << " blendshapeChannelNames = " << blendshapeChannelNames;
for (const HFMMesh& mesh : meshes) {
qCDebug(modelformat) << "\n";
qCDebug(modelformat) << " meshpointer =" << mesh._mesh.get();
qCDebug(modelformat) << " meshindex =" << mesh.meshIndex;
qCDebug(modelformat) << " vertices.size() =" << mesh.vertices.size();
qCDebug(modelformat) << " colors.size() =" << mesh.colors.size();
qCDebug(modelformat) << " normals.size() =" << mesh.normals.size();
qCDebug(modelformat) << " tangents.size() =" << mesh.tangents.size();
qCDebug(modelformat) << " colors.size() =" << mesh.colors.size();
qCDebug(modelformat) << " texCoords.size() =" << mesh.texCoords.size();
qCDebug(modelformat) << " texCoords1.size() =" << mesh.texCoords1.size();
qCDebug(modelformat) << " clusterIndices.size() =" << mesh.clusterIndices.size();
qCDebug(modelformat) << " clusterWeights.size() =" << mesh.clusterWeights.size();
qCDebug(modelformat) << " modelTransform =" << mesh.modelTransform;
qCDebug(modelformat) << " parts.size() =" << mesh.parts.size();
qCDebug(modelformat) << "---------------- Meshes (blendshapes)--------";
for (HFMBlendshape bshape : mesh.blendshapes) {
qCDebug(modelformat) << "\n";
qCDebug(modelformat) << " bshape.indices.size() =" << bshape.indices.size();
qCDebug(modelformat) << " bshape.vertices.size() =" << bshape.vertices.size();
qCDebug(modelformat) << " bshape.normals.size() =" << bshape.normals.size();
qCDebug(modelformat) << "\n";
}
qCDebug(modelformat) << "---------------- Meshes (meshparts)--------";
for (HFMMeshPart meshPart : mesh.parts) {
qCDebug(modelformat) << "\n";
qCDebug(modelformat) << " quadIndices.size() =" << meshPart.quadIndices.size();
qCDebug(modelformat) << " triangleIndices.size() =" << meshPart.triangleIndices.size();
qCDebug(modelformat) << "\n";
}
}
qCDebug(modelformat) << "---------------- AnimationFrames ----------------";
for (HFMAnimationFrame anim : animationFrames) {
qCDebug(modelformat) << " anim.translations = " << anim.translations;
qCDebug(modelformat) << " anim.rotations = " << anim.rotations;
}
QList<int> mitomona_keys = meshIndicesToModelNames.keys();
for (int key : mitomona_keys) {
qCDebug(modelformat) << " meshIndicesToModelNames key =" << key
<< " val =" << meshIndicesToModelNames[key];
}
qCDebug(modelformat) << "---------------- Materials ----------------";
for (HFMMaterial mat : materials) {
qCDebug(modelformat) << "\n";
qCDebug(modelformat) << " mat.materialID =" << mat.materialID;
qCDebug(modelformat) << " diffuseColor =" << mat.diffuseColor;
qCDebug(modelformat) << " diffuseFactor =" << mat.diffuseFactor;
qCDebug(modelformat) << " specularColor =" << mat.specularColor;
qCDebug(modelformat) << " specularFactor =" << mat.specularFactor;
qCDebug(modelformat) << " emissiveColor =" << mat.emissiveColor;
qCDebug(modelformat) << " emissiveFactor =" << mat.emissiveFactor;
qCDebug(modelformat) << " shininess =" << mat.shininess;
qCDebug(modelformat) << " opacity =" << mat.opacity;
qCDebug(modelformat) << " metallic =" << mat.metallic;
qCDebug(modelformat) << " roughness =" << mat.roughness;
qCDebug(modelformat) << " emissiveIntensity =" << mat.emissiveIntensity;
qCDebug(modelformat) << " ambientFactor =" << mat.ambientFactor;
qCDebug(modelformat) << " materialID =" << mat.materialID;
qCDebug(modelformat) << " name =" << mat.name;
qCDebug(modelformat) << " shadingModel =" << mat.shadingModel;
qCDebug(modelformat) << " _material =" << mat._material.get();
qCDebug(modelformat) << " normalTexture =" << mat.normalTexture.filename;
qCDebug(modelformat) << " albedoTexture =" << mat.albedoTexture.filename;
qCDebug(modelformat) << " opacityTexture =" << mat.opacityTexture.filename;
qCDebug(modelformat) << " lightmapParams =" << mat.lightmapParams;
qCDebug(modelformat) << " isPBSMaterial =" << mat.isPBSMaterial;
qCDebug(modelformat) << " useNormalMap =" << mat.useNormalMap;
qCDebug(modelformat) << " useAlbedoMap =" << mat.useAlbedoMap;
qCDebug(modelformat) << " useOpacityMap =" << mat.useOpacityMap;
qCDebug(modelformat) << " useRoughnessMap =" << mat.useRoughnessMap;
qCDebug(modelformat) << " useSpecularMap =" << mat.useSpecularMap;
qCDebug(modelformat) << " useMetallicMap =" << mat.useMetallicMap;
qCDebug(modelformat) << " useEmissiveMap =" << mat.useEmissiveMap;
qCDebug(modelformat) << " useOcclusionMap =" << mat.useOcclusionMap;
qCDebug(modelformat) << "\n";
}
qCDebug(modelformat) << "---------------- Joints ----------------";
for (const HFMJoint& joint : joints) {
qCDebug(modelformat) << "\n";
qCDebug(modelformat) << " shapeInfo.avgPoint =" << joint.shapeInfo.avgPoint;
qCDebug(modelformat) << " shapeInfo.debugLines =" << joint.shapeInfo.debugLines;
qCDebug(modelformat) << " shapeInfo.dots =" << joint.shapeInfo.dots;
qCDebug(modelformat) << " shapeInfo.points =" << joint.shapeInfo.points;
qCDebug(modelformat) << " ---";
qCDebug(modelformat) << " parentIndex" << joint.parentIndex;
qCDebug(modelformat) << " distanceToParent" << joint.distanceToParent;
qCDebug(modelformat) << " localTransform" << joint.localTransform;
qCDebug(modelformat) << " transform" << joint.transform;
qCDebug(modelformat) << " globalTransform" << joint.globalTransform;
qCDebug(modelformat) << " ---";
qCDebug(modelformat) << " translation" << joint.translation;
qCDebug(modelformat) << " preTransform" << joint.preTransform;
qCDebug(modelformat) << " preRotation" << joint.preRotation;
qCDebug(modelformat) << " rotation" << joint.rotation;
qCDebug(modelformat) << " postRotation" << joint.postRotation;
qCDebug(modelformat) << " postTransform" << joint.postTransform;
qCDebug(modelformat) << " rotationMin" << joint.rotationMin;
qCDebug(modelformat) << " rotationMax" << joint.rotationMax;
qCDebug(modelformat) << " inverseDefaultRotation" << joint.inverseDefaultRotation;
qCDebug(modelformat) << " inverseBindRotation" << joint.inverseBindRotation;
qCDebug(modelformat) << " bindTransformFoundInCluster" << joint.bindTransformFoundInCluster;
qCDebug(modelformat) << " bindTransform" << joint.bindTransform;
qCDebug(modelformat) << " name" << joint.name;
qCDebug(modelformat) << " isSkeletonJoint" << joint.isSkeletonJoint;
qCDebug(modelformat) << " geometricOffset" << joint.geometricOffset;
qCDebug(modelformat) << "\n";
}
qCDebug(modelformat) << "------------- SkinDeformers ------------";
qCDebug(modelformat) << " skinDeformers.size() =" << skinDeformers.size();
for (const hfm::SkinDeformer& skinDeformer : skinDeformers) {
qCDebug(modelformat) << "------- SkinDeformers (Clusters) -------";
for (const hfm::Cluster& cluster : skinDeformer.clusters) {
qCDebug(modelformat) << "\n";
qCDebug(modelformat) << " jointIndex =" << cluster.jointIndex;
qCDebug(modelformat) << " inverseBindMatrix =" << cluster.inverseBindMatrix;
qCDebug(modelformat) << "\n";
}
}
qCDebug(modelformat) << "\n";
}

View file

@ -66,6 +66,8 @@ static const int DRACO_ATTRIBUTE_ORIGINAL_INDEX = DRACO_BEGIN_CUSTOM_HIFI_ATTRIB
// High Fidelity Model namespace
namespace hfm {
static const uint32_t UNDEFINED_KEY = (uint32_t)-1;
/// A single blendshape.
class Blendshape {
public:
@ -111,19 +113,22 @@ public:
bool isSkeletonJoint;
bool bindTransformFoundInCluster;
// geometric offset is applied in local space but does NOT affect children.
bool hasGeometricOffset;
glm::vec3 geometricTranslation;
glm::quat geometricRotation;
glm::vec3 geometricScaling;
// TODO: Apply hfm::Joint.geometricOffset to transforms in the model preparation step
glm::mat4 geometricOffset;
// globalTransform is the transform of the joint with all parent transforms applied, plus the geometric offset
glm::mat4 localTransform;
glm::mat4 globalTransform;
};
/// A single binding to a joint.
class Cluster {
public:
int jointIndex;
static const uint32_t INVALID_JOINT_INDEX { (uint32_t)-1 };
uint32_t jointIndex { INVALID_JOINT_INDEX };
glm::mat4 inverseBindMatrix;
Transform inverseBindTransform;
};
@ -155,8 +160,6 @@ public:
QVector<int> quadIndices; // original indices from the FBX mesh
QVector<int> quadTrianglesIndices; // original indices from the FBX mesh of the quad converted as triangles
QVector<int> triangleIndices; // original indices from the FBX mesh
QString materialID;
};
class Material {
@ -224,11 +227,20 @@ public:
bool needTangentSpace() const;
};
/// Simple Triangle List Mesh
struct TriangleListMesh {
std::vector<glm::vec3> vertices;
std::vector<uint32_t> indices;
std::vector<glm::ivec2> parts; // Offset in the indices, Number of indices
std::vector<Extents> partExtents; // Extents of each part with no transform applied. Same length as parts.
};
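A hedged sketch of walking one part of a TriangleListMesh, assuming a populated triangleListMesh and a valid partIndex are in scope; each part stores (offset into indices, number of indices):
    const glm::ivec2& part = triangleListMesh.parts[partIndex];
    for (int i = part.x; i < part.x + part.y; i += 3) {
        const glm::vec3& v0 = triangleListMesh.vertices[triangleListMesh.indices[i]];
        const glm::vec3& v1 = triangleListMesh.vertices[triangleListMesh.indices[i + 1]];
        const glm::vec3& v2 = triangleListMesh.vertices[triangleListMesh.indices[i + 2]];
        // use v0, v1, v2 (e.g. accumulate extents, as calculateExtentsForTriangleListMesh does per index)
    }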
/// A single mesh (with optional blendshapes).
class Mesh {
public:
QVector<MeshPart> parts;
std::vector<MeshPart> parts;
QVector<glm::vec3> vertices;
QVector<glm::vec3> normals;
@ -236,21 +248,27 @@ public:
QVector<glm::vec3> colors;
QVector<glm::vec2> texCoords;
QVector<glm::vec2> texCoords1;
QVector<uint16_t> clusterIndices;
QVector<uint16_t> clusterWeights;
QVector<int32_t> originalIndices;
QVector<Cluster> clusters;
Extents meshExtents; // DEPRECATED (see hfm::Shape::transformedExtents)
glm::mat4 modelTransform; // DEPRECATED (see hfm::Joint::globalTransform, hfm::Shape::transform, hfm::Model::joints)
Extents meshExtents;
glm::mat4 modelTransform;
// Skinning cluster attributes
std::vector<uint16_t> clusterIndices;
std::vector<uint16_t> clusterWeights;
uint16_t clusterWeightsPerVertex { 0 };
// Blendshape attributes
QVector<Blendshape> blendshapes;
// Simple Triangle List Mesh generated during baking
hfm::TriangleListMesh triangleListMesh;
QVector<int32_t> originalIndices; // Original indices of the vertices
unsigned int meshIndex; // the order the meshes appeared in the object file
graphics::MeshPointer _mesh;
bool wasCompressed { false };
};
/// A single animation frame.
@ -287,6 +305,30 @@ public:
bool shouldInitCollisions() const { return _collisionsConfig.size() > 0; }
};
// A different skinning representation, used by FBXSerializer. We convert this to our graphics-optimized runtime representation contained within the mesh.
class SkinCluster {
public:
std::vector<uint32_t> indices;
std::vector<float> weights;
};
class SkinDeformer {
public:
std::vector<Cluster> clusters;
};
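As a hedged illustration (values hypothetical): each SkinCluster lists the vertices it influences and their weights; getReweightedDeformers in HFMModelMath later repacks these per-cluster lists into the fixed-width per-vertex clusterIndices/clusterWeights arrays stored on hfm::Mesh.
    // Two clusters influencing a three-vertex mesh
    std::vector<hfm::SkinCluster> exampleSkinClusters(2);
    exampleSkinClusters[0].indices = { 0, 1 };
    exampleSkinClusters[0].weights = { 1.0f, 0.5f };
    exampleSkinClusters[1].indices = { 1, 2 };
    exampleSkinClusters[1].weights = { 0.5f, 1.0f };
    // After reweighting, vertex 1 would carry two (cluster, weight) entries, normalized during the repack.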
// The lightweight model part description.
class Shape {
public:
uint32_t mesh { UNDEFINED_KEY };
uint32_t meshPart { UNDEFINED_KEY };
uint32_t material { UNDEFINED_KEY };
uint32_t joint { UNDEFINED_KEY }; // The hfm::Joint associated with this shape, containing transform information
// TODO: Have all serializers calculate hfm::Shape::transformedExtents in world space where they previously calculated hfm::Mesh::meshExtents. Change all code that uses hfm::Mesh::meshExtents to use this instead.
Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after transform information is applied, while not taking into account rigging/skinning
uint32_t skinDeformer { UNDEFINED_KEY };
};
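A hedged sketch of how a consumer might resolve a Shape back into the arrays it indexes, assuming a fully populated hfm::Model named model; UNDEFINED_KEY marks optional references such as material or skinDeformer:
    for (const hfm::Shape& shape : model.shapes) {
        const hfm::Mesh& mesh = model.meshes[shape.mesh];
        const hfm::MeshPart& meshPart = mesh.parts[shape.meshPart];
        const glm::mat4& transform = model.joints[shape.joint].globalTransform;
        if (shape.material != hfm::UNDEFINED_KEY) {
            const hfm::Material& material = model.materials[shape.material];
            // bind 'material' ...
        }
        bool skinned = (shape.skinDeformer != hfm::UNDEFINED_KEY);
        // draw 'meshPart' of 'mesh' with 'transform' (plus model.skinDeformers[shape.skinDeformer] if skinned)
    }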
/// The runtime model format.
class Model {
public:
@ -297,15 +339,18 @@ public:
QString author;
QString applicationName; ///< the name of the application that generated the model
QVector<Joint> joints;
std::vector<Shape> shapes;
std::vector<Mesh> meshes;
std::vector<Material> materials;
std::vector<SkinDeformer> skinDeformers;
std::vector<Joint> joints;
QHash<QString, int> jointIndices; ///< 1-based, so as to more easily detect missing indices
bool hasSkeletonJoints;
QVector<Mesh> meshes;
QVector<QString> scripts;
QHash<QString, Material> materials;
glm::mat4 offset; // This includes offset, rotation, and scale as specified by the FST file
glm::vec3 neckPivot;
@ -337,19 +382,12 @@ public:
QMap<int, glm::quat> jointRotationOffsets;
std::vector<ShapeVertices> shapeVertices;
FlowData flowData;
void debugDump() const;
};
};
class ExtractedMesh {
public:
hfm::Mesh mesh;
QMultiHash<int, int> newIndices;
QVector<QHash<int, int> > blendshapeIndexMaps;
QVector<QPair<int, int> > partMaterialTextures;
QHash<QString, size_t> texcoordSetMap;
};
typedef hfm::Blendshape HFMBlendshape;
typedef hfm::JointShapeInfo HFMJointShapeInfo;
typedef hfm::Joint HFMJoint;
@ -358,8 +396,10 @@ typedef hfm::Texture HFMTexture;
typedef hfm::MeshPart HFMMeshPart;
typedef hfm::Material HFMMaterial;
typedef hfm::Mesh HFMMesh;
typedef hfm::SkinDeformer HFMSkinDeformer;
typedef hfm::AnimationFrame HFMAnimationFrame;
typedef hfm::Light HFMLight;
typedef hfm::Shape HFMShape;
typedef hfm::Model HFMModel;
typedef hfm::FlowData FlowData;

View file

@ -0,0 +1,212 @@
//
// HFMModelMath.cpp
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2019/10/04.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "HFMModelMath.h"
#include <LogHandler.h>
#include <unordered_map>
#include <GLMHelpers.h>
#include <glm/gtx/hash.hpp>
namespace hfm {
void forEachIndex(const hfm::MeshPart& meshPart, std::function<void(uint32_t)> func) {
for (int i = 0; i <= meshPart.quadIndices.size() - 4; i += 4) {
func((uint32_t)meshPart.quadIndices[i]);
func((uint32_t)meshPart.quadIndices[i+1]);
func((uint32_t)meshPart.quadIndices[i+2]);
func((uint32_t)meshPart.quadIndices[i+3]);
}
for (int i = 0; i <= meshPart.triangleIndices.size() - 3; i += 3) {
func((uint32_t)meshPart.triangleIndices[i]);
func((uint32_t)meshPart.triangleIndices[i+1]);
func((uint32_t)meshPart.triangleIndices[i+2]);
}
}
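As a quick usage note (an illustrative fragment, assuming an hfm::MeshPart named meshPart is in scope): the helper above visits quad indices in groups of four and triangle indices in groups of three, so counting the callbacks gives the number of indices the part actually references.
size_t indexCount = 0;
hfm::forEachIndex(meshPart, [&indexCount](uint32_t) { ++indexCount; });
// indexCount == (quad indices rounded down to a multiple of 4)
//             + (triangle indices rounded down to a multiple of 3)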
void thickenFlatExtents(Extents& extents) {
// Add epsilon to the extents to compensate for a flat plane
extents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON);
extents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON);
}
void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh) {
triangleListMesh.partExtents.resize(triangleListMesh.parts.size());
for (size_t partIndex = 0; partIndex < triangleListMesh.parts.size(); ++partIndex) {
const auto& part = triangleListMesh.parts[partIndex];
auto& extents = triangleListMesh.partExtents[partIndex];
int partEnd = part.x + part.y;
for (int i = part.x; i < partEnd; ++i) {
auto index = triangleListMesh.indices[i];
const auto& position = triangleListMesh.vertices[index];
extents.addPoint(position);
}
}
}
void calculateExtentsForShape(hfm::Shape& shape, const std::vector<hfm::TriangleListMesh>& triangleListMeshes, const std::vector<hfm::Joint>& joints) {
auto& shapeExtents = shape.transformedExtents;
shapeExtents.reset();
const auto& triangleListMesh = triangleListMeshes[shape.mesh];
const auto& partExtent = triangleListMesh.partExtents[shape.meshPart];
const glm::mat4& transform = joints[shape.joint].transform;
shapeExtents = partExtent;
shapeExtents.transform(transform);
thickenFlatExtents(shapeExtents);
}
void calculateExtentsForModel(Extents& modelExtents, const std::vector<hfm::Shape>& shapes) {
modelExtents.reset();
for (size_t i = 0; i < shapes.size(); ++i) {
const auto& shape = shapes[i];
const auto& shapeExtents = shape.transformedExtents;
modelExtents.addExtents(shapeExtents);
}
}
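A minimal sketch of how these helpers compose (assuming shapes, triangleListMeshes, and joints are already-populated vectors with defined keys): per-part extents come from calculateExtentsForTriangleListMesh, each shape then transforms its part's extents by its joint, and the model extents are the union over all shapes.
for (auto& shape : shapes) {
    hfm::calculateExtentsForShape(shape, triangleListMeshes, joints);
}
Extents modelExtents;
hfm::calculateExtentsForModel(modelExtents, shapes);
// modelExtents now bounds every shape's transformedExtents.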
ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const std::vector<hfm::SkinCluster> skinClusters, const uint16_t weightsPerVertex) {
ReweightedDeformers reweightedDeformers;
if (skinClusters.size() == 0) {
return reweightedDeformers;
}
size_t numClusterIndices = numMeshVertices * weightsPerVertex;
reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(skinClusters.size() - 1));
reweightedDeformers.weights.resize(numClusterIndices, 0);
reweightedDeformers.weightsPerVertex = weightsPerVertex;
std::vector<float> weightAccumulators;
weightAccumulators.resize(numClusterIndices, 0.0f);
for (uint16_t i = 0; i < (uint16_t)skinClusters.size(); ++i) {
const hfm::SkinCluster& skinCluster = skinClusters[i];
if (skinCluster.indices.size() != skinCluster.weights.size()) {
reweightedDeformers.trimmedToMatch = true;
}
size_t numIndicesOrWeights = std::min(skinCluster.indices.size(), skinCluster.weights.size());
for (size_t j = 0; j < numIndicesOrWeights; ++j) {
uint32_t index = skinCluster.indices[j];
float weight = skinCluster.weights[j];
// look for an unused slot in the weights vector
uint32_t weightIndex = index * weightsPerVertex;
uint32_t lowestIndex = -1;
float lowestWeight = FLT_MAX;
uint16_t k = 0;
for (; k < weightsPerVertex; k++) {
if (weightAccumulators[weightIndex + k] == 0.0f) {
reweightedDeformers.indices[weightIndex + k] = i;
weightAccumulators[weightIndex + k] = weight;
break;
}
if (weightAccumulators[weightIndex + k] < lowestWeight) {
lowestIndex = k;
lowestWeight = weightAccumulators[weightIndex + k];
}
}
if (k == weightsPerVertex && weight > lowestWeight) {
// no space for an additional weight; we must replace the lowest
weightAccumulators[weightIndex + lowestIndex] = weight;
reweightedDeformers.indices[weightIndex + lowestIndex] = i;
}
}
}
// now that we've accumulated the most relevant weights for each vertex
// normalize and compress to 16-bits
for (size_t i = 0; i < numMeshVertices; ++i) {
size_t j = i * weightsPerVertex;
// normalize weights into uint16_t
float totalWeight = 0.0f;
for (size_t k = j; k < j + weightsPerVertex; ++k) {
totalWeight += weightAccumulators[k];
}
const float ALMOST_HALF = 0.499f;
if (totalWeight > 0.0f) {
float weightScalingFactor = (float)(UINT16_MAX) / totalWeight;
for (size_t k = j; k < j + weightsPerVertex; ++k) {
reweightedDeformers.weights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF);
}
} else {
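// this vertex accumulated no weight at all, so give the full normalized weight to its first slot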
reweightedDeformers.weights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF);
}
}
return reweightedDeformers;
}
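As a toy illustration of the reweighting above (made-up cluster data, not from the commit): two clusters each claim vertex 0 with equal influence, so after normalization the two occupied slots each hold roughly half of UINT16_MAX and the remaining slots stay zero.
#include <hfm/HFMModelMath.h>
hfm::SkinCluster clusterA;
clusterA.indices = { 0 };     // both clusters influence vertex 0
clusterA.weights = { 0.5f };
hfm::SkinCluster clusterB;
clusterB.indices = { 0 };
clusterB.weights = { 0.5f };
const size_t numMeshVertices = 1;
const hfm::ReweightedDeformers reweighted = hfm::getReweightedDeformers(numMeshVertices, { clusterA, clusterB });
// reweighted.weightsPerVertex == 4 (the default); for vertex 0:
//   indices == { 0, 1, 1, 1 }       (unused slots keep the default of the last cluster)
//   weights == { 32767, 32767, 0, 0 } (normalized so the used slots sum to ~UINT16_MAX)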
const TriangleListMesh generateTriangleListMesh(const std::vector<glm::vec3>& srcVertices, const std::vector<HFMMeshPart>& srcParts) {
TriangleListMesh dest;
// copy vertices for now
dest.vertices = srcVertices;
std::vector<uint32_t> oldToNewIndex(srcVertices.size());
{
std::unordered_map<glm::vec3, uint32_t> uniqueVertexToNewIndex;
int oldIndex = 0;
int newIndex = 0;
for (const auto& srcVertex : srcVertices) {
auto foundIndex = uniqueVertexToNewIndex.find(srcVertex);
if (foundIndex != uniqueVertexToNewIndex.end()) {
oldToNewIndex[oldIndex] = foundIndex->second;
} else {
uniqueVertexToNewIndex[srcVertex] = newIndex;
oldToNewIndex[oldIndex] = newIndex;
dest.vertices[newIndex] = srcVertex;
++newIndex;
}
++oldIndex;
}
if (uniqueVertexToNewIndex.size() < srcVertices.size()) {
dest.vertices.resize(uniqueVertexToNewIndex.size());
dest.vertices.shrink_to_fit();
}
}
auto newIndicesCount = 0;
for (const auto& part : srcParts) {
newIndicesCount += part.triangleIndices.size() + part.quadTrianglesIndices.size();
}
{
dest.indices.resize(newIndicesCount);
int i = 0;
for (const auto& part : srcParts) {
glm::ivec2 spart(i, 0);
for (const auto& qti : part.quadTrianglesIndices) {
dest.indices[i] = oldToNewIndex[qti];
++i;
}
for (const auto& ti : part.triangleIndices) {
dest.indices[i] = oldToNewIndex[ti];
++i;
}
spart.y = i - spart.x;
dest.parts.push_back(spart);
}
}
calculateExtentsForTriangleListMesh(dest);
return dest;
}
};
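To make the output of generateTriangleListMesh concrete, here is a small illustration with made-up data (and assuming hfm::MeshPart's index lists accept brace initialization): duplicate source positions collapse into one vertex, each entry of parts is an (offset, count) pair into the shared index list, and partExtents is filled in by calculateExtentsForTriangleListMesh.
std::vector<glm::vec3> srcVertices = {
    { 0.0f, 0.0f, 0.0f },
    { 1.0f, 0.0f, 0.0f },
    { 0.0f, 1.0f, 0.0f },
    { 1.0f, 0.0f, 0.0f }    // duplicate of vertex 1
};
hfm::MeshPart part;
part.triangleIndices = { 0, 1, 2, 2, 3, 0 };    // two triangles sharing an edge
const hfm::TriangleListMesh listMesh = hfm::generateTriangleListMesh(srcVertices, { part });
// listMesh.vertices.size() == 3                 (the duplicate position was merged)
// listMesh.indices         == { 0, 1, 2, 2, 1, 0 }
// listMesh.parts            == { ivec2(0, 6) }  (offset 0, six indices)
// listMesh.partExtents[0] spans (0,0,0) to (1,1,0).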

View file

@ -0,0 +1,45 @@
//
// HFMModelMath.h
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2019/10/04.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_hfm_ModelMath_h
#define hifi_hfm_ModelMath_h
#include "HFM.h"
namespace hfm {
void forEachIndex(const hfm::MeshPart& meshPart, std::function<void(uint32_t)> func);
void initializeExtents(Extents& extents);
void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh);
// This can't be moved to model-baker until
void calculateExtentsForShape(hfm::Shape& shape, const std::vector<hfm::TriangleListMesh>& triangleListMeshes, const std::vector<hfm::Joint>& joints);
void calculateExtentsForModel(Extents& modelExtents, const std::vector<hfm::Shape>& shapes);
struct ReweightedDeformers {
std::vector<uint16_t> indices;
std::vector<uint16_t> weights;
uint16_t weightsPerVertex { 0 };
bool trimmedToMatch { false };
};
const uint16_t DEFAULT_SKINNING_WEIGHTS_PER_VERTEX = 4;
ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const std::vector<hfm::SkinCluster> skinClusters, const uint16_t weightsPerVertex = DEFAULT_SKINNING_WEIGHTS_PER_VERTEX);
const TriangleListMesh generateTriangleListMesh(const std::vector<glm::vec3>& srcVertices, const std::vector<HFMMeshPart>& srcParts);
};
#endif // hifi_hfm_ModelMath_h

View file

@ -1,5 +1,5 @@
//
// FBXSerializer.h
// HFMSerializer.h
// libraries/hfm/src/hfm
//
// Created by Sabrina Shanman on 2018/11/07.

View file

@ -13,34 +13,61 @@
#include "BakerTypes.h"
#include "ModelMath.h"
#include "CollectShapeVerticesTask.h"
#include "BuildGraphicsMeshTask.h"
#include "CalculateMeshNormalsTask.h"
#include "CalculateMeshTangentsTask.h"
#include "CalculateBlendshapeNormalsTask.h"
#include "CalculateBlendshapeTangentsTask.h"
#include "PrepareJointsTask.h"
#include "CalculateTransformedExtentsTask.h"
#include "BuildDracoMeshTask.h"
#include "ParseFlowDataTask.h"
#include <hfm/HFMModelMath.h>
namespace baker {
class GetModelPartsTask {
public:
using Input = hfm::Model::Pointer;
using Output = VaryingSet5<std::vector<hfm::Mesh>, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector<hfm::Joint>>;
using Output = VaryingSet9<std::vector<hfm::Mesh>, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector<hfm::Joint>, std::vector<hfm::Shape>, std::vector<hfm::SkinDeformer>, Extents, std::vector<hfm::Material>>;
using JobModel = Job::ModelIO<GetModelPartsTask, Input, Output>;
void run(const BakeContextPointer& context, const Input& input, Output& output) {
const auto& hfmModelIn = input;
output.edit0() = hfmModelIn->meshes.toStdVector();
output.edit0() = hfmModelIn->meshes;
output.edit1() = hfmModelIn->originalURL;
output.edit2() = hfmModelIn->meshIndicesToModelNames;
auto& blendshapesPerMesh = output.edit3();
blendshapesPerMesh.reserve(hfmModelIn->meshes.size());
for (int i = 0; i < hfmModelIn->meshes.size(); i++) {
for (size_t i = 0; i < hfmModelIn->meshes.size(); i++) {
blendshapesPerMesh.push_back(hfmModelIn->meshes[i].blendshapes.toStdVector());
}
output.edit4() = hfmModelIn->joints.toStdVector();
output.edit4() = hfmModelIn->joints;
output.edit5() = hfmModelIn->shapes;
output.edit6() = hfmModelIn->skinDeformers;
output.edit7() = hfmModelIn->meshExtents;
output.edit8() = hfmModelIn->materials;
}
};
class BuildMeshTriangleListTask {
public:
using Input = std::vector<hfm::Mesh>;
using Output = std::vector<hfm::TriangleListMesh>;
using JobModel = Job::ModelIO<BuildMeshTriangleListTask, Input, Output>;
void run(const BakeContextPointer& context, const Input& input, Output& output) {
const auto& meshesIn = input;
auto& indexedTrianglesMeshOut = output;
indexedTrianglesMeshOut.clear();
indexedTrianglesMeshOut.resize(meshesIn.size());
for (size_t i = 0; i < meshesIn.size(); i++) {
auto& mesh = meshesIn[i];
const auto verticesStd = mesh.vertices.toStdVector();
indexedTrianglesMeshOut[i] = hfm::generateTriangleListMesh(verticesStd, mesh.parts);
}
}
};
@ -75,21 +102,23 @@ namespace baker {
class BuildMeshesTask {
public:
using Input = VaryingSet5<std::vector<hfm::Mesh>, std::vector<graphics::MeshPointer>, NormalsPerMesh, TangentsPerMesh, BlendshapesPerMesh>;
using Input = VaryingSet6<std::vector<hfm::Mesh>, std::vector<hfm::TriangleListMesh>, std::vector<graphics::MeshPointer>, NormalsPerMesh, TangentsPerMesh, BlendshapesPerMesh>;
using Output = std::vector<hfm::Mesh>;
using JobModel = Job::ModelIO<BuildMeshesTask, Input, Output>;
void run(const BakeContextPointer& context, const Input& input, Output& output) {
auto& meshesIn = input.get0();
int numMeshes = (int)meshesIn.size();
auto& graphicsMeshesIn = input.get1();
auto& normalsPerMeshIn = input.get2();
auto& tangentsPerMeshIn = input.get3();
auto& blendshapesPerMeshIn = input.get4();
auto& triangleListMeshesIn = input.get1();
auto& graphicsMeshesIn = input.get2();
auto& normalsPerMeshIn = input.get3();
auto& tangentsPerMeshIn = input.get4();
auto& blendshapesPerMeshIn = input.get5();
auto meshesOut = meshesIn;
for (int i = 0; i < numMeshes; i++) {
auto& meshOut = meshesOut[i];
meshOut.triangleListMesh = triangleListMeshesIn[i];
meshOut._mesh = safeGet(graphicsMeshesIn, i);
meshOut.normals = QVector<glm::vec3>::fromStdVector(safeGet(normalsPerMeshIn, i));
meshOut.tangents = QVector<glm::vec3>::fromStdVector(safeGet(tangentsPerMeshIn, i));
@ -101,17 +130,22 @@ namespace baker {
class BuildModelTask {
public:
using Input = VaryingSet6<hfm::Model::Pointer, std::vector<hfm::Mesh>, std::vector<hfm::Joint>, QMap<int, glm::quat>, QHash<QString, int>, FlowData>;
using Input = VaryingSet9<hfm::Model::Pointer, std::vector<hfm::Mesh>, std::vector<hfm::Joint>, QMap<int, glm::quat>, QHash<QString, int>, FlowData, std::vector<ShapeVertices>, std::vector<hfm::Shape>, Extents>;
using Output = hfm::Model::Pointer;
using JobModel = Job::ModelIO<BuildModelTask, Input, Output>;
void run(const BakeContextPointer& context, const Input& input, Output& output) {
auto hfmModelOut = input.get0();
hfmModelOut->meshes = QVector<hfm::Mesh>::fromStdVector(input.get1());
hfmModelOut->joints = QVector<hfm::Joint>::fromStdVector(input.get2());
hfmModelOut->meshes = input.get1();
hfmModelOut->joints = input.get2();
hfmModelOut->jointRotationOffsets = input.get3();
hfmModelOut->jointIndices = input.get4();
hfmModelOut->flowData = input.get5();
hfmModelOut->shapeVertices = input.get6();
hfmModelOut->shapes = input.get7();
hfmModelOut->meshExtents = input.get8();
// These depend on the ShapeVertices
// TODO: Create a task for this rather than calculating it here
hfmModelOut->computeKdops();
output = hfmModelOut;
}
@ -134,6 +168,10 @@ namespace baker {
const auto meshIndicesToModelNames = modelPartsIn.getN<GetModelPartsTask::Output>(2);
const auto blendshapesPerMeshIn = modelPartsIn.getN<GetModelPartsTask::Output>(3);
const auto jointsIn = modelPartsIn.getN<GetModelPartsTask::Output>(4);
const auto shapesIn = modelPartsIn.getN<GetModelPartsTask::Output>(5);
const auto skinDeformersIn = modelPartsIn.getN<GetModelPartsTask::Output>(6);
const auto modelExtentsIn = modelPartsIn.getN<GetModelPartsTask::Output>(7);
const auto materialsIn = modelPartsIn.getN<GetModelPartsTask::Output>(8);
// Calculate normals and tangents for meshes and blendshapes if they do not exist
// Note: Normals are never calculated here for OBJ models. OBJ files optionally define normals on a per-face basis, so for consistency normals are calculated beforehand in OBJSerializer.
@ -145,8 +183,15 @@ namespace baker {
const auto calculateBlendshapeTangentsInputs = CalculateBlendshapeTangentsTask::Input(normalsPerBlendshapePerMesh, blendshapesPerMeshIn, meshesIn).asVarying();
const auto tangentsPerBlendshapePerMesh = model.addJob<CalculateBlendshapeTangentsTask>("CalculateBlendshapeTangents", calculateBlendshapeTangentsInputs);
// Calculate shape vertices. These rely on the weight-normalized clusterIndices/clusterWeights in the mesh, and are used later for computing the joint kdops
const auto collectShapeVerticesInputs = CollectShapeVerticesTask::Input(meshesIn, shapesIn, jointsIn, skinDeformersIn).asVarying();
const auto shapeVerticesPerJoint = model.addJob<CollectShapeVerticesTask>("CollectShapeVertices", collectShapeVerticesInputs);
// Build the slim triangle list mesh for each hfm::Mesh
const auto triangleListMeshes = model.addJob<BuildMeshTriangleListTask>("BuildMeshTriangleListTask", meshesIn);
// Build the graphics::MeshPointer for each hfm::Mesh
const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh).asVarying();
const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, skinDeformersIn).asVarying();
const auto graphicsMeshes = model.addJob<BuildGraphicsMeshTask>("BuildGraphicsMesh", buildGraphicsMeshInputs);
// Prepare joint information
@ -156,6 +201,12 @@ namespace baker {
const auto jointRotationOffsets = jointInfoOut.getN<PrepareJointsTask::Output>(1);
const auto jointIndices = jointInfoOut.getN<PrepareJointsTask::Output>(2);
// Use transform information to compute extents
const auto calculateExtentsInputs = CalculateTransformedExtentsTask::Input(modelExtentsIn, triangleListMeshes, shapesIn, jointsOut).asVarying();
const auto calculateExtentsOutputs = model.addJob<CalculateTransformedExtentsTask>("CalculateExtents", calculateExtentsInputs);
const auto modelExtentsOut = calculateExtentsOutputs.getN<CalculateTransformedExtentsTask::Output>(0);
const auto shapesOut = calculateExtentsOutputs.getN<CalculateTransformedExtentsTask::Output>(1);
// Parse material mapping
const auto parseMaterialMappingInputs = ParseMaterialMappingTask::Input(mapping, materialMappingBaseURL).asVarying();
const auto materialMapping = model.addJob<ParseMaterialMappingTask>("ParseMaterialMapping", parseMaterialMappingInputs);
@ -165,7 +216,7 @@ namespace baker {
// TODO: Tangent support (Needs changes to FBXSerializer_Mesh as well)
// NOTE: Due to an unresolved linker error, BuildDracoMeshTask is not functional on Android
// TODO: Figure out why BuildDracoMeshTask.cpp won't link with draco on Android
const auto buildDracoMeshInputs = BuildDracoMeshTask::Input(meshesIn, normalsPerMesh, tangentsPerMesh).asVarying();
const auto buildDracoMeshInputs = BuildDracoMeshTask::Input(shapesOut, meshesIn, materialsIn, normalsPerMesh, tangentsPerMesh).asVarying();
const auto buildDracoMeshOutputs = model.addJob<BuildDracoMeshTask>("BuildDracoMesh", buildDracoMeshInputs);
const auto dracoMeshes = buildDracoMeshOutputs.getN<BuildDracoMeshTask::Output>(0);
const auto dracoErrors = buildDracoMeshOutputs.getN<BuildDracoMeshTask::Output>(1);
@ -177,9 +228,9 @@ namespace baker {
// Combine the outputs into a new hfm::Model
const auto buildBlendshapesInputs = BuildBlendshapesTask::Input(blendshapesPerMeshIn, normalsPerBlendshapePerMesh, tangentsPerBlendshapePerMesh).asVarying();
const auto blendshapesPerMeshOut = model.addJob<BuildBlendshapesTask>("BuildBlendshapes", buildBlendshapesInputs);
const auto buildMeshesInputs = BuildMeshesTask::Input(meshesIn, graphicsMeshes, normalsPerMesh, tangentsPerMesh, blendshapesPerMeshOut).asVarying();
const auto buildMeshesInputs = BuildMeshesTask::Input(meshesIn, triangleListMeshes, graphicsMeshes, normalsPerMesh, tangentsPerMesh, blendshapesPerMeshOut).asVarying();
const auto meshesOut = model.addJob<BuildMeshesTask>("BuildMeshes", buildMeshesInputs);
const auto buildModelInputs = BuildModelTask::Input(hfmModelIn, meshesOut, jointsOut, jointRotationOffsets, jointIndices, flowData).asVarying();
const auto buildModelInputs = BuildModelTask::Input(hfmModelIn, meshesOut, jointsOut, jointRotationOffsets, jointIndices, flowData, shapeVerticesPerJoint, shapesOut, modelExtentsOut).asVarying();
const auto hfmModelOut = model.addJob<BuildModelTask>("BuildModel", buildModelInputs);
output = Output(hfmModelOut, materialMapping, dracoMeshes, dracoErrors, materialList);

View file

@ -36,6 +36,14 @@ namespace baker {
using TangentsPerBlendshape = std::vector<std::vector<glm::vec3>>;
using MeshIndicesToModelNames = QHash<int, QString>;
class ReweightedDeformers {
public:
std::vector<uint16_t> indices;
std::vector<uint16_t> weights;
uint16_t weightsPerVertex { 0 };
bool trimmedToMatch { false };
};
};
#endif // hifi_BakerTypes_h

View file

@ -39,19 +39,47 @@
#include "ModelMath.h"
#ifndef Q_OS_ANDROID
std::vector<hifi::ByteArray> createMaterialList(const hfm::Mesh& mesh) {
std::vector<hifi::ByteArray> materialList;
for (const auto& meshPart : mesh.parts) {
auto materialID = QVariant(meshPart.materialID).toByteArray();
const auto materialIt = std::find(materialList.cbegin(), materialList.cend(), materialID);
if (materialIt == materialList.cend()) {
materialList.push_back(materialID);
void reindexMaterials(const std::vector<uint32_t>& originalMaterialIndices, std::vector<uint32_t>& materials, std::vector<uint16_t>& materialIndices) {
materialIndices.resize(originalMaterialIndices.size());
for (size_t i = 0; i < originalMaterialIndices.size(); ++i) {
uint32_t material = originalMaterialIndices[i];
auto foundMaterial = std::find(materials.cbegin(), materials.cend(), material);
if (foundMaterial == materials.cend()) {
materials.push_back(material);
materialIndices[i] = (uint16_t)(materials.size() - 1);
} else {
materialIndices[i] = (uint16_t)(foundMaterial - materials.cbegin());
}
}
return materialList;
}
std::tuple<std::unique_ptr<draco::Mesh>, bool> createDracoMesh(const hfm::Mesh& mesh, const std::vector<glm::vec3>& normals, const std::vector<glm::vec3>& tangents, const std::vector<hifi::ByteArray>& materialList) {
void createMaterialLists(const std::vector<hfm::Shape>& shapes, const std::vector<hfm::Mesh>& meshes, const std::vector<hfm::Material>& hfmMaterials, std::vector<std::vector<hifi::ByteArray>>& materialIndexLists, std::vector<std::vector<uint16_t>>& partMaterialIndicesPerMesh) {
std::vector<std::vector<uint32_t>> materialsPerMesh;
for (const auto& mesh : meshes) {
materialsPerMesh.emplace_back(mesh.parts.size(), hfm::UNDEFINED_KEY);
}
for (const auto& shape : shapes) {
materialsPerMesh[shape.mesh][shape.meshPart] = shape.material;
}
materialIndexLists.resize(materialsPerMesh.size());
partMaterialIndicesPerMesh.resize(materialsPerMesh.size());
for (size_t i = 0; i < materialsPerMesh.size(); ++i) {
const std::vector<uint32_t>& materials = materialsPerMesh[i];
std::vector<uint32_t> uniqueMaterials;
reindexMaterials(materials, uniqueMaterials, partMaterialIndicesPerMesh[i]);
materialIndexLists[i].reserve(uniqueMaterials.size());
for (const uint32_t material : uniqueMaterials) {
const auto& hfmMaterial = hfmMaterials[material];
materialIndexLists[i].push_back(QVariant(hfmMaterial.materialID).toByteArray());
}
}
}
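A small worked example of reindexMaterials above (the material keys are made up): repeated global material keys collapse into a per-mesh unique list, and each part records where its material sits in that list.
std::vector<uint32_t> perPartMaterials = { 5, 7, 5 };   // global material keys, one per mesh part
std::vector<uint32_t> uniqueMaterials;
std::vector<uint16_t> partMaterialIndices;
reindexMaterials(perPartMaterials, uniqueMaterials, partMaterialIndices);
// uniqueMaterials     == { 5, 7 }
// partMaterialIndices == { 0, 1, 0 }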
std::tuple<std::unique_ptr<draco::Mesh>, bool> createDracoMesh(const hfm::Mesh& mesh, const std::vector<glm::vec3>& normals, const std::vector<glm::vec3>& tangents, const std::vector<uint16_t>& partMaterialIndices) {
Q_ASSERT(normals.size() == 0 || (int)normals.size() == mesh.vertices.size());
Q_ASSERT(mesh.colors.size() == 0 || mesh.colors.size() == mesh.vertices.size());
Q_ASSERT(mesh.texCoords.size() == 0 || mesh.texCoords.size() == mesh.vertices.size());
@ -122,11 +150,9 @@ std::tuple<std::unique_ptr<draco::Mesh>, bool> createDracoMesh(const hfm::Mesh&
auto partIndex = 0;
draco::FaceIndex face;
uint16_t materialID;
for (auto& part : mesh.parts) {
auto materialIt = std::find(materialList.cbegin(), materialList.cend(), QVariant(part.materialID).toByteArray());
materialID = (uint16_t)(materialIt - materialList.cbegin());
uint16_t materialID = partMaterialIndices[partIndex];
auto addFace = [&](const QVector<int>& indices, int index, draco::FaceIndex face) {
int32_t idx0 = indices[index];
@ -214,30 +240,33 @@ void BuildDracoMeshTask::run(const baker::BakeContextPointer& context, const Inp
#ifdef Q_OS_ANDROID
qCWarning(model_baker) << "BuildDracoMesh is disabled on Android. Output meshes will be empty.";
#else
const auto& meshes = input.get0();
const auto& normalsPerMesh = input.get1();
const auto& tangentsPerMesh = input.get2();
const auto& shapes = input.get0();
const auto& meshes = input.get1();
const auto& materials = input.get2();
const auto& normalsPerMesh = input.get3();
const auto& tangentsPerMesh = input.get4();
auto& dracoBytesPerMesh = output.edit0();
auto& dracoErrorsPerMesh = output.edit1();
auto& materialLists = output.edit2();
std::vector<std::vector<uint16_t>> partMaterialIndicesPerMesh;
createMaterialLists(shapes, meshes, materials, materialLists, partMaterialIndicesPerMesh);
dracoBytesPerMesh.reserve(meshes.size());
// vector<bool> is an exception to the std::vector conventions as it is a bit field
// So a bool reference to an element doesn't work
dracoErrorsPerMesh.resize(meshes.size());
materialLists.reserve(meshes.size());
for (size_t i = 0; i < meshes.size(); i++) {
const auto& mesh = meshes[i];
const auto& normals = baker::safeGet(normalsPerMesh, i);
const auto& tangents = baker::safeGet(tangentsPerMesh, i);
dracoBytesPerMesh.emplace_back();
auto& dracoBytes = dracoBytesPerMesh.back();
materialLists.push_back(createMaterialList(mesh));
const auto& materialList = materialLists.back();
const auto& partMaterialIndices = partMaterialIndicesPerMesh[i];
bool dracoError;
std::unique_ptr<draco::Mesh> dracoMesh;
std::tie(dracoMesh, dracoError) = createDracoMesh(mesh, normals, tangents, materialList);
std::tie(dracoMesh, dracoError) = createDracoMesh(mesh, normals, tangents, partMaterialIndices);
dracoErrorsPerMesh[i] = dracoError;
if (dracoMesh) {

View file

@ -33,7 +33,7 @@ public:
class BuildDracoMeshTask {
public:
using Config = BuildDracoMeshConfig;
using Input = baker::VaryingSet3<std::vector<hfm::Mesh>, baker::NormalsPerMesh, baker::TangentsPerMesh>;
using Input = baker::VaryingSet5<std::vector<hfm::Shape>, std::vector<hfm::Mesh>, std::vector<hfm::Material>, baker::NormalsPerMesh, baker::TangentsPerMesh>;
using Output = baker::VaryingSet3<std::vector<hifi::ByteArray>, std::vector<bool>, std::vector<std::vector<hifi::ByteArray>>>;
using JobModel = baker::Job::ModelIO<BuildDracoMeshTask, Input, Output, Config>;

View file

@ -2,8 +2,8 @@
// BuildGraphicsMeshTask.h
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2018/12/06.
// Copyright 2018 High Fidelity, Inc.
// Created by Sabrina Shanman on 2019/09/16.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -15,6 +15,7 @@
#include <LogHandler.h>
#include "ModelBakerLogging.h"
#include <hfm/HFMModelMath.h>
#include "ModelMath.h"
using vec2h = glm::tvec2<glm::detail::hdata>;
@ -27,7 +28,7 @@ glm::vec3 normalizeDirForPacking(const glm::vec3& dir) {
return dir;
}
void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, const baker::MeshNormals& meshNormals, const baker::MeshTangents& meshTangentsIn) {
void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, const baker::MeshNormals& meshNormals, const baker::MeshTangents& meshTangentsIn, uint16_t numDeformerControllers) {
auto graphicsMesh = std::make_shared<graphics::Mesh>();
// Fill tangents with a dummy value to force tangents to be present if there are normals
@ -86,25 +87,24 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics
// Support for 4 skinning clusters:
// 4 Indices are uint8 ideally, uint16 if more than 256.
const auto clusterIndiceElement = (hfmMesh.clusters.size() < UINT8_MAX ? gpu::Element(gpu::VEC4, gpu::UINT8, gpu::XYZW) : gpu::Element(gpu::VEC4, gpu::UINT16, gpu::XYZW));
const auto clusterIndiceElement = ((numDeformerControllers < (uint16_t)UINT8_MAX) ? gpu::Element(gpu::VEC4, gpu::UINT8, gpu::XYZW) : gpu::Element(gpu::VEC4, gpu::UINT16, gpu::XYZW));
// 4 Weights are normalized 16bits
const auto clusterWeightElement = gpu::Element(gpu::VEC4, gpu::NUINT16, gpu::XYZW);
// Cluster indices and weights must be the same sizes
const int NUM_CLUSTERS_PER_VERT = 4;
const int numVertClusters = (hfmMesh.clusterIndices.size() == hfmMesh.clusterWeights.size() ? hfmMesh.clusterIndices.size() / NUM_CLUSTERS_PER_VERT : 0);
const int clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize();
const int clusterWeightsSize = numVertClusters * clusterWeightElement.getSize();
// Record cluster sizes
const size_t numVertClusters = hfmMesh.clusterWeightsPerVertex == 0 ? 0 : hfmMesh.clusterIndices.size() / hfmMesh.clusterWeightsPerVertex;
const size_t clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize();
const size_t clusterWeightsSize = numVertClusters * clusterWeightElement.getSize();
// Decide where to put what sequentially in a big buffer:
const int positionsOffset = 0;
const int normalsAndTangentsOffset = positionsOffset + positionsSize;
const int colorsOffset = normalsAndTangentsOffset + normalsAndTangentsSize;
const int texCoordsOffset = colorsOffset + colorsSize;
const int texCoords1Offset = texCoordsOffset + texCoordsSize;
const int clusterIndicesOffset = texCoords1Offset + texCoords1Size;
const int clusterWeightsOffset = clusterIndicesOffset + clusterIndicesSize;
const int totalVertsSize = clusterWeightsOffset + clusterWeightsSize;
const size_t positionsOffset = 0;
const size_t normalsAndTangentsOffset = positionsOffset + positionsSize;
const size_t colorsOffset = normalsAndTangentsOffset + normalsAndTangentsSize;
const size_t texCoordsOffset = colorsOffset + colorsSize;
const size_t texCoords1Offset = texCoordsOffset + texCoordsSize;
const size_t clusterIndicesOffset = texCoords1Offset + texCoords1Size;
const size_t clusterWeightsOffset = clusterIndicesOffset + clusterIndicesSize;
const size_t totalVertsSize = clusterWeightsOffset + clusterWeightsSize;
// Copy all vertex data in a single buffer
auto vertBuffer = std::make_shared<gpu::Buffer>();
@ -181,22 +181,22 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics
// Clusters data
if (clusterIndicesSize > 0) {
if (hfmMesh.clusters.size() < UINT8_MAX) {
if (numDeformerControllers < (uint16_t)UINT8_MAX) {
// yay! we can fit the clusterIndices within 8-bits
int32_t numIndices = hfmMesh.clusterIndices.size();
QVector<uint8_t> clusterIndices;
clusterIndices.resize(numIndices);
int32_t numIndices = (int32_t)hfmMesh.clusterIndices.size();
std::vector<uint8_t> packedDeformerIndices;
packedDeformerIndices.resize(numIndices);
for (int32_t i = 0; i < numIndices; ++i) {
assert(hfmMesh.clusterIndices[i] <= UINT8_MAX);
clusterIndices[i] = (uint8_t)(hfmMesh.clusterIndices[i]);
packedDeformerIndices[i] = (uint8_t)(hfmMesh.clusterIndices[i]);
}
vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) clusterIndices.constData());
vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) packedDeformerIndices.data());
} else {
vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) hfmMesh.clusterIndices.constData());
vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) hfmMesh.clusterIndices.data());
}
}
if (clusterWeightsSize > 0) {
vertBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (const gpu::Byte*) hfmMesh.clusterWeights.constData());
vertBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (const gpu::Byte*) hfmMesh.clusterWeights.data());
}
@ -206,7 +206,7 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics
auto vertexBufferStream = std::make_shared<gpu::BufferStream>();
gpu::BufferPointer attribBuffer;
int totalAttribBufferSize = totalVertsSize;
size_t totalAttribBufferSize = totalVertsSize;
gpu::uint8 posChannel = 0;
gpu::uint8 tangentChannel = posChannel;
gpu::uint8 attribChannel = posChannel;
@ -377,6 +377,17 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const
const auto& meshIndicesToModelNames = input.get2();
const auto& normalsPerMesh = input.get3();
const auto& tangentsPerMesh = input.get4();
const auto& shapes = input.get5();
const auto& skinDeformers = input.get6();
// Currently, there is only (at most) one skinDeformer per mesh
// An undefined shape.skinDeformer has the value hfm::UNDEFINED_KEY
std::vector<uint32_t> skinDeformerPerMesh;
skinDeformerPerMesh.resize(meshes.size(), hfm::UNDEFINED_KEY);
for (const auto& shape : shapes) {
uint32_t skinDeformerIndex = shape.skinDeformer;
skinDeformerPerMesh[shape.mesh] = skinDeformerIndex;
}
auto& graphicsMeshes = output;
@ -384,9 +395,16 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const
for (int i = 0; i < n; i++) {
graphicsMeshes.emplace_back();
auto& graphicsMesh = graphicsMeshes[i];
uint16_t numDeformerControllers = 0;
uint32_t skinDeformerIndex = skinDeformerPerMesh[i];
if (skinDeformerIndex != hfm::UNDEFINED_KEY) {
const hfm::SkinDeformer& skinDeformer = skinDeformers[skinDeformerIndex];
numDeformerControllers = (uint16_t)skinDeformer.clusters.size();
}
// Try to create the graphics::Mesh
buildGraphicsMesh(meshes[i], graphicsMesh, baker::safeGet(normalsPerMesh, i), baker::safeGet(tangentsPerMesh, i));
buildGraphicsMesh(meshes[i], graphicsMesh, baker::safeGet(normalsPerMesh, i), baker::safeGet(tangentsPerMesh, i), numDeformerControllers);
// Choose a name for the mesh
if (graphicsMesh) {

View file

@ -2,8 +2,8 @@
// BuildGraphicsMeshTask.h
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2018/12/06.
// Copyright 2018 High Fidelity, Inc.
// Created by Sabrina Shanman on 2019/09/16.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@ -20,7 +20,7 @@
class BuildGraphicsMeshTask {
public:
using Input = baker::VaryingSet5<std::vector<hfm::Mesh>, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh>;
using Input = baker::VaryingSet7<std::vector<hfm::Mesh>, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh, std::vector<hfm::Shape>, std::vector<hfm::SkinDeformer>>;
using Output = std::vector<graphics::MeshPointer>;
using JobModel = baker::Job::ModelIO<BuildGraphicsMeshTask, Input, Output>;

View file

@ -30,7 +30,7 @@ void CalculateMeshTangentsTask::run(const baker::BakeContextPointer& context, co
// Otherwise, confirm that we have the normals and texcoords needed
if (!tangentsIn.empty()) {
tangentsOut = tangentsIn.toStdVector();
} else if (!normals.empty() && mesh.vertices.size() == mesh.texCoords.size()) {
} else if (!normals.empty() && mesh.vertices.size() <= mesh.texCoords.size()) {
tangentsOut.resize(normals.size());
baker::calculateTangents(mesh,
[&mesh, &normals, &tangentsOut](int firstIndex, int secondIndex, glm::vec3* outVertices, glm::vec2* outTexCoords, glm::vec3& outNormal) {

View file

@ -0,0 +1,41 @@
//
// CalculateTransformedExtentsTask.cpp
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2019/10/04.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "CalculateTransformedExtentsTask.h"
#include "hfm/HFMModelMath.h"
void CalculateTransformedExtentsTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) {
const auto& modelExtentsIn = input.get0();
const auto& triangleListMeshes = input.get1();
const auto& shapesIn = input.get2();
const auto& joints = input.get3();
auto& modelExtentsOut = output.edit0();
auto& shapesOut = output.edit1();
shapesOut.reserve(shapesIn.size());
for (size_t i = 0; i < shapesIn.size(); ++i) {
shapesOut.push_back(shapesIn[i]);
auto& shapeOut = shapesOut.back();
auto& shapeExtents = shapeOut.transformedExtents;
if (shapeExtents.isValid()) {
continue;
}
hfm::calculateExtentsForShape(shapeOut, triangleListMeshes, joints);
}
modelExtentsOut = modelExtentsIn;
if (!modelExtentsOut.isValid()) {
hfm::calculateExtentsForModel(modelExtentsOut, shapesOut);
}
}

View file

@ -0,0 +1,29 @@
//
// CalculateTransformedExtentsTask.h
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2019/10/04.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_CalculateExtentsTask_h
#define hifi_CalculateExtentsTask_h
#include "Engine.h"
#include "hfm/HFM.h"
// Calculates any undefined extents in the shapes and the model. Precalculated extents will be left alone.
// Bind extents will currently not be calculated
class CalculateTransformedExtentsTask {
public:
using Input = baker::VaryingSet4<Extents, std::vector<hfm::TriangleListMesh>, std::vector<hfm::Shape>, std::vector<hfm::Joint>>;
using Output = baker::VaryingSet2<Extents, std::vector<hfm::Shape>>;
using JobModel = baker::Job::ModelIO<CalculateTransformedExtentsTask, Input, Output>;
void run(const baker::BakeContextPointer& context, const Input& input, Output& output);
};
#endif // hifi_CalculateExtentsTask_h

View file

@ -0,0 +1,91 @@
//
// CollectShapeVerticesTask.h
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2019/09/27.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "CollectShapeVerticesTask.h"
#include <glm/gtx/transform.hpp>
#include <hfm/HFMModelMath.h>
// Used to track and avoid duplicate shape vertices, as multiple shapes can have the same mesh and skinDeformer
class VertexSource {
public:
uint32_t mesh;
uint32_t skinDeformer;
bool operator==(const VertexSource& other) const {
return mesh == other.mesh &&
skinDeformer == other.skinDeformer;
}
};
void CollectShapeVerticesTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) {
const auto& meshes = input.get0();
const auto& shapes = input.get1();
const auto& joints = input.get2();
const auto& skinDeformers = input.get3();
auto& shapeVerticesPerJoint = output;
shapeVerticesPerJoint.resize(joints.size());
std::vector<std::vector<VertexSource>> vertexSourcesPerJoint;
vertexSourcesPerJoint.resize(joints.size());
for (size_t i = 0; i < shapes.size(); ++i) {
const auto& shape = shapes[i];
const uint32_t skinDeformerKey = shape.skinDeformer;
if (skinDeformerKey == hfm::UNDEFINED_KEY) {
continue;
}
VertexSource vertexSource;
vertexSource.mesh = shape.mesh;
vertexSource.skinDeformer = skinDeformerKey;
const auto& skinDeformer = skinDeformers[skinDeformerKey];
for (size_t j = 0; j < skinDeformer.clusters.size(); ++j) {
const auto& cluster = skinDeformer.clusters[j];
const uint32_t jointIndex = cluster.jointIndex;
auto& vertexSources = vertexSourcesPerJoint[jointIndex];
if (std::find(vertexSources.cbegin(), vertexSources.cend(), vertexSource) == vertexSources.cend()) {
vertexSources.push_back(vertexSource);
auto& shapeVertices = shapeVerticesPerJoint[jointIndex];
const auto& mesh = meshes[shape.mesh];
const auto& vertices = mesh.vertices;
const glm::mat4 meshToJoint = cluster.inverseBindMatrix;
const uint16_t weightsPerVertex = mesh.clusterWeightsPerVertex;
if (weightsPerVertex == 0) {
for (int vertexIndex = 0; vertexIndex < (int)vertices.size(); ++vertexIndex) {
const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertices[vertexIndex]);
shapeVertices.push_back(extractTranslation(vertexTransform));
}
} else {
for (int vertexIndex = 0; vertexIndex < (int)vertices.size(); ++vertexIndex) {
for (uint16_t weightIndex = 0; weightIndex < weightsPerVertex; ++weightIndex) {
const size_t index = vertexIndex*weightsPerVertex + weightIndex;
const uint16_t clusterIndex = mesh.clusterIndices[index];
const uint16_t clusterWeight = mesh.clusterWeights[index];
// Remember vertices associated with this joint with at least 1/4 weight
const uint16_t EXPANSION_WEIGHT_THRESHOLD = std::numeric_limits<uint16_t>::max() / 4;
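// With uint16_t-normalized weights this is 65535 / 4 = 16383, i.e. roughly a 25% influence.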
if (clusterIndex != j || clusterWeight < EXPANSION_WEIGHT_THRESHOLD) {
continue;
}
const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertices[vertexIndex]);
shapeVertices.push_back(extractTranslation(vertexTransform));
}
}
}
}
}
}
}

View file

@ -0,0 +1,30 @@
//
// CollectShapeVerticesTask.h
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2019/09/27.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_CollectShapeVerticesTask_h
#define hifi_CollectShapeVerticesTask_h
#include <hfm/HFM.h>
#include "Engine.h"
#include "BakerTypes.h"
class CollectShapeVerticesTask {
public:
using Input = baker::VaryingSet4<std::vector<hfm::Mesh>, std::vector<hfm::Shape>, std::vector<hfm::Joint>, std::vector<hfm::SkinDeformer>>;
using Output = std::vector<ShapeVertices>;
using JobModel = baker::Job::ModelIO<CollectShapeVerticesTask, Input, Output>;
void run(const baker::BakeContextPointer& context, const Input& input, Output& output);
};
#endif // hifi_CollectShapeVerticesTask_h

View file

@ -203,23 +203,23 @@ QUrl resolveTextureBaseUrl(const QUrl& url, const QUrl& textureBaseUrl) {
return textureBaseUrl.isValid() ? textureBaseUrl : url;
}
GeometryResource::GeometryResource(const GeometryResource& other) :
ModelResource::ModelResource(const ModelResource& other) :
Resource(other),
Geometry(other),
NetworkModel(other),
_modelLoader(other._modelLoader),
_mappingPair(other._mappingPair),
_textureBaseURL(other._textureBaseURL),
_combineParts(other._combineParts),
_isCacheable(other._isCacheable)
{
if (other._geometryResource) {
if (other._modelResource) {
_startedLoading = false;
}
}
void GeometryResource::downloadFinished(const QByteArray& data) {
void ModelResource::downloadFinished(const QByteArray& data) {
if (_effectiveBaseURL.fileName().toLower().endsWith(".fst")) {
PROFILE_ASYNC_BEGIN(resource_parse_geometry, "GeometryResource::downloadFinished", _url.toString(), { { "url", _url.toString() } });
PROFILE_ASYNC_BEGIN(resource_parse_geometry, "ModelResource::downloadFinished", _url.toString(), { { "url", _url.toString() } });
// store parsed contents of FST file
_mapping = FSTReader::readMapping(data);
@ -267,19 +267,19 @@ void GeometryResource::downloadFinished(const QByteArray& data) {
auto modelCache = DependencyManager::get<ModelCache>();
GeometryExtra extra { GeometryMappingPair(base, _mapping), _textureBaseURL, false };
// Get the raw GeometryResource
_geometryResource = modelCache->getResource(url, QUrl(), &extra, std::hash<GeometryExtra>()(extra)).staticCast<GeometryResource>();
// Get the raw ModelResource
_modelResource = modelCache->getResource(url, QUrl(), &extra, std::hash<GeometryExtra>()(extra)).staticCast<ModelResource>();
// Avoid caching nested resources - their references will be held by the parent
_geometryResource->_isCacheable = false;
_modelResource->_isCacheable = false;
if (_geometryResource->isLoaded()) {
onGeometryMappingLoaded(!_geometryResource->getURL().isEmpty());
if (_modelResource->isLoaded()) {
onGeometryMappingLoaded(!_modelResource->getURL().isEmpty());
} else {
if (_connection) {
disconnect(_connection);
}
_connection = connect(_geometryResource.data(), &Resource::finished, this, &GeometryResource::onGeometryMappingLoaded);
_connection = connect(_modelResource.data(), &Resource::finished, this, &ModelResource::onGeometryMappingLoaded);
}
}
} else {
@ -291,32 +291,31 @@ void GeometryResource::downloadFinished(const QByteArray& data) {
}
}
void GeometryResource::onGeometryMappingLoaded(bool success) {
if (success && _geometryResource) {
_hfmModel = _geometryResource->_hfmModel;
_materialMapping = _geometryResource->_materialMapping;
_meshParts = _geometryResource->_meshParts;
_meshes = _geometryResource->_meshes;
_materials = _geometryResource->_materials;
void ModelResource::onGeometryMappingLoaded(bool success) {
if (success && _modelResource) {
_hfmModel = _modelResource->_hfmModel;
_materialMapping = _modelResource->_materialMapping;
_meshes = _modelResource->_meshes;
_materials = _modelResource->_materials;
// Avoid holding onto extra references
_geometryResource.reset();
_modelResource.reset();
// Make sure connection will not trigger again
disconnect(_connection); // FIXME Should not have to do this
}
PROFILE_ASYNC_END(resource_parse_geometry, "GeometryResource::downloadFinished", _url.toString());
PROFILE_ASYNC_END(resource_parse_geometry, "ModelResource::downloadFinished", _url.toString());
finishedLoading(success);
}
void GeometryResource::setExtra(void* extra) {
void ModelResource::setExtra(void* extra) {
const GeometryExtra* geometryExtra = static_cast<const GeometryExtra*>(extra);
_mappingPair = geometryExtra ? geometryExtra->mapping : GeometryMappingPair(QUrl(), QVariantHash());
_textureBaseURL = geometryExtra ? resolveTextureBaseUrl(_url, geometryExtra->textureBaseUrl) : QUrl();
_combineParts = geometryExtra ? geometryExtra->combineParts : true;
}
void GeometryResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const MaterialMapping& materialMapping) {
void ModelResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const MaterialMapping& materialMapping) {
// Assume ownership of the processed HFMModel
_hfmModel = hfmModel;
_materialMapping = materialMapping;
@ -329,31 +328,23 @@ void GeometryResource::setGeometryDefinition(HFMModel::Pointer hfmModel, const M
}
std::shared_ptr<GeometryMeshes> meshes = std::make_shared<GeometryMeshes>();
std::shared_ptr<GeometryMeshParts> parts = std::make_shared<GeometryMeshParts>();
int meshID = 0;
for (const HFMMesh& mesh : _hfmModel->meshes) {
// Copy mesh pointers
meshes->emplace_back(mesh._mesh);
int partID = 0;
for (const HFMMeshPart& part : mesh.parts) {
// Construct local parts
parts->push_back(std::make_shared<MeshPart>(meshID, partID, (int)materialIDAtlas[part.materialID]));
partID++;
}
meshID++;
}
_meshes = meshes;
_meshParts = parts;
finishedLoading(true);
}
void GeometryResource::deleter() {
void ModelResource::deleter() {
resetTextures();
Resource::deleter();
}
void GeometryResource::setTextures() {
void ModelResource::setTextures() {
if (_hfmModel) {
for (const HFMMaterial& material : _hfmModel->materials) {
_materials.push_back(std::make_shared<NetworkMaterial>(material, _textureBaseURL));
@ -361,7 +352,7 @@ void GeometryResource::setTextures() {
}
}
void GeometryResource::resetTextures() {
void ModelResource::resetTextures() {
_materials.clear();
}
@ -377,17 +368,17 @@ ModelCache::ModelCache() {
}
QSharedPointer<Resource> ModelCache::createResource(const QUrl& url) {
return QSharedPointer<Resource>(new GeometryResource(url, _modelLoader), &GeometryResource::deleter);
return QSharedPointer<Resource>(new ModelResource(url, _modelLoader), &ModelResource::deleter);
}
QSharedPointer<Resource> ModelCache::createResourceCopy(const QSharedPointer<Resource>& resource) {
return QSharedPointer<Resource>(new GeometryResource(*resource.staticCast<GeometryResource>()), &GeometryResource::deleter);
return QSharedPointer<Resource>(new ModelResource(*resource.staticCast<ModelResource>()), &ModelResource::deleter);
}
GeometryResource::Pointer ModelCache::getGeometryResource(const QUrl& url, const GeometryMappingPair& mapping, const QUrl& textureBaseUrl) {
ModelResource::Pointer ModelCache::getModelResource(const QUrl& url, const GeometryMappingPair& mapping, const QUrl& textureBaseUrl) {
bool combineParts = true;
GeometryExtra geometryExtra = { mapping, textureBaseUrl, combineParts };
GeometryResource::Pointer resource = getResource(url, QUrl(), &geometryExtra, std::hash<GeometryExtra>()(geometryExtra)).staticCast<GeometryResource>();
ModelResource::Pointer resource = getResource(url, QUrl(), &geometryExtra, std::hash<GeometryExtra>()(geometryExtra)).staticCast<ModelResource>();
if (resource) {
if (resource->isLoaded() && resource->shouldSetTextures()) {
resource->setTextures();
@ -396,12 +387,12 @@ GeometryResource::Pointer ModelCache::getGeometryResource(const QUrl& url, const
return resource;
}
GeometryResource::Pointer ModelCache::getCollisionGeometryResource(const QUrl& url,
ModelResource::Pointer ModelCache::getCollisionModelResource(const QUrl& url,
const GeometryMappingPair& mapping,
const QUrl& textureBaseUrl) {
bool combineParts = false;
GeometryExtra geometryExtra = { mapping, textureBaseUrl, combineParts };
GeometryResource::Pointer resource = getResource(url, QUrl(), &geometryExtra, std::hash<GeometryExtra>()(geometryExtra)).staticCast<GeometryResource>();
ModelResource::Pointer resource = getResource(url, QUrl(), &geometryExtra, std::hash<GeometryExtra>()(geometryExtra)).staticCast<ModelResource>();
if (resource) {
if (resource->isLoaded() && resource->shouldSetTextures()) {
resource->setTextures();
@ -410,7 +401,7 @@ GeometryResource::Pointer ModelCache::getCollisionGeometryResource(const QUrl& u
return resource;
}
const QVariantMap Geometry::getTextures() const {
const QVariantMap NetworkModel::getTextures() const {
QVariantMap textures;
for (const auto& material : _materials) {
for (const auto& texture : material->_textures) {
@ -424,22 +415,21 @@ const QVariantMap Geometry::getTextures() const {
}
// FIXME: The materials should only be copied when modified, but the Model currently caches the original
Geometry::Geometry(const Geometry& geometry) {
_hfmModel = geometry._hfmModel;
_materialMapping = geometry._materialMapping;
_meshes = geometry._meshes;
_meshParts = geometry._meshParts;
NetworkModel::NetworkModel(const NetworkModel& networkModel) {
_hfmModel = networkModel._hfmModel;
_materialMapping = networkModel._materialMapping;
_meshes = networkModel._meshes;
_materials.reserve(geometry._materials.size());
for (const auto& material : geometry._materials) {
_materials.reserve(networkModel._materials.size());
for (const auto& material : networkModel._materials) {
_materials.push_back(std::make_shared<NetworkMaterial>(*material));
}
_animGraphOverrideUrl = geometry._animGraphOverrideUrl;
_mapping = geometry._mapping;
_animGraphOverrideUrl = networkModel._animGraphOverrideUrl;
_mapping = networkModel._mapping;
}
void Geometry::setTextures(const QVariantMap& textureMap) {
void NetworkModel::setTextures(const QVariantMap& textureMap) {
if (_meshes->size() > 0) {
for (auto& material : _materials) {
// Check if any material textures actually changed
@ -447,7 +437,7 @@ void Geometry::setTextures(const QVariantMap& textureMap) {
[&textureMap](const NetworkMaterial::Textures::value_type& it) { return it.second.texture && textureMap.contains(it.second.name); })) {
// FIXME: The Model currently caches the materials (waste of space!)
// so they must be copied in the Geometry copy-ctor
// so they must be copied in the NetworkModel copy-ctor
// if (material->isOriginal()) {
// // Copy the material to avoid mutating the cached version
// material = std::make_shared<NetworkMaterial>(*material);
@ -461,11 +451,11 @@ void Geometry::setTextures(const QVariantMap& textureMap) {
// If we only use cached textures, they should all be loaded
areTexturesLoaded();
} else {
qCWarning(modelnetworking) << "Ignoring setTextures(); geometry not ready";
qCWarning(modelnetworking) << "Ignoring setTextures(); NetworkModel not ready";
}
}
bool Geometry::areTexturesLoaded() const {
bool NetworkModel::areTexturesLoaded() const {
if (!_areTexturesLoaded) {
for (auto& material : _materials) {
if (material->isMissingTexture()) {
@ -500,30 +490,28 @@ bool Geometry::areTexturesLoaded() const {
return true;
}
const std::shared_ptr<NetworkMaterial> Geometry::getShapeMaterial(int partID) const {
if ((partID >= 0) && (partID < (int)_meshParts->size())) {
int materialID = _meshParts->at(partID)->materialID;
if ((materialID >= 0) && (materialID < (int)_materials.size())) {
return _materials[materialID];
}
const std::shared_ptr<NetworkMaterial> NetworkModel::getShapeMaterial(int shapeID) const {
uint32_t materialID = getHFMModel().shapes[shapeID].material;
if (materialID < (uint32_t)_materials.size()) {
return _materials[materialID];
}
return nullptr;
}
void GeometryResourceWatcher::startWatching() {
connect(_resource.data(), &Resource::finished, this, &GeometryResourceWatcher::resourceFinished);
connect(_resource.data(), &Resource::onRefresh, this, &GeometryResourceWatcher::resourceRefreshed);
void ModelResourceWatcher::startWatching() {
connect(_resource.data(), &Resource::finished, this, &ModelResourceWatcher::resourceFinished);
connect(_resource.data(), &Resource::onRefresh, this, &ModelResourceWatcher::resourceRefreshed);
if (_resource->isLoaded()) {
resourceFinished(!_resource->getURL().isEmpty());
}
}
void GeometryResourceWatcher::stopWatching() {
disconnect(_resource.data(), &Resource::finished, this, &GeometryResourceWatcher::resourceFinished);
disconnect(_resource.data(), &Resource::onRefresh, this, &GeometryResourceWatcher::resourceRefreshed);
void ModelResourceWatcher::stopWatching() {
disconnect(_resource.data(), &Resource::finished, this, &ModelResourceWatcher::resourceFinished);
disconnect(_resource.data(), &Resource::onRefresh, this, &ModelResourceWatcher::resourceRefreshed);
}
void GeometryResourceWatcher::setResource(GeometryResource::Pointer resource) {
void ModelResourceWatcher::setResource(ModelResource::Pointer resource) {
if (_resource) {
stopWatching();
}
@ -537,14 +525,14 @@ void GeometryResourceWatcher::setResource(GeometryResource::Pointer resource) {
}
}
void GeometryResourceWatcher::resourceFinished(bool success) {
void ModelResourceWatcher::resourceFinished(bool success) {
if (success) {
_geometryRef = std::make_shared<Geometry>(*_resource);
_networkModelRef = std::make_shared<NetworkModel>(*_resource);
}
emit finished(success);
}
void GeometryResourceWatcher::resourceRefreshed() {
void ModelResourceWatcher::resourceRefreshed() {
// FIXME: Model is not set up to handle a refresh
// _instance.reset();
}

View file

@ -22,23 +22,20 @@
#include <material-networking/TextureCache.h>
#include "ModelLoader.h"
class MeshPart;
using GeometryMappingPair = std::pair<QUrl, QVariantHash>;
Q_DECLARE_METATYPE(GeometryMappingPair)
class Geometry {
class NetworkModel {
public:
using Pointer = std::shared_ptr<Geometry>;
using WeakPointer = std::weak_ptr<Geometry>;
using Pointer = std::shared_ptr<NetworkModel>;
using WeakPointer = std::weak_ptr<NetworkModel>;
Geometry() = default;
Geometry(const Geometry& geometry);
virtual ~Geometry() = default;
NetworkModel() = default;
NetworkModel(const NetworkModel& geometry);
virtual ~NetworkModel() = default;
// Immutable over lifetime
using GeometryMeshes = std::vector<std::shared_ptr<const graphics::Mesh>>;
using GeometryMeshParts = std::vector<std::shared_ptr<const MeshPart>>;
// Mutable, but must retain structure of vector
using NetworkMaterials = std::vector<std::shared_ptr<NetworkMaterial>>;
@ -63,7 +60,6 @@ protected:
HFMModel::ConstPointer _hfmModel;
MaterialMapping _materialMapping;
std::shared_ptr<const GeometryMeshes> _meshes;
std::shared_ptr<const GeometryMeshParts> _meshParts;
// Copied to each geometry, mutable throughout lifetime via setTextures
NetworkMaterials _materials;
@ -76,22 +72,22 @@ private:
};
/// A geometry loaded from the network.
class GeometryResource : public Resource, public Geometry {
class ModelResource : public Resource, public NetworkModel {
Q_OBJECT
public:
using Pointer = QSharedPointer<GeometryResource>;
using Pointer = QSharedPointer<ModelResource>;
GeometryResource(const QUrl& url, const ModelLoader& modelLoader) : Resource(url), _modelLoader(modelLoader) {}
GeometryResource(const GeometryResource& other);
ModelResource(const QUrl& url, const ModelLoader& modelLoader) : Resource(url), _modelLoader(modelLoader) {}
ModelResource(const ModelResource& other);
QString getType() const override { return "Geometry"; }
QString getType() const override { return "Model"; }
virtual void deleter() override;
virtual void downloadFinished(const QByteArray& data) override;
void setExtra(void* extra) override;
virtual bool areTexturesLoaded() const override { return isLoaded() && Geometry::areTexturesLoaded(); }
virtual bool areTexturesLoaded() const override { return isLoaded() && NetworkModel::areTexturesLoaded(); }
private slots:
void onGeometryMappingLoaded(bool success);
@ -115,21 +111,21 @@ private:
QUrl _textureBaseURL;
bool _combineParts;
GeometryResource::Pointer _geometryResource;
ModelResource::Pointer _modelResource;
QMetaObject::Connection _connection;
bool _isCacheable{ true };
};
class GeometryResourceWatcher : public QObject {
class ModelResourceWatcher : public QObject {
Q_OBJECT
public:
using Pointer = std::shared_ptr<GeometryResourceWatcher>;
using Pointer = std::shared_ptr<ModelResourceWatcher>;
GeometryResourceWatcher() = delete;
GeometryResourceWatcher(Geometry::Pointer& geometryPtr) : _geometryRef(geometryPtr) {}
ModelResourceWatcher() = delete;
ModelResourceWatcher(NetworkModel::Pointer& geometryPtr) : _networkModelRef(geometryPtr) {}
void setResource(GeometryResource::Pointer resource);
void setResource(ModelResource::Pointer resource);
QUrl getURL() const { return (bool)_resource ? _resource->getURL() : QUrl(); }
int getResourceDownloadAttempts() { return _resource ? _resource->getDownloadAttempts() : 0; }
@ -147,8 +143,8 @@ private slots:
void resourceRefreshed();
private:
GeometryResource::Pointer _resource;
Geometry::Pointer& _geometryRef;
ModelResource::Pointer _resource;
NetworkModel::Pointer& _networkModelRef;
};
/// Stores cached model geometries.
@ -158,18 +154,18 @@ class ModelCache : public ResourceCache, public Dependency {
public:
GeometryResource::Pointer getGeometryResource(const QUrl& url,
ModelResource::Pointer getModelResource(const QUrl& url,
const GeometryMappingPair& mapping =
GeometryMappingPair(QUrl(), QVariantHash()),
const QUrl& textureBaseUrl = QUrl());
GeometryResource::Pointer getCollisionGeometryResource(const QUrl& url,
ModelResource::Pointer getCollisionModelResource(const QUrl& url,
const GeometryMappingPair& mapping =
GeometryMappingPair(QUrl(), QVariantHash()),
const QUrl& textureBaseUrl = QUrl());
protected:
friend class GeometryResource;
friend class ModelResource;
virtual QSharedPointer<Resource> createResource(const QUrl& url) override;
QSharedPointer<Resource> createResourceCopy(const QSharedPointer<Resource>& resource) override;
@ -180,12 +176,4 @@ private:
ModelLoader _modelLoader;
};
class MeshPart {
public:
MeshPart(int mesh, int part, int material) : meshID { mesh }, partID { part }, materialID { material } {}
int meshID { -1 };
int partID { -1 };
int materialID { -1 };
};
#endif // hifi_ModelCache_h
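To tie the renamed pieces together, here is a hedged usage sketch (the URL is hypothetical and includes/Qt setup are omitted): a caller fetches a ModelResource through ModelCache, hands it to a ModelResourceWatcher, and receives a NetworkModel copy through the reference the watcher was constructed with once loading finishes.
NetworkModel::Pointer networkModel;              // populated by the watcher on success
ModelResourceWatcher watcher(networkModel);
auto modelCache = DependencyManager::get<ModelCache>();
ModelResource::Pointer resource = modelCache->getModelResource(QUrl("https://example.com/avatar.fst"));
watcher.setResource(resource);
QObject::connect(&watcher, &ModelResourceWatcher::finished, [&networkModel](bool success) {
    if (success && networkModel) {
        const HFMModel& hfmModel = networkModel->getHFMModel();
        // hfmModel.shapes, hfmModel.meshes, etc. are now available
        (void)hfmModel;
    }
});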

View file

@ -109,7 +109,7 @@ btConvexHullShape* createConvexHull(const ShapeInfo::PointList& points) {
glm::vec3 center = points[0];
glm::vec3 maxCorner = center;
glm::vec3 minCorner = center;
for (int i = 1; i < points.size(); i++) {
for (size_t i = 1; i < points.size(); i++) {
center += points[i];
maxCorner = glm::max(maxCorner, points[i]);
minCorner = glm::min(minCorner, points[i]);
@ -149,7 +149,7 @@ btConvexHullShape* createConvexHull(const ShapeInfo::PointList& points) {
// add the points, correcting for margin
glm::vec3 relativeScale = (diagonal - glm::vec3(2.0f * margin)) / diagonal;
glm::vec3 correctedPoint;
for (int i = 0; i < points.size(); ++i) {
for (size_t i = 0; i < points.size(); ++i) {
correctedPoint = (points[i] - center) * relativeScale + center;
hull->addPoint(btVector3(correctedPoint[0], correctedPoint[1], correctedPoint[2]), false);
}
@ -217,7 +217,7 @@ btTriangleIndexVertexArray* createStaticMeshArray(const ShapeInfo& info) {
}
const ShapeInfo::TriangleIndices& triangleIndices = info.getTriangleIndices();
int32_t numIndices = triangleIndices.size();
int32_t numIndices = (int32_t)triangleIndices.size();
if (numIndices < 3) {
// not enough indices to make a single triangle
return nullptr;
@ -237,7 +237,7 @@ btTriangleIndexVertexArray* createStaticMeshArray(const ShapeInfo& info) {
mesh.m_indexType = PHY_INTEGER;
mesh.m_triangleIndexStride = VERTICES_PER_TRIANGLE * sizeof(int32_t);
}
mesh.m_numVertices = pointList.size();
mesh.m_numVertices = (int)pointList.size();
mesh.m_vertexBase = new unsigned char[VERTICES_PER_TRIANGLE * sizeof(btScalar) * (size_t)mesh.m_numVertices];
mesh.m_vertexStride = VERTICES_PER_TRIANGLE * sizeof(btScalar);
mesh.m_vertexType = PHY_FLOAT;
@ -362,7 +362,7 @@ const btCollisionShape* ShapeFactory::createShapeFromInfo(const ShapeInfo& info)
const ShapeInfo::PointCollection& pointCollection = info.getPointCollection();
uint32_t numSubShapes = info.getNumSubShapes();
if (numSubShapes == 1) {
if (!pointCollection.isEmpty()) {
if (!pointCollection.empty()) {
shape = createConvexHull(pointCollection[0]);
}
} else {
@ -380,7 +380,7 @@ const btCollisionShape* ShapeFactory::createShapeFromInfo(const ShapeInfo& info)
case SHAPE_TYPE_SIMPLE_COMPOUND: {
const ShapeInfo::PointCollection& pointCollection = info.getPointCollection();
const ShapeInfo::TriangleIndices& triangleIndices = info.getTriangleIndices();
uint32_t numIndices = triangleIndices.size();
uint32_t numIndices = (uint32_t)triangleIndices.size();
uint32_t numMeshes = info.getNumSubShapes();
const uint32_t MIN_NUM_SIMPLE_COMPOUND_INDICES = 2; // END_OF_MESH_PART + END_OF_MESH
if (numMeshes > 0 && numIndices > MIN_NUM_SIMPLE_COMPOUND_INDICES) {

View file

@ -50,7 +50,7 @@ public:
Textures getTextures() { return _textures; }
protected:
friend class Geometry;
friend class NetworkModel;
Textures _textures;

View file

@ -32,16 +32,11 @@ bool CauterizedModel::updateGeometry() {
bool needsFullUpdate = Model::updateGeometry();
if (_isCauterized && needsFullUpdate) {
assert(_cauterizeMeshStates.empty());
const HFMModel& hfmModel = getHFMModel();
foreach (const HFMMesh& mesh, hfmModel.meshes) {
Model::MeshState state;
if (_useDualQuaternionSkinning) {
state.clusterDualQuaternions.resize(mesh.clusters.size());
_cauterizeMeshStates.append(state);
} else {
state.clusterMatrices.resize(mesh.clusters.size());
_cauterizeMeshStates.append(state);
}
// initialize the cauterized deformer states as a copy of the standard deformer states
_cauterizeMeshStates.resize(_meshStates.size());
for (int i = 0; i < (int) _meshStates.size(); ++i) {
_cauterizeMeshStates[i] = _meshStates[i];
}
}
return needsFullUpdate;
@ -50,20 +45,12 @@ bool CauterizedModel::updateGeometry() {
void CauterizedModel::createRenderItemSet() {
if (_isCauterized) {
assert(isLoaded());
const auto& meshes = _renderGeometry->getMeshes();
// all of our mesh vectors must match in size
if (meshes.size() != _meshStates.size()) {
qCDebug(renderutils) << "WARNING!!!! Mesh Sizes don't match! We will not segregate mesh groups yet.";
return;
}
// We should not have any existing renderItems if we enter this section of code
Q_ASSERT(_modelMeshRenderItems.isEmpty());
_modelMeshRenderItems.clear();
_modelMeshMaterialNames.clear();
_modelMeshRenderItemShapes.clear();
Transform transform;
transform.setTranslation(_translation);
@ -73,25 +60,17 @@ void CauterizedModel::createRenderItemSet() {
offset.setScale(_scale);
offset.postTranslate(_offset);
// Run through all of the meshes, and place them into their segregated, but unsorted buckets
int shapeID = 0;
uint32_t numMeshes = (uint32_t)meshes.size();
for (uint32_t i = 0; i < numMeshes; i++) {
const auto& mesh = meshes.at(i);
if (!mesh) {
continue;
}
Transform::mult(transform, transform, offset);
// Create the render payloads
int numParts = (int)mesh->getNumParts();
for (int partIndex = 0; partIndex < numParts; partIndex++) {
auto ptr = std::make_shared<CauterizedMeshPartPayload>(shared_from_this(), i, partIndex, shapeID, transform, offset, _created);
_modelMeshRenderItems << std::static_pointer_cast<ModelMeshPartPayload>(ptr);
auto material = getGeometry()->getShapeMaterial(shapeID);
_modelMeshMaterialNames.push_back(material ? material->getName() : "");
_modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)i });
shapeID++;
}
// Run through all of the shapes, and place them into their segregated, but unsorted buckets
const auto& shapes = _renderGeometry->getHFMModel().shapes;
for (int shapeID = 0; shapeID < (int) shapes.size(); shapeID++) {
const auto& shape = shapes[shapeID];
_modelMeshRenderItems << std::make_shared<CauterizedMeshPartPayload>(shared_from_this(), shape.mesh, shape.meshPart, shapeID, transform, offset, _created);
auto material = getNetworkModel()->getShapeMaterial(shapeID);
_modelMeshMaterialNames.push_back(material ? material->getName() : "");
}
} else {
Model::createRenderItemSet();
@ -104,28 +83,26 @@ void CauterizedModel::updateClusterMatrices() {
if (!_needsUpdateClusterMatrices || !isLoaded()) {
return;
}
updateShapeStatesFromRig();
_needsUpdateClusterMatrices = false;
const HFMModel& hfmModel = getHFMModel();
for (int i = 0; i < (int)_meshStates.size(); i++) {
Model::MeshState& state = _meshStates[i];
const HFMMesh& mesh = hfmModel.meshes.at(i);
int meshIndex = i;
for (int j = 0; j < mesh.clusters.size(); j++) {
const HFMCluster& cluster = mesh.clusters.at(j);
int clusterIndex = j;
for (int skinDeformerIndex = 0; skinDeformerIndex < (int)_meshStates.size(); skinDeformerIndex++) {
MeshState& state = _meshStates[skinDeformerIndex];
auto numClusters = state.getNumClusters();
for (uint32_t clusterIndex = 0; clusterIndex < numClusters; clusterIndex++) {
const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(skinDeformerIndex, clusterIndex);
if (_useDualQuaternionSkinning) {
auto jointPose = _rig.getJointPose(cluster.jointIndex);
auto jointPose = _rig.getJointPose(cbmov.jointIndex);
Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans());
Transform clusterTransform;
Transform::mult(clusterTransform, jointTransform, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindTransform);
state.clusterDualQuaternions[j] = Model::TransformDualQuaternion(clusterTransform);
state.clusterDualQuaternions[j].setCauterizationParameters(0.0f, jointPose.trans());
Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform);
state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform);
} else {
auto jointMatrix = _rig.getJointTransform(cluster.jointIndex);
glm_mat4u_mul(jointMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, state.clusterMatrices[j]);
auto jointMatrix = _rig.getJointTransform(cbmov.jointIndex);
glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]);
}
}
}
@ -135,6 +112,7 @@ void CauterizedModel::updateClusterMatrices() {
AnimPose cauterizePose = _rig.getJointPose(_rig.indexOfJoint("Neck"));
cauterizePose.scale() = glm::vec3(0.0001f, 0.0001f, 0.0001f);
Transform cauterizedDQTransform(cauterizePose.rot(), cauterizePose.scale(), cauterizePose.trans());
static const glm::mat4 zeroScale(
glm::vec4(0.0001f, 0.0f, 0.0f, 0.0f),
@ -143,32 +121,29 @@ void CauterizedModel::updateClusterMatrices() {
glm::vec4(0.0f, 0.0f, 0.0f, 1.0f));
auto cauterizeMatrix = _rig.getJointTransform(_rig.indexOfJoint("Neck")) * zeroScale;
for (int i = 0; i < _cauterizeMeshStates.size(); i++) {
Model::MeshState& state = _cauterizeMeshStates[i];
const HFMMesh& mesh = hfmModel.meshes.at(i);
int meshIndex = i;
for (int skinDeformerIndex = 0; skinDeformerIndex < (int) _cauterizeMeshStates.size(); skinDeformerIndex++) {
Model::MeshState& nonCauterizedState = _meshStates[skinDeformerIndex];
Model::MeshState& state = _cauterizeMeshStates[skinDeformerIndex];
for (int j = 0; j < mesh.clusters.size(); j++) {
const HFMCluster& cluster = mesh.clusters.at(j);
int clusterIndex = j;
if (_useDualQuaternionSkinning) {
if (_cauterizeBoneSet.find(cluster.jointIndex) == _cauterizeBoneSet.end()) {
// not cauterized so just copy the value from the non-cauterized version.
state.clusterDualQuaternions[j] = _meshStates[i].clusterDualQuaternions[j];
} else {
Transform jointTransform(cauterizePose.rot(), cauterizePose.scale(), cauterizePose.trans());
// Start by resetting the cauterized state to a straight copy of the normal state
if (_useDualQuaternionSkinning) {
state.clusterDualQuaternions = nonCauterizedState.clusterDualQuaternions;
} else {
state.clusterMatrices = nonCauterizedState.clusterMatrices;
}
// And cauterize only the affected joints
auto numClusters = state.getNumClusters();
for (uint32_t clusterIndex = 0; clusterIndex < numClusters; clusterIndex++) {
const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(skinDeformerIndex, clusterIndex);
if (_cauterizeBoneSet.find(cbmov.jointIndex) != _cauterizeBoneSet.end()) {
if (_useDualQuaternionSkinning) {
Transform clusterTransform;
Transform::mult(clusterTransform, jointTransform, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindTransform);
state.clusterDualQuaternions[j] = Model::TransformDualQuaternion(clusterTransform);
state.clusterDualQuaternions[j].setCauterizationParameters(1.0f, cauterizePose.trans());
}
} else {
if (_cauterizeBoneSet.find(cluster.jointIndex) == _cauterizeBoneSet.end()) {
// not cauterized so just copy the value from the non-cauterized version.
state.clusterMatrices[j] = _meshStates[i].clusterMatrices[j];
Transform::mult(clusterTransform, cauterizedDQTransform, cbmov.inverseBindTransform);
state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform);
state.clusterDualQuaternions[clusterIndex].setCauterizationParameters(1.0f, cauterizePose.trans());
} else {
glm_mat4u_mul(cauterizeMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, state.clusterMatrices[j]);
glm_mat4u_mul(cauterizeMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]);
}
}
}
@ -177,7 +152,7 @@ void CauterizedModel::updateClusterMatrices() {
// post the blender if we're not currently waiting for one to finish
auto modelBlender = DependencyManager::get<ModelBlender>();
if (modelBlender->shouldComputeBlendshapes() && hfmModel.hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) {
if (modelBlender->shouldComputeBlendshapes() && getHFMModel().hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) {
_blendedBlendshapeCoefficients = _blendshapeCoefficients;
modelBlender->noteRequiresBlend(getThisPointer());
}
@ -217,65 +192,60 @@ void CauterizedModel::updateRenderItems() {
render::Transaction transaction;
for (int i = 0; i < (int)self->_modelMeshRenderItemIDs.size(); i++) {
auto itemID = self->_modelMeshRenderItemIDs[i];
auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex;
const auto& meshState = self->getMeshState(meshIndex);
const auto& cauterizedMeshState = self->getCauterizeMeshState(meshIndex);
const auto& shapeState = self->getShapeState(i);
bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex);
auto skinDeformerIndex = shapeState._skinDeformerIndex;
bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(shapeState._meshIndex);
bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning();
transaction.updateItem<ModelMeshPartPayload>(itemID, [modelTransform, meshState, useDualQuaternionSkinning, cauterizedMeshState, invalidatePayloadShapeKey,
primitiveMode, renderItemKeyGlobalFlags, enableCauterization](ModelMeshPartPayload& mmppData) {
CauterizedMeshPartPayload& data = static_cast<CauterizedMeshPartPayload&>(mmppData);
if (useDualQuaternionSkinning) {
data.updateClusterBuffer(meshState.clusterDualQuaternions,
cauterizedMeshState.clusterDualQuaternions);
data.computeAdjustedLocalBound(meshState.clusterDualQuaternions);
} else {
data.updateClusterBuffer(meshState.clusterMatrices,
cauterizedMeshState.clusterMatrices);
data.computeAdjustedLocalBound(meshState.clusterMatrices);
}
if (skinDeformerIndex != hfm::UNDEFINED_KEY) {
Transform renderTransform = modelTransform;
if (useDualQuaternionSkinning) {
if (meshState.clusterDualQuaternions.size() == 1 || meshState.clusterDualQuaternions.size() == 2) {
const auto& dq = meshState.clusterDualQuaternions[0];
Transform transform(dq.getRotation(),
dq.getScale(),
dq.getTranslation());
renderTransform = modelTransform.worldTransform(transform);
}
} else {
if (meshState.clusterMatrices.size() == 1 || meshState.clusterMatrices.size() == 2) {
renderTransform = modelTransform.worldTransform(Transform(meshState.clusterMatrices[0]));
}
}
data.updateTransformForSkinnedMesh(renderTransform, modelTransform);
const auto& meshState = self->getMeshState(skinDeformerIndex);
const auto& cauterizedMeshState = self->getCauterizeMeshState(skinDeformerIndex);
renderTransform = modelTransform;
if (useDualQuaternionSkinning) {
if (cauterizedMeshState.clusterDualQuaternions.size() == 1 || cauterizedMeshState.clusterDualQuaternions.size() == 2) {
const auto& dq = cauterizedMeshState.clusterDualQuaternions[0];
Transform transform(dq.getRotation(),
dq.getScale(),
dq.getTranslation());
renderTransform = modelTransform.worldTransform(Transform(transform));
transaction.updateItem<ModelMeshPartPayload>(itemID,
[modelTransform, shapeState, meshState, useDualQuaternionSkinning, cauterizedMeshState, invalidatePayloadShapeKey,
primitiveMode, renderItemKeyGlobalFlags, enableCauterization](ModelMeshPartPayload& mmppData) {
CauterizedMeshPartPayload& data = static_cast<CauterizedMeshPartPayload&>(mmppData);
if (useDualQuaternionSkinning) {
data.updateClusterBuffer(meshState.clusterDualQuaternions, cauterizedMeshState.clusterDualQuaternions);
} else {
data.updateClusterBuffer(meshState.clusterMatrices, cauterizedMeshState.clusterMatrices);
}
} else {
if (cauterizedMeshState.clusterMatrices.size() == 1 || cauterizedMeshState.clusterMatrices.size() == 2) {
renderTransform = modelTransform.worldTransform(Transform(cauterizedMeshState.clusterMatrices[0]));
}
}
data.updateTransformForCauterizedMesh(renderTransform);
data.setEnableCauterization(enableCauterization);
data.updateKey(renderItemKeyGlobalFlags);
data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning);
});
Transform renderTransform = modelTransform;
// if (meshState.clusterMatrices.size() <= 2) {
// renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform);
// }
data.updateTransform(renderTransform);
data.updateTransformForCauterizedMesh(renderTransform);
data.updateTransformAndBound(modelTransform.worldTransform(shapeState._rootFromJointTransform));
data.setEnableCauterization(enableCauterization);
data.updateKey(renderItemKeyGlobalFlags);
data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning);
});
} else {
transaction.updateItem<ModelMeshPartPayload>(itemID,
[modelTransform, shapeState, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags, enableCauterization]
(ModelMeshPartPayload& mmppData) {
CauterizedMeshPartPayload& data = static_cast<CauterizedMeshPartPayload&>(mmppData);
Transform renderTransform = modelTransform;
renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform);
data.updateTransform(renderTransform);
data.updateTransformForCauterizedMesh(renderTransform);
data.setEnableCauterization(enableCauterization);
data.updateKey(renderItemKeyGlobalFlags);
data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, false);
});
}
}
scene->enqueueTransaction(transaction);
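Both render paths above index per-shape data through hfm::Shape, the record this change set introduces. As a rough sketch, the subset of fields these loops rely on looks like the following; the real definition lives in the hfm library and carries more members, and the sentinel value is assumed here, so treat this as illustrative only.

// Illustrative subset of hfm::Shape as consumed by the loops above.
namespace hfm {
    const uint32_t UNDEFINED_KEY = (uint32_t)-1;   // sentinel for "no index" (value assumed)
    struct Shape {
        uint32_t mesh { UNDEFINED_KEY };           // index into HFMModel::meshes
        uint32_t meshPart { UNDEFINED_KEY };       // index into that mesh's parts
        uint32_t joint { UNDEFINED_KEY };          // joint whose transform places the shape
        uint32_t skinDeformer { UNDEFINED_KEY };   // index into HFMModel::skinDeformers, if skinned
    };
}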

View file

@ -40,7 +40,7 @@ public:
protected:
std::unordered_set<int> _cauterizeBoneSet;
QVector<Model::MeshState> _cauterizeMeshStates;
std::vector<Model::MeshState> _cauterizeMeshStates;
bool _isCauterized { false };
bool _enableCauterization { false };
};

View file

@ -116,7 +116,7 @@ static const uint SHAPE_TANGENT_OFFSET = offsetof(GeometryCache::ShapeVertex, ta
std::map<std::pair<bool, bool>, gpu::PipelinePointer> GeometryCache::_webPipelines;
std::map<std::pair<bool, bool>, gpu::PipelinePointer> GeometryCache::_gridPipelines;
void GeometryCache::computeSimpleHullPointListForShape(const int entityShape, const glm::vec3 &entityExtents, QVector<glm::vec3> &outPointList) {
void GeometryCache::computeSimpleHullPointListForShape(const int entityShape, const glm::vec3 &entityExtents, ShapeInfo::PointList &outPointList) {
auto geometryCache = DependencyManager::get<GeometryCache>();
const GeometryCache::Shape geometryShape = GeometryCache::getShapeForEntityShape( entityShape );

View file

@ -155,7 +155,7 @@ public:
static GeometryCache::Shape getShapeForEntityShape(int entityShapeEnum);
static QString stringFromShape(GeometryCache::Shape geoShape);
static void computeSimpleHullPointListForShape(int entityShape, const glm::vec3 &entityExtents, QVector<glm::vec3> &outPointList);
static void computeSimpleHullPointListForShape(int entityShape, const glm::vec3 &entityExtents, ShapeInfo::PointList &outPointList);
int allocateID() { return _nextID++; }
void releaseID(int id);

View file

@ -74,11 +74,15 @@ void MeshPartPayload::updateMeshPart(const std::shared_ptr<const graphics::Mesh>
}
}
void MeshPartPayload::updateTransform(const Transform& transform, const Transform& offsetTransform) {
_transform = transform;
Transform::mult(_drawTransform, _transform, offsetTransform);
void MeshPartPayload::updateTransform(const Transform& transform) {
_worldFromLocalTransform = transform;
_worldBound = _localBound;
_worldBound.transform(_drawTransform);
_worldBound.transform(_worldFromLocalTransform);
}
void MeshPartPayload::updateTransformAndBound(const Transform& transform) {
_worldBound = _localBound;
_worldBound.transform(transform);
}
void MeshPartPayload::addMaterial(graphics::MaterialLayer material) {
@ -154,7 +158,7 @@ void MeshPartPayload::bindMesh(gpu::Batch& batch) {
}
void MeshPartPayload::bindTransform(gpu::Batch& batch, RenderArgs::RenderMode renderMode) const {
batch.setModelTransform(_drawTransform);
batch.setModelTransform(_worldFromLocalTransform);
}
@ -179,7 +183,7 @@ void MeshPartPayload::render(RenderArgs* args) {
auto& schema = _drawMaterials.getSchemaBuffer().get<graphics::MultiMaterial::Schema>();
glm::vec4 outColor = glm::vec4(ColorUtils::tosRGBVec3(schema._albedo), schema._opacity);
outColor = procedural->getColor(outColor);
procedural->prepare(batch, _drawTransform.getTranslation(), _drawTransform.getScale(), _drawTransform.getRotation(), _created,
procedural->prepare(batch, _worldFromLocalTransform.getTranslation(), _worldFromLocalTransform.getScale(), _worldFromLocalTransform.getRotation(), _created,
ProceduralProgramKey(outColor.a < 1.0f));
batch._glColor4f(outColor.r, outColor.g, outColor.b, outColor.a);
} else {
@ -234,36 +238,21 @@ ModelMeshPartPayload::ModelMeshPartPayload(ModelPointer model, int meshIndex, in
assert(model && model->isLoaded());
bool useDualQuaternionSkinning = model->getUseDualQuaternionSkinning();
auto shape = model->getHFMModel().shapes[shapeIndex];
assert(shape.mesh == meshIndex);
assert(shape.meshPart == partIndex);
auto& modelMesh = model->getGeometry()->getMeshes().at(_meshIndex);
auto& modelMesh = model->getNetworkModel()->getMeshes().at(_meshIndex);
_meshNumVertices = (int)modelMesh->getNumVertices();
const Model::MeshState& state = model->getMeshState(_meshIndex);
updateMeshPart(modelMesh, partIndex);
if (useDualQuaternionSkinning) {
computeAdjustedLocalBound(state.clusterDualQuaternions);
} else {
computeAdjustedLocalBound(state.clusterMatrices);
}
Transform renderTransform = transform;
const Model::ShapeState& shapeState = model->getShapeState(shapeIndex);
renderTransform = transform.worldTransform(shapeState._rootFromJointTransform);
updateTransform(renderTransform);
updateTransform(transform, offsetTransform);
Transform renderTransform = transform;
if (useDualQuaternionSkinning) {
if (state.clusterDualQuaternions.size() == 1) {
const auto& dq = state.clusterDualQuaternions[0];
Transform transform(dq.getRotation(),
dq.getScale(),
dq.getTranslation());
renderTransform = transform.worldTransform(Transform(transform));
}
} else {
if (state.clusterMatrices.size() == 1) {
renderTransform = transform.worldTransform(Transform(state.clusterMatrices[0]));
}
}
updateTransformForSkinnedMesh(renderTransform, transform);
_deformerIndex = shape.skinDeformer;
initCache(model);
@ -287,7 +276,9 @@ void ModelMeshPartPayload::initCache(const ModelPointer& model) {
if (_drawMesh) {
auto vertexFormat = _drawMesh->getVertexFormat();
_hasColorAttrib = vertexFormat->hasAttribute(gpu::Stream::COLOR);
_isSkinned = vertexFormat->hasAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT) && vertexFormat->hasAttribute(gpu::Stream::SKIN_CLUSTER_INDEX);
if (_deformerIndex != hfm::UNDEFINED_KEY) {
_isSkinned = vertexFormat->hasAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT) && vertexFormat->hasAttribute(gpu::Stream::SKIN_CLUSTER_INDEX);
}
const HFMModel& hfmModel = model->getHFMModel();
const HFMMesh& mesh = hfmModel.meshes.at(_meshIndex);
@ -296,7 +287,7 @@ void ModelMeshPartPayload::initCache(const ModelPointer& model) {
_hasTangents = !mesh.tangents.isEmpty();
}
auto networkMaterial = model->getGeometry()->getShapeMaterial(_shapeID);
auto networkMaterial = model->getNetworkModel()->getShapeMaterial(_shapeID);
if (networkMaterial) {
addMaterial(graphics::MaterialLayer(networkMaterial, 0));
}
@ -346,12 +337,6 @@ void ModelMeshPartPayload::updateClusterBuffer(const std::vector<Model::Transfor
}
}
void ModelMeshPartPayload::updateTransformForSkinnedMesh(const Transform& renderTransform, const Transform& boundTransform) {
_transform = renderTransform;
_worldBound = _adjustedLocalBound;
_worldBound.transform(boundTransform);
}
// Note that this method is called for models but not for shapes
void ModelMeshPartPayload::updateKey(const render::ItemKey& key) {
ItemKey::Builder builder(key);
@ -446,7 +431,7 @@ void ModelMeshPartPayload::bindTransform(gpu::Batch& batch, RenderArgs::RenderMo
if (_clusterBuffer) {
batch.setUniformBuffer(graphics::slot::buffer::Skinning, _clusterBuffer);
}
batch.setModelTransform(_transform);
batch.setModelTransform(_worldFromLocalTransform);
}
void ModelMeshPartPayload::render(RenderArgs* args) {
@ -478,7 +463,7 @@ void ModelMeshPartPayload::render(RenderArgs* args) {
auto& schema = _drawMaterials.getSchemaBuffer().get<graphics::MultiMaterial::Schema>();
glm::vec4 outColor = glm::vec4(ColorUtils::tosRGBVec3(schema._albedo), schema._opacity);
outColor = procedural->getColor(outColor);
procedural->prepare(batch, _drawTransform.getTranslation(), _drawTransform.getScale(), _drawTransform.getRotation(), _created,
procedural->prepare(batch, _worldFromLocalTransform.getTranslation(), _worldFromLocalTransform.getScale(), _worldFromLocalTransform.getRotation(), _created,
ProceduralProgramKey(outColor.a < 1.0f, _shapeKey.isDeformed(), _shapeKey.isDualQuatSkinned()));
batch._glColor4f(outColor.r, outColor.g, outColor.b, outColor.a);
} else {
@ -498,38 +483,6 @@ void ModelMeshPartPayload::render(RenderArgs* args) {
args->_details._trianglesRendered += _drawPart._numIndices / INDICES_PER_TRIANGLE;
}
void ModelMeshPartPayload::computeAdjustedLocalBound(const std::vector<glm::mat4>& clusterMatrices) {
_adjustedLocalBound = _localBound;
if (clusterMatrices.size() > 0) {
_adjustedLocalBound.transform(clusterMatrices.back());
for (int i = 0; i < (int)clusterMatrices.size() - 1; ++i) {
AABox clusterBound = _localBound;
clusterBound.transform(clusterMatrices[i]);
_adjustedLocalBound += clusterBound;
}
}
}
void ModelMeshPartPayload::computeAdjustedLocalBound(const std::vector<Model::TransformDualQuaternion>& clusterDualQuaternions) {
_adjustedLocalBound = _localBound;
if (clusterDualQuaternions.size() > 0) {
Transform rootTransform(clusterDualQuaternions.back().getRotation(),
clusterDualQuaternions.back().getScale(),
clusterDualQuaternions.back().getTranslation());
_adjustedLocalBound.transform(rootTransform);
for (int i = 0; i < (int)clusterDualQuaternions.size() - 1; ++i) {
AABox clusterBound = _localBound;
Transform transform(clusterDualQuaternions[i].getRotation(),
clusterDualQuaternions[i].getScale(),
clusterDualQuaternions[i].getTranslation());
clusterBound.transform(transform);
_adjustedLocalBound += clusterBound;
}
}
}
void ModelMeshPartPayload::setBlendshapeBuffer(const std::unordered_map<int, gpu::BufferPointer>& blendshapeBuffers, const QVector<int>& blendedMeshSizes) {
if (_meshIndex < blendedMeshSizes.length() && blendedMeshSizes.at(_meshIndex) == _meshNumVertices) {
auto blendshapeBuffer = blendshapeBuffers.find(_meshIndex);
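With updateTransformForSkinnedMesh and computeAdjustedLocalBound removed, callers now split the draw transform from the world bound, as the Model and CauterizedModel code in this commit does. A condensed sketch of the new call sequence, where payload, modelTransform, and shapeState stand in for the owning Model's data:

// Non-skinned shape: one call sets both the draw transform and the world bound.
payload.updateTransform(modelTransform.worldTransform(shapeState._rootFromJointTransform));

// Skinned shape: the draw transform stays at the model root (the cluster palette
// moves the vertices), while the bound still follows the shape's joint.
payload.updateTransform(modelTransform);
payload.updateTransformAndBound(modelTransform.worldTransform(shapeState._rootFromJointTransform));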

View file

@ -38,7 +38,8 @@ public:
virtual void updateMeshPart(const std::shared_ptr<const graphics::Mesh>& drawMesh, int partIndex);
virtual void notifyLocationChanged() {}
void updateTransform(const Transform& transform, const Transform& offsetTransform);
void updateTransform(const Transform& transform);
void updateTransformAndBound(const Transform& transform);
// Render Item interface
virtual render::ItemKey getKey() const;
@ -52,13 +53,11 @@ public:
virtual void bindTransform(gpu::Batch& batch, RenderArgs::RenderMode renderMode) const;
// Payload resource cached values
Transform _drawTransform;
Transform _transform;
Transform _worldFromLocalTransform;
int _partIndex = 0;
bool _hasColorAttrib { false };
graphics::Box _localBound;
graphics::Box _adjustedLocalBound;
mutable graphics::Box _worldBound;
std::shared_ptr<const graphics::Mesh> _drawMesh;
@ -103,7 +102,6 @@ public:
// dual quaternion skinning
void updateClusterBuffer(const std::vector<Model::TransformDualQuaternion>& clusterDualQuaternions);
void updateTransformForSkinnedMesh(const Transform& renderTransform, const Transform& boundTransform);
// Render Item interface
render::ShapeKey getShapeKey() const override; // shape interface
@ -116,12 +114,6 @@ public:
void bindMesh(gpu::Batch& batch) override;
void bindTransform(gpu::Batch& batch, RenderArgs::RenderMode renderMode) const override;
// matrix palette skinning
void computeAdjustedLocalBound(const std::vector<glm::mat4>& clusterMatrices);
// dual quaternion skinning
void computeAdjustedLocalBound(const std::vector<Model::TransformDualQuaternion>& clusterDualQuaternions);
gpu::BufferPointer _clusterBuffer;
enum class ClusterBufferType { Matrices, DualQuaternions };
@ -129,6 +121,7 @@ public:
int _meshIndex;
int _shapeID;
uint32_t _deformerIndex;
bool _isSkinned{ false };
bool _isBlendShaped { false };

View file

@ -44,7 +44,7 @@
using namespace std;
int nakedModelPointerTypeId = qRegisterMetaType<ModelPointer>();
int weakGeometryResourceBridgePointerTypeId = qRegisterMetaType<Geometry::WeakPointer>();
int weakGeometryResourceBridgePointerTypeId = qRegisterMetaType<NetworkModel::WeakPointer>();
int vec3VectorTypeId = qRegisterMetaType<QVector<glm::vec3>>();
int normalTypeVecTypeId = qRegisterMetaType<QVector<NormalType>>("QVector<NormalType>");
float Model::FAKE_DIMENSION_PLACEHOLDER = -1.0f;
@ -74,7 +74,7 @@ Model::Model(QObject* parent, SpatiallyNestable* spatiallyNestableOverride, uint
setSnapModelToRegistrationPoint(true, glm::vec3(0.5f));
connect(&_renderWatcher, &GeometryResourceWatcher::finished, this, &Model::loadURLFinished);
connect(&_renderWatcher, &ModelResourceWatcher::finished, this, &Model::loadURLFinished);
}
Model::~Model() {
@ -154,7 +154,7 @@ void Model::setOffset(const glm::vec3& offset) {
}
void Model::calculateTextureInfo() {
if (!_hasCalculatedTextureInfo && isLoaded() && getGeometry()->areTexturesLoaded() && !_modelMeshRenderItemsMap.isEmpty()) {
if (!_hasCalculatedTextureInfo && isLoaded() && getNetworkModel()->areTexturesLoaded() && !_modelMeshRenderItemIDs.empty()) {
size_t textureSize = 0;
int textureCount = 0;
bool allTexturesLoaded = true;
@ -181,15 +181,15 @@ int Model::getRenderInfoTextureCount() {
}
bool Model::shouldInvalidatePayloadShapeKey(int meshIndex) {
if (!getGeometry()) {
if (!getNetworkModel()) {
return true;
}
const HFMModel& hfmModel = getHFMModel();
const auto& networkMeshes = getGeometry()->getMeshes();
const auto& networkMeshes = getNetworkModel()->getMeshes();
// if our index is ever out of range for either meshes or networkMeshes, then skip it, and set our _meshGroupsKnown
// to false to rebuild our mesh groups.
if (meshIndex < 0 || meshIndex >= (int)networkMeshes.size() || meshIndex >= (int)hfmModel.meshes.size() || meshIndex >= (int)_meshStates.size()) {
if (meshIndex < 0 || meshIndex >= (int)networkMeshes.size() || meshIndex >= (int)hfmModel.meshes.size()) {
_needsFixupInScene = true; // trigger remove/add cycle
invalidCalculatedMeshBoxes(); // if we have to reload, we need to assume our mesh boxes are all invalid
return true;
@ -231,46 +231,45 @@ void Model::updateRenderItems() {
render::Transaction transaction;
for (int i = 0; i < (int) self->_modelMeshRenderItemIDs.size(); i++) {
auto itemID = self->_modelMeshRenderItemIDs[i];
auto meshIndex = self->_modelMeshRenderItemShapes[i].meshIndex;
const auto& meshState = self->getMeshState(meshIndex);
const auto& shapeState = self->getShapeState(i);
bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(meshIndex);
bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning();
auto skinDeformerIndex = shapeState._skinDeformerIndex;
transaction.updateItem<ModelMeshPartPayload>(itemID, [modelTransform, meshState, useDualQuaternionSkinning,
invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags, cauterized](ModelMeshPartPayload& data) {
if (useDualQuaternionSkinning) {
data.updateClusterBuffer(meshState.clusterDualQuaternions);
data.computeAdjustedLocalBound(meshState.clusterDualQuaternions);
} else {
data.updateClusterBuffer(meshState.clusterMatrices);
data.computeAdjustedLocalBound(meshState.clusterMatrices);
}
bool invalidatePayloadShapeKey = self->shouldInvalidatePayloadShapeKey(shapeState._meshIndex);
Transform renderTransform = modelTransform;
if (skinDeformerIndex != hfm::UNDEFINED_KEY) {
const auto& meshState = self->getMeshState(skinDeformerIndex);
bool useDualQuaternionSkinning = self->getUseDualQuaternionSkinning();
if (useDualQuaternionSkinning) {
if (meshState.clusterDualQuaternions.size() == 1 || meshState.clusterDualQuaternions.size() == 2) {
const auto& dq = meshState.clusterDualQuaternions[0];
Transform transform(dq.getRotation(),
dq.getScale(),
dq.getTranslation());
renderTransform = modelTransform.worldTransform(Transform(transform));
transaction.updateItem<ModelMeshPartPayload>(itemID, [modelTransform, shapeState, meshState, useDualQuaternionSkinning,
invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags, cauterized](ModelMeshPartPayload& data) {
if (useDualQuaternionSkinning) {
data.updateClusterBuffer(meshState.clusterDualQuaternions);
} else {
data.updateClusterBuffer(meshState.clusterMatrices);
}
} else {
if (meshState.clusterMatrices.size() == 1 || meshState.clusterMatrices.size() == 2) {
renderTransform = modelTransform.worldTransform(Transform(meshState.clusterMatrices[0]));
}
}
data.updateTransformForSkinnedMesh(renderTransform, modelTransform);
data.setCauterized(cauterized);
data.updateKey(renderItemKeyGlobalFlags);
data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning);
});
Transform renderTransform = modelTransform;
data.updateTransform(renderTransform);
data.updateTransformAndBound(modelTransform.worldTransform(shapeState._rootFromJointTransform));
data.setCauterized(cauterized);
data.updateKey(renderItemKeyGlobalFlags);
data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning);
});
} else {
transaction.updateItem<ModelMeshPartPayload>(itemID, [modelTransform, shapeState, invalidatePayloadShapeKey, primitiveMode, renderItemKeyGlobalFlags](ModelMeshPartPayload& data) {
Transform renderTransform = modelTransform;
renderTransform = modelTransform.worldTransform(shapeState._rootFromJointTransform);
data.updateTransform(renderTransform);
data.updateKey(renderItemKeyGlobalFlags);
data.setShapeKey(invalidatePayloadShapeKey, primitiveMode, false);
});
}
}
AbstractViewStateInterface::instance()->getMain3DScene()->enqueueTransaction(transaction);
@ -296,6 +295,15 @@ void Model::reset() {
}
}
void Model::updateShapeStatesFromRig() {
for (auto& shape : _shapeStates) {
uint32_t jointId = shape._jointIndex;
if (jointId < (uint32_t) _rig.getJointStateCount()) {
shape._rootFromJointTransform = _rig.getJointTransform(jointId);
}
}
}
bool Model::updateGeometry() {
bool needFullUpdate = false;
@ -311,14 +319,27 @@ bool Model::updateGeometry() {
assert(_meshStates.empty());
const HFMModel& hfmModel = getHFMModel();
int i = 0;
foreach (const HFMMesh& mesh, hfmModel.meshes) {
MeshState state;
state.clusterDualQuaternions.resize(mesh.clusters.size());
state.clusterMatrices.resize(mesh.clusters.size());
_meshStates.push_back(state);
i++;
const auto& shapes = hfmModel.shapes;
_shapeStates.resize(shapes.size());
for (uint32_t s = 0; s < (uint32_t) shapes.size(); ++s) {
auto& shapeState = _shapeStates[s];
shapeState._jointIndex = shapes[s].joint;
shapeState._meshIndex = shapes[s].mesh;
shapeState._meshPartIndex = shapes[s].meshPart;
shapeState._skinDeformerIndex = shapes[s].skinDeformer;
}
updateShapeStatesFromRig();
const auto& hfmSkinDeformers = hfmModel.skinDeformers;
for (uint32_t i = 0; i < (uint32_t) hfmSkinDeformers.size(); i++) {
const auto& dynT = hfmSkinDeformers[i];
MeshState state;
state.clusterDualQuaternions.resize(dynT.clusters.size());
state.clusterMatrices.resize(dynT.clusters.size());
_meshStates.push_back(state);
}
needFullUpdate = true;
emit rigReady();
}
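The loop above also changes what the state vectors are parallel to: _shapeStates mirrors hfmModel.shapes one-to-one, while _meshStates now mirrors hfmModel.skinDeformers rather than meshes. A small sketch of how a render update resolves both under that assumption, with model and shapeID standing in for the caller's data:

// One ShapeState per render item; skinning state is shared per skin deformer
// and reached through the shape.
const Model::ShapeState& shapeState = model->getShapeState(shapeID);
if (shapeState._skinDeformerIndex != hfm::UNDEFINED_KEY) {
    const Model::MeshState& meshState = model->getMeshState(shapeState._skinDeformerIndex);
    // meshState.clusterMatrices / clusterDualQuaternions feed this shape's skinning palette.
}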
@ -646,8 +667,8 @@ glm::mat4 Model::getWorldToHFMMatrix() const {
// TODO: deprecate and remove
MeshProxyList Model::getMeshes() const {
MeshProxyList result;
const Geometry::Pointer& renderGeometry = getGeometry();
const Geometry::GeometryMeshes& meshes = renderGeometry->getMeshes();
const NetworkModel::Pointer& renderGeometry = getNetworkModel();
const NetworkModel::GeometryMeshes& meshes = renderGeometry->getMeshes();
if (!isLoaded()) {
return result;
@ -716,9 +737,9 @@ bool Model::replaceScriptableModelMeshPart(scriptable::ScriptableModelBasePointe
render::Transaction transaction;
for (int i = 0; i < (int) _modelMeshRenderItemIDs.size(); i++) {
auto itemID = _modelMeshRenderItemIDs[i];
auto shape = _modelMeshRenderItemShapes[i];
auto& shape = _shapeStates[i];
// TODO: check to see if .partIndex matches too
if (shape.meshIndex == meshIndex) {
if (shape._meshIndex == (uint32_t) meshIndex) {
transaction.updateItem<ModelMeshPartPayload>(itemID, [=](ModelMeshPartPayload& data) {
data.updateMeshPart(mesh, partIndex);
});
@ -737,7 +758,7 @@ bool Model::replaceScriptableModelMeshPart(scriptable::ScriptableModelBasePointe
for (int partID = 0; partID < numParts; partID++) {
HFMMeshPart part;
part.triangleIndices = buffer_helpers::bufferToVector<int>(mesh._mesh->getIndexBuffer(), "part.triangleIndices");
mesh.parts << part;
mesh.parts.push_back(part);
}
{
foreach (const glm::vec3& vertex, mesh.vertices) {
@ -748,7 +769,7 @@ bool Model::replaceScriptableModelMeshPart(scriptable::ScriptableModelBasePointe
mesh.meshExtents.maximum = glm::max(mesh.meshExtents.maximum, transformedVertex);
}
}
hfmModel.meshes << mesh;
hfmModel.meshes.push_back(mesh);
}
calculateTriangleSets(hfmModel);
}
@ -765,9 +786,9 @@ scriptable::ScriptableModelBase Model::getScriptableModel() {
}
const HFMModel& hfmModel = getHFMModel();
int numberOfMeshes = hfmModel.meshes.size();
uint32_t numberOfMeshes = (uint32_t)hfmModel.meshes.size();
int shapeID = 0;
for (int i = 0; i < numberOfMeshes; i++) {
for (uint32_t i = 0; i < numberOfMeshes; i++) {
const HFMMesh& hfmMesh = hfmModel.meshes.at(i);
if (auto mesh = hfmMesh._mesh) {
result.append(mesh);
@ -775,7 +796,7 @@ scriptable::ScriptableModelBase Model::getScriptableModel() {
int numParts = (int)mesh->getNumParts();
for (int partIndex = 0; partIndex < numParts; partIndex++) {
auto& materialName = _modelMeshMaterialNames[shapeID];
result.appendMaterial(graphics::MaterialLayer(getGeometry()->getShapeMaterial(shapeID), 0), shapeID, materialName);
result.appendMaterial(graphics::MaterialLayer(getNetworkModel()->getShapeMaterial(shapeID), 0), shapeID, materialName);
{
std::unique_lock<std::mutex> lock(_materialMappingMutex);
@ -798,77 +819,69 @@ scriptable::ScriptableModelBase Model::getScriptableModel() {
void Model::calculateTriangleSets(const HFMModel& hfmModel) {
PROFILE_RANGE(render, __FUNCTION__);
int numberOfMeshes = hfmModel.meshes.size();
uint32_t meshInstanceCount = 0;
uint32_t lastMeshForInstanceCount = hfm::UNDEFINED_KEY;
for (const auto& shape : hfmModel.shapes) {
if (shape.mesh != lastMeshForInstanceCount) {
++meshInstanceCount;
}
lastMeshForInstanceCount = shape.mesh;
}
_triangleSetsValid = true;
_modelSpaceMeshTriangleSets.clear();
_modelSpaceMeshTriangleSets.resize(numberOfMeshes);
_modelSpaceMeshTriangleSets.reserve(meshInstanceCount);
for (int i = 0; i < numberOfMeshes; i++) {
const HFMMesh& mesh = hfmModel.meshes.at(i);
uint32_t lastMeshForTriangleBuilding = hfm::UNDEFINED_KEY;
glm::mat4 lastTransformForTriangleBuilding { 0 };
std::vector<glm::vec3> transformedPoints;
for (const auto& shape : hfmModel.shapes) {
const uint32_t meshIndex = shape.mesh;
const hfm::Mesh& mesh = hfmModel.meshes.at(meshIndex);
const auto& triangleListMesh = mesh.triangleListMesh;
const glm::vec2 part = triangleListMesh.parts[shape.meshPart];
glm::mat4 worldFromMeshTransform;
if (shape.joint != hfm::UNDEFINED_KEY) {
// globalTransform includes hfmModel.offset,
// which includes the scaling, rotation, and translation specified by the FST,
// and the scaling from the unit conversion in FBX.
// This can't change at runtime, so we can safely store these in our TriangleSet.
worldFromMeshTransform = hfmModel.joints[shape.joint].globalTransform;
}
const int numberOfParts = mesh.parts.size();
auto& meshTriangleSets = _modelSpaceMeshTriangleSets[i];
meshTriangleSets.resize(numberOfParts);
if (meshIndex != lastMeshForTriangleBuilding || worldFromMeshTransform != lastTransformForTriangleBuilding) {
lastMeshForTriangleBuilding = meshIndex;
lastTransformForTriangleBuilding = worldFromMeshTransform;
_modelSpaceMeshTriangleSets.emplace_back();
_modelSpaceMeshTriangleSets.back().reserve(mesh.parts.size());
for (int j = 0; j < numberOfParts; j++) {
const HFMMeshPart& part = mesh.parts.at(j);
auto& partTriangleSet = meshTriangleSets[j];
const int INDICES_PER_TRIANGLE = 3;
const int INDICES_PER_QUAD = 4;
const int TRIANGLES_PER_QUAD = 2;
// tell our triangleSet how many triangles to expect.
int numberOfQuads = part.quadIndices.size() / INDICES_PER_QUAD;
int numberOfTris = part.triangleIndices.size() / INDICES_PER_TRIANGLE;
int totalTriangles = (numberOfQuads * TRIANGLES_PER_QUAD) + numberOfTris;
partTriangleSet.reserve(totalTriangles);
auto meshTransform = hfmModel.offset * mesh.modelTransform;
if (part.quadIndices.size() > 0) {
int vIndex = 0;
for (int q = 0; q < numberOfQuads; q++) {
int i0 = part.quadIndices[vIndex++];
int i1 = part.quadIndices[vIndex++];
int i2 = part.quadIndices[vIndex++];
int i3 = part.quadIndices[vIndex++];
// track the model space version... these points will be transformed by the FST's offset,
// which includes the scaling, rotation, and translation specified by the FST/FBX,
// this can't change at runtime, so we can safely store these in our TriangleSet
glm::vec3 v0 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i0], 1.0f));
glm::vec3 v1 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i1], 1.0f));
glm::vec3 v2 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i2], 1.0f));
glm::vec3 v3 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i3], 1.0f));
Triangle tri1 = { v0, v1, v3 };
Triangle tri2 = { v1, v2, v3 };
partTriangleSet.insert(tri1);
partTriangleSet.insert(tri2);
transformedPoints = triangleListMesh.vertices;
if (worldFromMeshTransform != glm::mat4()) {
for (auto& point : transformedPoints) {
point = glm::vec3(worldFromMeshTransform * glm::vec4(point, 1.0f));
}
}
}
auto& meshTriangleSets = _modelSpaceMeshTriangleSets.back();
meshTriangleSets.emplace_back();
auto& partTriangleSet = meshTriangleSets.back();
if (part.triangleIndices.size() > 0) {
int vIndex = 0;
for (int t = 0; t < numberOfTris; t++) {
int i0 = part.triangleIndices[vIndex++];
int i1 = part.triangleIndices[vIndex++];
int i2 = part.triangleIndices[vIndex++];
const static size_t INDICES_PER_TRIANGLE = 3;
const size_t triangleCount = (size_t)(part.y) / INDICES_PER_TRIANGLE;
partTriangleSet.reserve(triangleCount);
const size_t indexStart = (uint32_t)part.x;
const size_t indexEnd = indexStart + (triangleCount * INDICES_PER_TRIANGLE);
for (size_t i = indexStart; i < indexEnd; i += INDICES_PER_TRIANGLE) {
const int i0 = triangleListMesh.indices[i];
const int i1 = triangleListMesh.indices[i + 1];
const int i2 = triangleListMesh.indices[i + 2];
// track the model space version... these points will be transformed by the FST's offset,
// which includes the scaling, rotation, and translation specified by the FST/FBX,
// this can't change at runtime, so we can safely store these in our TriangleSet
glm::vec3 v0 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i0], 1.0f));
glm::vec3 v1 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i1], 1.0f));
glm::vec3 v2 = glm::vec3(meshTransform * glm::vec4(mesh.vertices[i2], 1.0f));
const glm::vec3 v0 = transformedPoints[i0];
const glm::vec3 v1 = transformedPoints[i1];
const glm::vec3 v2 = transformedPoints[i2];
Triangle tri = { v0, v1, v2 };
partTriangleSet.insert(tri);
}
}
const Triangle tri = { v0, v1, v2 };
partTriangleSet.insert(tri);
}
}
}
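The rewritten triangle collection assumes each triangleListMesh part is a glm::vec2 packing (first index, index count) into a shared index array. A worked example of the arithmetic above, with made-up values:

// part = (x = 12, y = 9): nine indices starting at offset 12,
// i.e. 9 / 3 = 3 triangles covering indices [12, 21).
const glm::vec2 part(12.0f, 9.0f);
const size_t INDICES_PER_TRIANGLE = 3;
const size_t triangleCount = (size_t)part.y / INDICES_PER_TRIANGLE;         // 3
const size_t indexStart = (size_t)part.x;                                   // 12
const size_t indexEnd = indexStart + triangleCount * INDICES_PER_TRIANGLE;  // 21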
@ -880,8 +893,8 @@ void Model::updateRenderItemsKey(const render::ScenePointer& scene) {
}
auto renderItemsKey = _renderItemKeyGlobalFlags;
render::Transaction transaction;
foreach(auto item, _modelMeshRenderItemsMap.keys()) {
transaction.updateItem<ModelMeshPartPayload>(item, [renderItemsKey](ModelMeshPartPayload& data) {
for (auto itemID : _modelMeshRenderItemIDs) {
transaction.updateItem<ModelMeshPartPayload>(itemID, [renderItemsKey](ModelMeshPartPayload& data) {
data.updateKey(renderItemsKey);
});
}
@ -951,8 +964,8 @@ void Model::setCauterized(bool cauterized, const render::ScenePointer& scene) {
return;
}
render::Transaction transaction;
foreach (auto item, _modelMeshRenderItemsMap.keys()) {
transaction.updateItem<ModelMeshPartPayload>(item, [cauterized](ModelMeshPartPayload& data) {
for (auto itemID : _modelMeshRenderItemIDs) {
transaction.updateItem<ModelMeshPartPayload>(itemID, [cauterized](ModelMeshPartPayload& data) {
data.setCauterized(cauterized);
});
}
@ -979,26 +992,25 @@ bool Model::addToScene(const render::ScenePointer& scene,
bool somethingAdded = false;
if (_modelMeshRenderItemsMap.empty()) {
if (_modelMeshRenderItemIDs.empty()) {
bool hasTransparent = false;
size_t verticesCount = 0;
foreach(auto renderItem, _modelMeshRenderItems) {
auto item = scene->allocateID();
auto renderPayload = std::make_shared<ModelMeshPartPayload::Payload>(renderItem);
if (_modelMeshRenderItemsMap.empty() && statusGetters.size()) {
if (_modelMeshRenderItemIDs.empty() && statusGetters.size()) {
renderPayload->addStatusGetters(statusGetters);
}
transaction.resetItem(item, renderPayload);
hasTransparent = hasTransparent || renderItem.get()->getShapeKey().isTranslucent();
verticesCount += renderItem.get()->getVerticesCount();
_modelMeshRenderItemsMap.insert(item, renderPayload);
_modelMeshRenderItemIDs.emplace_back(item);
}
somethingAdded = !_modelMeshRenderItemsMap.empty();
somethingAdded = !_modelMeshRenderItemIDs.empty();
_renderInfoVertexCount = verticesCount;
_renderInfoDrawCalls = _modelMeshRenderItemsMap.count();
_renderInfoDrawCalls = (uint32_t) _modelMeshRenderItemIDs.size();
_renderInfoHasTransparent = hasTransparent;
}
@ -1013,14 +1025,12 @@ bool Model::addToScene(const render::ScenePointer& scene,
}
void Model::removeFromScene(const render::ScenePointer& scene, render::Transaction& transaction) {
foreach (auto item, _modelMeshRenderItemsMap.keys()) {
transaction.removeItem(item);
for (auto itemID : _modelMeshRenderItemIDs) {
transaction.removeItem(itemID);
}
_modelMeshRenderItemIDs.clear();
_modelMeshRenderItemsMap.clear();
_modelMeshRenderItems.clear();
_modelMeshMaterialNames.clear();
_modelMeshRenderItemShapes.clear();
_priorityMap.clear();
_addedToScene = false;
@ -1199,7 +1209,7 @@ void Model::setURL(const QUrl& url) {
invalidCalculatedMeshBoxes();
deleteGeometry();
auto resource = DependencyManager::get<ModelCache>()->getGeometryResource(url);
auto resource = DependencyManager::get<ModelCache>()->getModelResource(url);
if (resource) {
resource->setLoadPriority(this, _loadingPriority);
_renderWatcher.setResource(resource);
@ -1388,32 +1398,32 @@ void Model::updateClusterMatrices() {
return;
}
updateShapeStatesFromRig();
_needsUpdateClusterMatrices = false;
const HFMModel& hfmModel = getHFMModel();
for (int i = 0; i < (int) _meshStates.size(); i++) {
MeshState& state = _meshStates[i];
int meshIndex = i;
const HFMMesh& mesh = hfmModel.meshes.at(i);
for (int j = 0; j < mesh.clusters.size(); j++) {
const HFMCluster& cluster = mesh.clusters.at(j);
int clusterIndex = j;
for (int skinDeformerIndex = 0; skinDeformerIndex < (int)_meshStates.size(); skinDeformerIndex++) {
MeshState& state = _meshStates[skinDeformerIndex];
auto numClusters = state.getNumClusters();
for (uint32_t clusterIndex = 0; clusterIndex < numClusters; clusterIndex++) {
const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(skinDeformerIndex, clusterIndex);
if (_useDualQuaternionSkinning) {
auto jointPose = _rig.getJointPose(cluster.jointIndex);
auto jointPose = _rig.getJointPose(cbmov.jointIndex);
Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans());
Transform clusterTransform;
Transform::mult(clusterTransform, jointTransform, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindTransform);
state.clusterDualQuaternions[j] = Model::TransformDualQuaternion(clusterTransform);
Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform);
state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform);
} else {
auto jointMatrix = _rig.getJointTransform(cluster.jointIndex);
glm_mat4u_mul(jointMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, state.clusterMatrices[j]);
auto jointMatrix = _rig.getJointTransform(cbmov.jointIndex);
glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]);
}
}
}
// post the blender if we're not currently waiting for one to finish
auto modelBlender = DependencyManager::get<ModelBlender>();
if (modelBlender->shouldComputeBlendshapes() && hfmModel.hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) {
if (modelBlender->shouldComputeBlendshapes() && getHFMModel().hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) {
_blendedBlendshapeCoefficients = _blendshapeCoefficients;
modelBlender->noteRequiresBlend(getThisPointer());
}
@ -1421,6 +1431,7 @@ void Model::updateClusterMatrices() {
void Model::deleteGeometry() {
_deleteGeometryCounter++;
_shapeStates.clear();
_meshStates.clear();
_rig.destroyAnimGraph();
_blendedBlendshapeCoefficients.clear();
@ -1454,20 +1465,12 @@ const render::ItemIDs& Model::fetchRenderItemIDs() const {
void Model::createRenderItemSet() {
assert(isLoaded());
const auto& meshes = _renderGeometry->getMeshes();
// all of our mesh vectors must match in size
if (meshes.size() != _meshStates.size()) {
qCDebug(renderutils) << "WARNING!!!! Mesh Sizes don't match! " << meshes.size() << _meshStates.size() << " We will not segregate mesh groups yet.";
return;
}
// We should not have any existing renderItems if we enter this section of code
Q_ASSERT(_modelMeshRenderItems.isEmpty());
_modelMeshRenderItems.clear();
_modelMeshMaterialNames.clear();
_modelMeshRenderItemShapes.clear();
Transform transform;
transform.setTranslation(_translation);
@ -1478,28 +1481,19 @@ void Model::createRenderItemSet() {
offset.postTranslate(_offset);
// Run through all of the shapes, and place them into their segregated, but unsorted buckets
int shapeID = 0;
uint32_t numMeshes = (uint32_t)meshes.size();
for (uint32_t i = 0; i < numMeshes; i++) {
const auto& mesh = meshes.at(i);
if (!mesh) {
continue;
}
const auto& shapes = _renderGeometry->getHFMModel().shapes;
for (uint32_t shapeID = 0; shapeID < shapes.size(); shapeID++) {
const auto& shape = shapes[shapeID];
// Create the render payloads
int numParts = (int)mesh->getNumParts();
for (int partIndex = 0; partIndex < numParts; partIndex++) {
_modelMeshRenderItems << std::make_shared<ModelMeshPartPayload>(shared_from_this(), i, partIndex, shapeID, transform, offset, _created);
auto material = getGeometry()->getShapeMaterial(shapeID);
_modelMeshMaterialNames.push_back(material ? material->getName() : "");
_modelMeshRenderItemShapes.emplace_back(ShapeInfo{ (int)i });
shapeID++;
}
_modelMeshRenderItems << std::make_shared<ModelMeshPartPayload>(shared_from_this(), shape.mesh, shape.meshPart, shapeID, transform, offset, _created);
auto material = getNetworkModel()->getShapeMaterial(shapeID);
_modelMeshMaterialNames.push_back(material ? material->getName() : "");
}
}
bool Model::isRenderable() const {
return !_meshStates.empty() || (isLoaded() && _renderGeometry->getMeshes().empty());
return (!_shapeStates.empty()) || (isLoaded() && _renderGeometry->getMeshes().empty());
}
std::set<unsigned int> Model::getMeshIDsFromMaterialID(QString parentMaterialName) {
@ -1555,11 +1549,11 @@ void Model::applyMaterialMapping() {
PrimitiveMode primitiveMode = getPrimitiveMode();
bool useDualQuaternionSkinning = _useDualQuaternionSkinning;
auto modelMeshRenderItemIDs = _modelMeshRenderItemIDs;
auto modelMeshRenderItemShapes = _modelMeshRenderItemShapes;
auto shapeStates = _shapeStates;
std::unordered_map<int, bool> shouldInvalidatePayloadShapeKeyMap;
for (auto& shape : _modelMeshRenderItemShapes) {
shouldInvalidatePayloadShapeKeyMap[shape.meshIndex] = shouldInvalidatePayloadShapeKey(shape.meshIndex);
for (auto& shape : _shapeStates) {
shouldInvalidatePayloadShapeKeyMap[shape._meshIndex] = shouldInvalidatePayloadShapeKey(shape._meshIndex);
}
auto& materialMapping = getMaterialMapping();
@ -1582,7 +1576,7 @@ void Model::applyMaterialMapping() {
std::weak_ptr<Model> weakSelf = shared_from_this();
auto materialLoaded = [networkMaterialResource, shapeIDs, priorityMapPerResource, renderItemsKey, primitiveMode, useDualQuaternionSkinning,
modelMeshRenderItemIDs, modelMeshRenderItemShapes, shouldInvalidatePayloadShapeKeyMap, weakSelf]() {
modelMeshRenderItemIDs, shapeStates, shouldInvalidatePayloadShapeKeyMap, weakSelf]() {
std::shared_ptr<Model> self = weakSelf.lock();
if (!self || networkMaterialResource->isFailed() || networkMaterialResource->parsedMaterials.names.size() == 0) {
return;
@ -1608,7 +1602,7 @@ void Model::applyMaterialMapping() {
for (auto shapeID : shapeIDs) {
if (shapeID < modelMeshRenderItemIDs.size()) {
auto itemID = modelMeshRenderItemIDs[shapeID];
auto meshIndex = modelMeshRenderItemShapes[shapeID].meshIndex;
auto meshIndex = shapeStates[shapeID]._meshIndex;
bool invalidatePayloadShapeKey = shouldInvalidatePayloadShapeKeyMap.at(meshIndex);
graphics::MaterialLayer material = graphics::MaterialLayer(networkMaterial, priorityMapPerResource.at(shapeID));
{
@ -1646,7 +1640,7 @@ void Model::addMaterial(graphics::MaterialLayer material, const std::string& par
for (auto shapeID : shapeIDs) {
if (shapeID < _modelMeshRenderItemIDs.size()) {
auto itemID = _modelMeshRenderItemIDs[shapeID];
auto meshIndex = _modelMeshRenderItemShapes[shapeID].meshIndex;
auto meshIndex = _shapeStates[shapeID]._meshIndex;
bool invalidatePayloadShapeKey = shouldInvalidatePayloadShapeKey(meshIndex);
transaction.updateItem<ModelMeshPartPayload>(itemID, [material, renderItemsKey,
invalidatePayloadShapeKey, primitiveMode, useDualQuaternionSkinning](ModelMeshPartPayload& data) {
@ -1668,7 +1662,7 @@ void Model::removeMaterial(graphics::MaterialPointer material, const std::string
auto itemID = _modelMeshRenderItemIDs[shapeID];
auto renderItemsKey = _renderItemKeyGlobalFlags;
PrimitiveMode primitiveMode = getPrimitiveMode();
auto meshIndex = _modelMeshRenderItemShapes[shapeID].meshIndex;
auto meshIndex = _shapeStates[shapeID]._meshIndex;
bool invalidatePayloadShapeKey = shouldInvalidatePayloadShapeKey(meshIndex);
bool useDualQuaternionSkinning = _useDualQuaternionSkinning;
transaction.updateItem<ModelMeshPartPayload>(itemID, [material, renderItemsKey,
@ -1683,14 +1677,13 @@ void Model::removeMaterial(graphics::MaterialPointer material, const std::string
AbstractViewStateInterface::instance()->getMain3DScene()->enqueueTransaction(transaction);
}
class CollisionRenderGeometry : public Geometry {
class CollisionRenderGeometry : public NetworkModel {
public:
CollisionRenderGeometry(graphics::MeshPointer mesh) {
_hfmModel = std::make_shared<HFMModel>();
std::shared_ptr<GeometryMeshes> meshes = std::make_shared<GeometryMeshes>();
meshes->push_back(mesh);
_meshes = meshes;
_meshParts = std::shared_ptr<const GeometryMeshParts>();
}
};
@ -1841,7 +1834,7 @@ void Blender::run() {
bool Model::maybeStartBlender() {
if (isLoaded()) {
QThreadPool::globalInstance()->start(new Blender(getThisPointer(), getGeometry()->getConstHFMModelPointer(),
QThreadPool::globalInstance()->start(new Blender(getThisPointer(), getNetworkModel()->getConstHFMModelPointer(),
++_blendNumber, _blendshapeCoefficients));
return true;
}

View file

@ -178,7 +178,7 @@ public:
virtual void updateClusterMatrices();
/// Returns a reference to the shared geometry.
const Geometry::Pointer& getGeometry() const { return _renderGeometry; }
const NetworkModel::Pointer& getNetworkModel() const { return _renderGeometry; }
const QVariantMap getTextures() const { assert(isLoaded()); return _renderGeometry->getTextures(); }
Q_INVOKABLE virtual void setTextures(const QVariantMap& textures);
@ -297,6 +297,16 @@ public:
int getRenderInfoDrawCalls() const { return _renderInfoDrawCalls; }
bool getRenderInfoHasTransparent() const { return _renderInfoHasTransparent; }
class ShapeState {
public:
glm::mat4 _rootFromJointTransform;
uint32_t _jointIndex{ hfm::UNDEFINED_KEY };
uint32_t _meshIndex{ hfm::UNDEFINED_KEY };
uint32_t _meshPartIndex{ hfm::UNDEFINED_KEY };
uint32_t _skinDeformerIndex{ hfm::UNDEFINED_KEY };
};
const ShapeState& getShapeState(int index) { return _shapeStates.at(index); }
class TransformDualQuaternion {
public:
TransformDualQuaternion() {}
@ -339,12 +349,13 @@ public:
public:
std::vector<TransformDualQuaternion> clusterDualQuaternions;
std::vector<glm::mat4> clusterMatrices;
};
uint32_t getNumClusters() const { return (uint32_t) std::max(clusterMatrices.size(), clusterDualQuaternions.size()); }
};
const MeshState& getMeshState(int index) { return _meshStates.at(index); }
uint32_t getGeometryCounter() const { return _deleteGeometryCounter; }
const QMap<render::ItemID, render::PayloadPointer>& getRenderItems() const { return _modelMeshRenderItemsMap; }
BlendShapeOperator getModelBlendshapeOperator() const { return _modelBlendshapeOperator; }
void renderDebugMeshBoxes(gpu::Batch& batch, bool forward);
@ -391,9 +402,9 @@ protected:
/// \return true if joint exists
bool getJointPosition(int jointIndex, glm::vec3& position) const;
Geometry::Pointer _renderGeometry; // only ever set by its watcher
NetworkModel::Pointer _renderGeometry; // only ever set by its watcher
GeometryResourceWatcher _renderWatcher;
ModelResourceWatcher _renderWatcher;
SpatiallyNestable* _spatiallyNestableOverride;
@ -419,6 +430,10 @@ protected:
bool _snappedToRegistrationPoint; /// are we currently snapped to a registration point
glm::vec3 _registrationPoint = glm::vec3(0.5f); /// the point in model space our center is snapped to
std::vector<ShapeState> _shapeStates;
void updateShapeStatesFromRig();
std::vector<MeshState> _meshStates;
virtual void initJointStates();
@ -463,10 +478,7 @@ protected:
static AbstractViewStateInterface* _viewState;
QVector<std::shared_ptr<ModelMeshPartPayload>> _modelMeshRenderItems;
QMap<render::ItemID, render::PayloadPointer> _modelMeshRenderItemsMap;
render::ItemIDs _modelMeshRenderItemIDs;
using ShapeInfo = struct { int meshIndex; };
std::vector<ShapeInfo> _modelMeshRenderItemShapes;
std::vector<std::string> _modelMeshMaterialNames;
bool _addedToScene { false }; // has been added to scene
@ -517,7 +529,7 @@ private:
};
Q_DECLARE_METATYPE(ModelPointer)
Q_DECLARE_METATYPE(Geometry::WeakPointer)
Q_DECLARE_METATYPE(NetworkModel::WeakPointer)
Q_DECLARE_METATYPE(BlendshapeOffset)
/// Handle management of pending models that need blending

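Note on the ShapeState addition above: it is the per-shape counterpart of the per-deformer MeshState — each entry ties a render shape back to its joint, mesh, mesh part, and (optional) skin deformer, and caches the joint-relative transform. Below is a self-contained sketch of how such a table can be consumed; the struct, the UNDEFINED_KEY value, and the rigid-vs-skinned split are stand-ins and assumptions written for this note, not the engine types:

    #include <glm/glm.hpp>
    #include <cstdint>

    // Illustrative stand-ins for hfm::UNDEFINED_KEY and Model::ShapeState.
    constexpr uint32_t UNDEFINED_KEY = 0xFFFFFFFFu;

    struct ShapeStateSketch {
        glm::mat4 rootFromJointTransform{ 1.0f };
        uint32_t jointIndex{ UNDEFINED_KEY };
        uint32_t meshIndex{ UNDEFINED_KEY };
        uint32_t meshPartIndex{ UNDEFINED_KEY };
        uint32_t skinDeformerIndex{ UNDEFINED_KEY };
    };

    // A shape with no skin deformer follows its joint rigidly, so its model-space
    // transform is just the cached joint transform; a skinned shape is driven by
    // the cluster matrices instead, so the identity is returned here.
    inline glm::mat4 modelSpaceTransform(const ShapeStateSketch& shape) {
        return (shape.skinDeformerIndex == UNDEFINED_KEY) ? shape.rootFromJointTransform
                                                          : glm::mat4(1.0f);
    }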
View file

@ -41,37 +41,36 @@ void SoftAttachmentModel::updateClusterMatrices() {
_needsUpdateClusterMatrices = false;
const HFMModel& hfmModel = getHFMModel();
for (int i = 0; i < (int) _meshStates.size(); i++) {
MeshState& state = _meshStates[i];
const HFMMesh& mesh = hfmModel.meshes.at(i);
int meshIndex = i;
for (int j = 0; j < mesh.clusters.size(); j++) {
const HFMCluster& cluster = mesh.clusters.at(j);
for (int skinDeformerIndex = 0; skinDeformerIndex < (int)_meshStates.size(); skinDeformerIndex++) {
MeshState& state = _meshStates[skinDeformerIndex];
auto numClusters = state.getNumClusters();
for (uint32_t clusterIndex = 0; clusterIndex < numClusters; clusterIndex++) {
const auto& cbmov = _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(skinDeformerIndex, clusterIndex);
int clusterIndex = j;
// TODO: cache these look-ups as an optimization
int jointIndexOverride = getJointIndexOverride(cluster.jointIndex);
glm::mat4 jointMatrix;
int jointIndexOverride = getJointIndexOverride(cbmov.jointIndex);
auto rig = &_rigOverride;
if (jointIndexOverride >= 0 && jointIndexOverride < _rigOverride.getJointStateCount()) {
jointMatrix = _rigOverride.getJointTransform(jointIndexOverride);
} else {
jointMatrix = _rig.getJointTransform(cluster.jointIndex);
rig = &_rig;
}
if (_useDualQuaternionSkinning) {
glm::mat4 m;
glm_mat4u_mul(jointMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, m);
state.clusterDualQuaternions[j] = Model::TransformDualQuaternion(m);
auto jointPose = rig->getJointPose(cbmov.jointIndex);
Transform jointTransform(jointPose.rot(), jointPose.scale(), jointPose.trans());
Transform clusterTransform;
Transform::mult(clusterTransform, jointTransform, cbmov.inverseBindTransform);
state.clusterDualQuaternions[clusterIndex] = Model::TransformDualQuaternion(clusterTransform);
} else {
glm_mat4u_mul(jointMatrix, _rig.getAnimSkeleton()->getClusterBindMatricesOriginalValues(meshIndex, clusterIndex).inverseBindMatrix, state.clusterMatrices[j]);
auto jointMatrix = rig->getJointTransform(cbmov.jointIndex);
glm_mat4u_mul(jointMatrix, cbmov.inverseBindMatrix, state.clusterMatrices[clusterIndex]);
}
}
}
// post the blender if we're not currently waiting for one to finish
auto modelBlender = DependencyManager::get<ModelBlender>();
if (modelBlender->shouldComputeBlendshapes() && hfmModel.hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) {
if (modelBlender->shouldComputeBlendshapes() && getHFMModel().hasBlendedMeshes() && _blendshapeCoefficients != _blendedBlendshapeCoefficients) {
_blendedBlendshapeCoefficients = _blendshapeCoefficients;
modelBlender->noteRequiresBlend(getThisPointer());
}

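Note on the updateClusterMatrices hunk above: in both branches the per-cluster transform is the animated joint transform composed with that cluster's inverse bind transform; the dual-quaternion path only stores the result in a different representation. Below is a standalone sketch of the matrix-path composition using plain glm — the values are illustrative, and glm_mat4u_mul is just a SIMD-accelerated equivalent of the multiplication shown here:

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_transform.hpp>
    #include <iostream>

    int main() {
        // Illustrative bind pose: joint 10 cm up the Y axis.
        const glm::mat4 bindMatrix = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.1f, 0.0f));
        const glm::mat4 inverseBindMatrix = glm::inverse(bindMatrix);

        // Current animated joint transform: moved further up, rotated 90 degrees about Z.
        glm::mat4 jointMatrix = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.3f, 0.0f));
        jointMatrix = glm::rotate(jointMatrix, glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f));

        // Same composition the matrix path performs per cluster.
        const glm::mat4 clusterMatrix = jointMatrix * inverseBindMatrix;

        // A vertex bound to this cluster, sitting at the bind-pose joint origin.
        const glm::vec4 restPosition(0.0f, 0.1f, 0.0f, 1.0f);
        const glm::vec4 skinnedPosition = clusterMatrix * restPosition;
        std::cout << skinnedPosition.x << " " << skinnedPosition.y << " " << skinnedPosition.z << "\n"; // 0 0.3 0
        return 0;
    }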
View file

@ -392,4 +392,14 @@ inline glm::vec4 extractFov( const glm::mat4& m) {
return result;
}
inline bool operator<(const glm::vec3& lhs, const glm::vec3& rhs) {
return (lhs.x < rhs.x) || (
(lhs.x == rhs.x) && (
(lhs.y < rhs.y) || (
(lhs.y == rhs.y) && (lhs.z < rhs.z)
)
)
);
}
#endif // hifi_GLMHelpers_h

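Note on the operator< added above: it gives glm::vec3 a lexicographic strict weak ordering, which is what ordered containers and sorting require. The standalone demo below restates the comparison as an explicit functor (a safer choice than relying on a global operator being found from inside std templates) and uses it to deduplicate vertex positions; everything here is written for this note, not taken from the commit:

    #include <glm/glm.hpp>
    #include <map>
    #include <iostream>

    // Mirrors the lexicographic comparison added to GLMHelpers.h.
    struct Vec3Less {
        bool operator()(const glm::vec3& lhs, const glm::vec3& rhs) const {
            return (lhs.x < rhs.x) ||
                   ((lhs.x == rhs.x) && ((lhs.y < rhs.y) ||
                   ((lhs.y == rhs.y) && (lhs.z < rhs.z))));
        }
    };

    int main() {
        // Deduplicate vertices while assigning stable indices.
        std::map<glm::vec3, int, Vec3Less> vertexIndices;
        const glm::vec3 positions[] = { {0, 0, 0}, {1, 0, 0}, {0, 0, 0} };
        for (const auto& p : positions) {
            vertexIndices.emplace(p, (int)vertexIndices.size());
        }
        std::cout << "unique vertices: " << vertexIndices.size() << "\n"; // prints 2
        return 0;
    }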
View file

@ -189,7 +189,7 @@ uint32_t ShapeInfo::getNumSubShapes() const {
return 0;
case SHAPE_TYPE_COMPOUND:
case SHAPE_TYPE_SIMPLE_COMPOUND:
return _pointCollection.size();
return (uint32_t)_pointCollection.size();
case SHAPE_TYPE_MULTISPHERE:
case SHAPE_TYPE_SIMPLE_HULL:
case SHAPE_TYPE_STATIC_MESH:
@ -200,10 +200,10 @@ uint32_t ShapeInfo::getNumSubShapes() const {
}
}
int ShapeInfo::getLargestSubshapePointCount() const {
int numPoints = 0;
for (int i = 0; i < _pointCollection.size(); ++i) {
int n = _pointCollection[i].size();
uint32_t ShapeInfo::getLargestSubshapePointCount() const {
uint32_t numPoints = 0;
for (uint32_t i = 0; i < (uint32_t)_pointCollection.size(); ++i) {
uint32_t n = (uint32_t)_pointCollection[i].size();
if (n > numPoints) {
numPoints = n;
}

View file

@ -12,7 +12,7 @@
#ifndef hifi_ShapeInfo_h
#define hifi_ShapeInfo_h
#include <QVector>
#include <vector>
#include <QString>
#include <QUrl>
#include <glm/glm.hpp>
@ -53,11 +53,11 @@ class ShapeInfo {
public:
using PointList = QVector<glm::vec3>;
using PointCollection = QVector<PointList>;
using TriangleIndices = QVector<int32_t>;
using PointList = std::vector<glm::vec3>;
using PointCollection = std::vector<PointList>;
using TriangleIndices = std::vector<int32_t>;
using SphereData = glm::vec4;
using SphereCollection = QVector<SphereData>;
using SphereCollection = std::vector<SphereData>;
static QString getNameForShapeType(ShapeType type);
static ShapeType getShapeTypeForName(QString string);
@ -85,7 +85,7 @@ public:
TriangleIndices& getTriangleIndices() { return _triangleIndices; }
const TriangleIndices& getTriangleIndices() const { return _triangleIndices; }
int getLargestSubshapePointCount() const;
uint32_t getLargestSubshapePointCount() const;
float computeVolume() const;

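Note on the ShapeInfo changes above: with the aliases switched from QVector to std::vector, callers build and inspect collision point data with plain STL idioms. A self-contained sketch mirroring the logic of getLargestSubshapePointCount() against the new container types (the aliases are re-declared locally so the snippet compiles on its own):

    #include <glm/glm.hpp>
    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Local copies of the std::vector-based aliases, for a standalone demo.
    using PointList = std::vector<glm::vec3>;
    using PointCollection = std::vector<PointList>;

    // Same logic as ShapeInfo::getLargestSubshapePointCount(), shown against std::vector.
    uint32_t largestSubshapePointCount(const PointCollection& points) {
        uint32_t numPoints = 0;
        for (const PointList& hull : points) {
            numPoints = std::max(numPoints, (uint32_t)hull.size());
        }
        return numPoints;
    }

    int main() {
        PointCollection hulls{
            { {0, 0, 0}, {1, 0, 0}, {0, 1, 0} },            // triangle
            { {0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {0, 0, 1} }  // tetrahedron
        };
        std::cout << largestSubshapePointCount(hulls) << "\n"; // prints 4
        return 0;
    }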
View file

@ -149,12 +149,11 @@ void vhacd::VHACDUtil::fattenMesh(const HFMMesh& mesh, const glm::mat4& modelOff
result.vertices << p3; // add the new point to the result mesh
HFMMeshPart newMeshPart;
setMeshPartDefaults(newMeshPart, "unknown");
newMeshPart.triangleIndices << index0 << index1 << index2;
newMeshPart.triangleIndices << index0 << index3 << index1;
newMeshPart.triangleIndices << index1 << index3 << index2;
newMeshPart.triangleIndices << index2 << index3 << index0;
result.parts.append(newMeshPart);
result.parts.push_back(newMeshPart);
}
}
@ -259,8 +258,8 @@ void vhacd::VHACDUtil::getConvexResults(VHACD::IVHACD* convexifier, HFMMesh& res
VHACD::IVHACD::ConvexHull hull;
convexifier->GetConvexHull(j, hull);
resultMesh.parts.append(HFMMeshPart());
HFMMeshPart& resultMeshPart = resultMesh.parts.last();
resultMesh.parts.push_back(HFMMeshPart());
HFMMeshPart& resultMeshPart = resultMesh.parts.back();
int hullIndexStart = resultMesh.vertices.size();
resultMesh.vertices.reserve(hullIndexStart + hull.m_nPoints);
@ -300,8 +299,8 @@ bool vhacd::VHACDUtil::computeVHACD(HFMModel& hfmModel,
}
// count the mesh-parts
int numParts = 0;
foreach (const HFMMesh& mesh, hfmModel.meshes) {
size_t numParts = 0;
for (const HFMMesh& mesh : hfmModel.meshes) {
numParts += mesh.parts.size();
}
if (_verbose) {
@ -311,8 +310,8 @@ bool vhacd::VHACDUtil::computeVHACD(HFMModel& hfmModel,
VHACD::IVHACD * convexifier = VHACD::CreateVHACD();
result.meshExtents.reset();
result.meshes.append(HFMMesh());
HFMMesh &resultMesh = result.meshes.last();
result.meshes.push_back(HFMMesh());
HFMMesh &resultMesh = result.meshes.back();
const uint32_t POINT_STRIDE = 3;
const uint32_t TRIANGLE_STRIDE = 3;
@ -348,7 +347,7 @@ bool vhacd::VHACDUtil::computeVHACD(HFMModel& hfmModel,
if (_verbose) {
qDebug() << "mesh" << meshIndex << ": "
<< " parts =" << mesh.parts.size() << " clusters =" << mesh.clusters.size()
<< " parts =" << mesh.parts.size()
<< " vertices =" << numVertices;
}
++meshIndex;

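Note on the VHACDUtil edits above: they are a mechanical Qt-to-STL container migration — append()/last()/foreach become push_back()/back()/range-for, and counters widen to size_t. A standalone illustration of the pattern (MeshSketch and PartSketch are made-up stand-ins, not the HFM types):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct PartSketch { std::vector<int32_t> triangleIndices; };
    struct MeshSketch { std::vector<PartSketch> parts; };

    int main() {
        std::vector<MeshSketch> meshes;

        meshes.push_back(MeshSketch());      // old style: meshes.append(MeshSketch());
        MeshSketch& mesh = meshes.back();    // old style: meshes.last();

        PartSketch part;
        part.triangleIndices = { 0, 1, 2 };  // old style: triangleIndices << 0 << 1 << 2;
        mesh.parts.push_back(part);          // old style: mesh.parts.append(part);

        size_t numParts = 0;
        for (const MeshSketch& m : meshes) { // old style: foreach (const MeshSketch& m, meshes)
            numParts += m.parts.size();
        }
        std::cout << "parts: " << numParts << "\n"; // prints "parts: 1"
        return 0;
    }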
View file

@ -387,7 +387,7 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
}
if (verbose) {
int totalHulls = result.meshes[0].parts.size();
auto totalHulls = result.meshes[0].parts.size();
qDebug() << "output file =" << outputFilename;
qDebug() << "vertices =" << totalVertices;
qDebug() << "triangles =" << totalTriangles;
@ -402,7 +402,7 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
HFMMesh result;
// count the mesh-parts
unsigned int meshCount = 0;
size_t meshCount = 0;
foreach (const HFMMesh& mesh, fbx.meshes) {
meshCount += mesh.parts.size();
}
@ -412,7 +412,7 @@ VHACDUtilApp::VHACDUtilApp(int argc, char* argv[]) :
vUtil.fattenMesh(mesh, fbx.offset, result);
}
newFbx.meshes.append(result);
newFbx.meshes.push_back(result);
writeOBJ(outputFilename, newFbx, outputCentimeters);
}
}