quiet compiler

Seth Alves 2015-08-11 10:12:46 -07:00
parent 60b74af2a8
commit d6322903a2
4 changed files with 107 additions and 124 deletions

View file

@ -154,9 +154,9 @@ public:
DEFINE_PROPERTY_REF(PROP_HREF, Href, href, QString);
DEFINE_PROPERTY_REF(PROP_DESCRIPTION, Description, description, QString);
DEFINE_PROPERTY(PROP_FACE_CAMERA, FaceCamera, faceCamera, bool);
DEFINE_PROPERTY_REF(PROP_ACTION_DATA, ActionData, actionData, QByteArray);
DEFINE_PROPERTY(PROP_NORMALS, Normals, normals, QVector<glm::vec3>);
DEFINE_PROPERTY(PROP_STROKE_WIDTHS, StrokeWidths, strokeWidths, QVector<float>);
DEFINE_PROPERTY_REF(PROP_ACTION_DATA, ActionData, actionData, QByteArray);
DEFINE_PROPERTY_REF(PROP_X_TEXTURE_URL, XTextureURL, xTextureURL, QString);
DEFINE_PROPERTY_REF(PROP_Y_TEXTURE_URL, YTextureURL, yTextureURL, QString);
DEFINE_PROPERTY_REF(PROP_Z_TEXTURE_URL, ZTextureURL, zTextureURL, QString);
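
The visible change in this hunk is that the PROP_ACTION_DATA declaration moves from above the PROP_NORMALS / PROP_STROKE_WIDTHS pair to below it. For a "quiet compiler" commit, a declaration-order shuffle like this most often exists to match the order of the constructor's initializer list, which is what GCC/Clang's -Wreorder warning checks; treat that as an assumption, since the constructor is not part of this diff. A small, hypothetical illustration of that warning class:

    // Hypothetical example (not from this repo): -Wreorder fires because members are
    // initialized in declaration order, not in the order written in the initializer list.
    class Widget {
    public:
        Widget() : _second(2), _first(_second) {}  // _first is initialized first and
                                                   // reads _second before it is set
    private:
        int _first;
        int _second;
    };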

View file

@ -170,9 +170,10 @@ bool PolyLineEntityItem::setLinePoints(const QVector<glm::vec3>& points) {
for (int i = 0; i < points.size(); i++) {
glm::vec3 point = points.at(i);
glm::vec3 pos = getPosition();
glm::vec3 halfBox = getDimensions() * 0.5f;
if ( (point.x < - halfBox.x || point.x > halfBox.x) || (point.y < -halfBox.y || point.y > halfBox.y) || (point.z < - halfBox.z || point.z > halfBox.z) ) {
if ((point.x < - halfBox.x || point.x > halfBox.x) ||
(point.y < -halfBox.y || point.y > halfBox.y) ||
(point.z < - halfBox.z || point.z > halfBox.z)) {
qDebug() << "Point is outside entity's bounding box";
return false;
}
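
The change in this file just breaks the long bounds test across three lines, one axis per line. As an aside (a sketch, not part of the commit), the same per-axis test can be written with glm's component-wise helpers, assuming each point is expressed relative to the entity's center, as the comparison above already implies:

    #include <glm/glm.hpp>

    // Accepts a point iff it lies within +/- halfBox of the origin, matching the
    // per-axis checks in setLinePoints() above.
    bool withinHalfExtents(const glm::vec3& point, const glm::vec3& halfBox) {
        return glm::all(glm::lessThanEqual(glm::abs(point), halfBox));
    }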

View file

@ -976,7 +976,7 @@ ExtractedMesh extractMesh(const FBXNode& object, unsigned int& meshIndex) {
data.extracted.mesh.meshIndex = meshIndex++;
QVector<int> materials;
QVector<int> textures;
bool isMaterialPerPolygon = false;
// bool isMaterialPerPolygon = false;
foreach (const FBXNode& child, object.children) {
if (child.name == "Vertices") {
@ -1107,13 +1107,13 @@ ExtractedMesh extractMesh(const FBXNode& object, unsigned int& meshIndex) {
foreach (const FBXNode& subdata, child.children) {
if (subdata.name == "Materials") {
materials = getIntVector(subdata);
} else if (subdata.name == "MappingInformationType") {
if (subdata.properties.at(0) == "ByPolygon") {
isMaterialPerPolygon = true;
} else {
isMaterialPerPolygon = false;
}
}
} // else if (subdata.name == "MappingInformationType") {
// if (subdata.properties.at(0) == "ByPolygon")
// isMaterialPerPolygon = true;
// } else {
// isMaterialPerPolygon = false;
// }
// }
}
@ -1126,12 +1126,6 @@ ExtractedMesh extractMesh(const FBXNode& object, unsigned int& meshIndex) {
}
}
bool isMultiMaterial = false;
if (isMaterialPerPolygon) {
isMultiMaterial = true;
}
// convert the polygons to quads and triangles
int polygonIndex = 0;
QHash<QPair<int, int>, int> materialTextureParts;
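
In this file, isMaterialPerPolygon and the MappingInformationType branch that set it are commented out, and the block that copied it into isMultiMaterial is deleted, so the local no longer exists to draw an unused or set-but-unused warning; the exact diagnostic being silenced is not visible in the diff, so that reading is an assumption. Commenting the code out is one option; a common alternative that keeps the variable compilable but quiet is an explicit discard, sketched here with hypothetical names:

    // Hypothetical sketch: keep a currently-unused local without the warning.
    void parseMaterialMapping(bool mappedByPolygon) {
        bool isMaterialPerPolygon = mappedByPolygon;  // kept for future use
        (void)isMaterialPerPolygon;                   // explicitly discarded, so no warning
    }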

View file

@ -112,14 +112,14 @@ void Model::RenderPipelineLib::addRenderPipeline(Model::RenderKey key,
gpu::ShaderPointer program = gpu::ShaderPointer(gpu::Shader::createProgram(vertexShader, pixelShader));
gpu::Shader::makeProgram(*program, slotBindings);
auto locations = std::make_shared<Locations>();
initLocations(program, *locations);
auto state = std::make_shared<gpu::State>();
// Backface on shadow
if (key.isShadow()) {
state->setCullMode(gpu::State::CULL_FRONT);
@ -140,36 +140,36 @@ void Model::RenderPipelineLib::addRenderPipeline(Model::RenderKey key,
// Good to go add the brand new pipeline
auto pipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, state));
insert(value_type(key.getRaw(), RenderPipeline(pipeline, locations)));
if (!key.isWireFrame()) {
RenderKey wireframeKey(key.getRaw() | RenderKey::IS_WIREFRAME);
auto wireframeState = std::make_shared<gpu::State>(state->getValues());
wireframeState->setFillMode(gpu::State::FILL_LINE);
// create a new RenderPipeline with the same shader side and the wireframeState
auto wireframePipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, wireframeState));
insert(value_type(wireframeKey.getRaw(), RenderPipeline(wireframePipeline, locations)));
}
// If not a shadow pass, create the mirror version from the same state, just change the FrontFace
if (!key.isShadow()) {
RenderKey mirrorKey(key.getRaw() | RenderKey::IS_MIRROR);
auto mirrorState = std::make_shared<gpu::State>(state->getValues());
// create a new RenderPipeline with the same shader side and the mirrorState
auto mirrorPipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, mirrorState));
insert(value_type(mirrorKey.getRaw(), RenderPipeline(mirrorPipeline, locations)));
if (!key.isWireFrame()) {
RenderKey wireframeKey(key.getRaw() | RenderKey::IS_MIRROR | RenderKey::IS_WIREFRAME);
auto wireframeState = std::make_shared<gpu::State>(state->getValues());
wireframeState->setFillMode(gpu::State::FILL_LINE);
// create a new RenderPipeline with the same shader side and the wireframeState
auto wireframePipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, wireframeState));
insert(value_type(wireframeKey.getRaw(), RenderPipeline(wireframePipeline, locations)));
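
Aside on the block above: the wireframe and mirror variants all follow one pattern, reusing the shader program and cloning the base gpu::State, then changing only the field that distinguishes the variant (fill mode for wireframe, front-face winding for mirror, per the comments). A condensed sketch of that pattern, assuming the same gpu:: types and calls already visible in this hunk:

    // Sketch (the helper name is mine): derive a wireframe pipeline from a base program/state.
    auto makeWireframeVariant = [](const gpu::ShaderPointer& program,
                                   const std::shared_ptr<gpu::State>& baseState) {
        auto state = std::make_shared<gpu::State>(baseState->getValues());  // copy the base state
        state->setFillMode(gpu::State::FILL_LINE);                          // the only difference
        return gpu::PipelinePointer(gpu::Pipeline::create(program, state));
    };
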
@ -214,12 +214,12 @@ void Model::setScaleInternal(const glm::vec3& scale) {
}
}
void Model::setOffset(const glm::vec3& offset) {
_offset = offset;
// if someone manually sets our offset, then we are no longer snapped to center
_snapModelToRegistrationPoint = false;
_snappedToRegistrationPoint = false;
}
QVector<JointState> Model::createJointStates(const FBXGeometry& geometry) {
@ -267,7 +267,7 @@ void Model::init() {
auto modelLightmapNormalSpecularMapPixel = gpu::ShaderPointer(gpu::Shader::createPixel(std::string(model_lightmap_normal_specular_map_frag)));
// Fill the renderPipelineLib
_renderPipelineLib.addRenderPipeline(
RenderKey(0),
modelVertex, modelPixel);
@ -284,7 +284,7 @@ void Model::init() {
RenderKey(RenderKey::HAS_TANGENTS | RenderKey::HAS_SPECULAR),
modelNormalMapVertex, modelNormalSpecularMapPixel);
_renderPipelineLib.addRenderPipeline(
RenderKey(RenderKey::IS_TRANSLUCENT),
modelVertex, modelTranslucentPixel);
@ -292,7 +292,7 @@ void Model::init() {
_renderPipelineLib.addRenderPipeline(
RenderKey(RenderKey::IS_TRANSLUCENT | RenderKey::HAS_LIGHTMAP),
modelVertex, modelTranslucentPixel);
_renderPipelineLib.addRenderPipeline(
RenderKey(RenderKey::HAS_TANGENTS | RenderKey::IS_TRANSLUCENT),
modelNormalMapVertex, modelTranslucentPixel);
@ -440,15 +440,15 @@ bool Model::updateGeometry() {
}
_geometry->setLoadPriority(this, -_lodDistance);
_geometry->ensureLoading();
if (needToRebuild) {
const FBXGeometry& fbxGeometry = geometry->getFBXGeometry();
foreach (const FBXMesh& mesh, fbxGeometry.meshes) {
MeshState state;
state.clusterMatrices.resize(mesh.clusters.size());
state.cauterizedClusterMatrices.resize(mesh.clusters.size());
_meshStates.append(state);
auto buffer = std::make_shared<gpu::Buffer>();
if (!mesh.blendshapes.isEmpty()) {
buffer->resize((mesh.vertices.size() + mesh.normals.size()) * sizeof(glm::vec3));
@ -486,7 +486,7 @@ void Model::initJointStates(QVector<JointState> states) {
rightShoulderJointIndex);
}
bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const glm::vec3& direction, float& distance,
BoxFace& face, QString& extraInfo, bool pickAgainstTriangles) {
bool intersectedSomething = false;
@ -495,7 +495,7 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
if (!isActive()) {
return intersectedSomething;
}
// extents is the entity relative, scaled, centered extents of the entity
glm::vec3 position = _translation;
glm::mat4 rotation = glm::mat4_cast(_rotation);
@ -504,7 +504,7 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
glm::mat4 worldToModelMatrix = glm::inverse(modelToWorldMatrix);
Extents modelExtents = getMeshExtents(); // NOTE: unrotated
glm::vec3 dimensions = modelExtents.maximum - modelExtents.minimum;
glm::vec3 corner = -(dimensions * _registrationPoint); // since we're going to do the ray picking in the model frame of reference
AABox modelFrameBox(corner, dimensions);
@ -543,7 +543,7 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
int t = 0;
foreach (const Triangle& triangle, meshTriangles) {
t++;
float thisTriangleDistance;
if (findRayTriangleIntersection(origin, direction, triangle, thisTriangleDistance)) {
if (thisTriangleDistance < bestDistance) {
@ -562,7 +562,7 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
extraInfo = geometry.getModelNameOfMesh(subMeshIndex);
}
}
}
}
subMeshIndex++;
}
_mutex.unlock();
@ -570,7 +570,7 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
if (intersectedSomething) {
distance = bestDistance;
}
return intersectedSomething;
}
@ -582,22 +582,22 @@ bool Model::convexHullContains(glm::vec3 point) {
if (!isActive()) {
return false;
}
// extents is the entity relative, scaled, centered extents of the entity
glm::vec3 position = _translation;
glm::mat4 rotation = glm::mat4_cast(_rotation);
glm::mat4 translation = glm::translate(position);
glm::mat4 modelToWorldMatrix = translation * rotation;
glm::mat4 worldToModelMatrix = glm::inverse(modelToWorldMatrix);
Extents modelExtents = getMeshExtents(); // NOTE: unrotated
glm::vec3 dimensions = modelExtents.maximum - modelExtents.minimum;
glm::vec3 corner = -(dimensions * _registrationPoint);
AABox modelFrameBox(corner, dimensions);
glm::vec3 modelFramePoint = glm::vec3(worldToModelMatrix * glm::vec4(point, 1.0f));
// we can use the AABox's contains() by mapping our point into the model frame
// and testing there.
if (modelFrameBox.contains(modelFramePoint)){
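
The comment above captures the trick shared by convexHullContains() and the ray-intersection path: instead of rotating the box into world space, the query point is mapped into the model frame with the inverse of the model-to-world transform and tested against an axis-aligned box there. A self-contained sketch of the same idea, with a plain min/max pair standing in for hifi's AABox:

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_transform.hpp>  // glm::translate
    #include <glm/gtc/quaternion.hpp>        // glm::quat, glm::mat4_cast

    // True if worldPoint lies inside an axis-aligned box (boxMin..boxMax) that was
    // placed in the world by applying `rotation` and then `translation`.
    bool containsWorldPoint(const glm::vec3& worldPoint,
                            const glm::vec3& boxMin, const glm::vec3& boxMax,
                            const glm::quat& rotation, const glm::vec3& translation) {
        glm::mat4 modelToWorld = glm::translate(glm::mat4(1.0f), translation) * glm::mat4_cast(rotation);
        glm::vec3 p = glm::vec3(glm::inverse(modelToWorld) * glm::vec4(worldPoint, 1.0f));
        return glm::all(glm::greaterThanEqual(p, boxMin)) && glm::all(glm::lessThanEqual(p, boxMax));
    }
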
@ -605,7 +605,7 @@ bool Model::convexHullContains(glm::vec3 point) {
if (!_calculatedMeshTrianglesValid) {
recalculateMeshBoxes(true);
}
// If we are inside the models box, then consider the submeshes...
int subMeshIndex = 0;
foreach(const AABox& subMeshBox, _calculatedMeshBoxes) {
@ -619,7 +619,7 @@ bool Model::convexHullContains(glm::vec3 point) {
insideMesh = false;
break;
}
}
if (insideMesh) {
// It's inside this mesh, return true.
@ -658,7 +658,7 @@ void Model::recalculateMeshPartOffsets() {
// Any script might trigger findRayIntersectionAgainstSubMeshes (and maybe convexHullContains), so these
// can occur multiple times. In addition, rendering does its own ray picking in order to decide which
// entity-scripts to call. I think it would be best to do the picking once-per-frame (in cpu, or gpu if possible)
// and then the calls use the most recent such result.
void Model::recalculateMeshBoxes(bool pickAgainstTriangles) {
PROFILE_RANGE(__FUNCTION__);
bool calculatedMeshTrianglesNeeded = pickAgainstTriangles && !_calculatedMeshTrianglesValid;
@ -703,7 +703,7 @@ void Model::recalculateMeshBoxes(bool pickAgainstTriangles) {
glm::vec3 mv1 = glm::vec3(mesh.modelTransform * glm::vec4(mesh.vertices[i1], 1.0f));
glm::vec3 mv2 = glm::vec3(mesh.modelTransform * glm::vec4(mesh.vertices[i2], 1.0f));
glm::vec3 mv3 = glm::vec3(mesh.modelTransform * glm::vec4(mesh.vertices[i3], 1.0f));
// track the mesh parts in model space
if (!atLeastOnePointInBounds) {
thisPartBounds.setBox(mv0, 0.0f);
@ -719,18 +719,18 @@ void Model::recalculateMeshBoxes(bool pickAgainstTriangles) {
glm::vec3 v1 = calculateScaledOffsetPoint(mv1);
glm::vec3 v2 = calculateScaledOffsetPoint(mv2);
glm::vec3 v3 = calculateScaledOffsetPoint(mv3);
// Sam's recommended triangle slices
Triangle tri1 = { v0, v1, v3 };
Triangle tri2 = { v1, v2, v3 };
// NOTE: Random guy on the internet's recommended triangle slices
//Triangle tri1 = { v0, v1, v2 };
//Triangle tri2 = { v2, v3, v0 };
thisMeshTriangles.push_back(tri1);
thisMeshTriangles.push_back(tri2);
}
}
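
Aside on the "triangle slices" comments above: a convex, roughly planar quad v0-v1-v2-v3 can be split into two triangles along either diagonal. The active code uses the v1-v3 diagonal ({v0, v1, v3} and {v1, v2, v3}); the commented-out alternative uses the v0-v2 diagonal. Either choice covers the same quad for picking purposes; they only differ for badly non-planar quads. A minimal sketch of the chosen split:

    #include <glm/glm.hpp>

    struct Tri { glm::vec3 a, b, c; };

    // Split a quad along the v1-v3 diagonal, matching the slices chosen above.
    void splitQuad(const glm::vec3& v0, const glm::vec3& v1,
                   const glm::vec3& v2, const glm::vec3& v3, Tri& t1, Tri& t2) {
        t1 = { v0, v1, v3 };
        t2 = { v1, v2, v3 };
    }
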
@ -792,7 +792,7 @@ void Model::renderSetup(RenderArgs* args) {
_dilatedTextures.append(dilated);
}
}
if (!_meshGroupsKnown && isLoaded()) {
segregateMeshGroups();
}
@ -805,7 +805,7 @@ public:
transparent(transparent), model(model), url(model->getURL()), meshIndex(meshIndex), partIndex(partIndex) { }
typedef render::Payload<MeshPartPayload> Payload;
typedef Payload::DataPointer Pointer;
bool transparent;
Model* model;
QUrl url;
@ -814,14 +814,14 @@ public:
};
namespace render {
template <> const ItemKey payloadGetKey(const MeshPartPayload::Pointer& payload) {
if (!payload->model->isVisible()) {
return ItemKey::Builder().withInvisible().build();
}
return payload->transparent ? ItemKey::Builder::transparentShape() : ItemKey::Builder::opaqueShape();
}
template <> const Item::Bound payloadGetBound(const MeshPartPayload::Pointer& payload) {
if (payload) {
return payload->model->getPartBounds(payload->meshIndex, payload->partIndex);
}
@ -875,7 +875,7 @@ bool Model::addToScene(std::shared_ptr<render::Scene> scene, render::PendingChan
_renderItems.insert(item, renderPayload);
somethingAdded = true;
}
_readyWhenAdded = readyToAddToScene();
return somethingAdded;
@ -907,7 +907,7 @@ bool Model::addToScene(std::shared_ptr<render::Scene> scene, render::PendingChan
_renderItems.insert(item, renderPayload);
somethingAdded = true;
}
_readyWhenAdded = readyToAddToScene();
return somethingAdded;
@ -929,7 +929,7 @@ void Model::renderDebugMeshBoxes(gpu::Batch& batch) {
_debugMeshBoxesID = DependencyManager::get<GeometryCache>()->allocateID();
}
QVector<glm::vec3> points;
glm::vec3 brn = box.getCorner();
glm::vec3 bln = brn + glm::vec3(box.getDimensions().x, 0, 0);
glm::vec3 brf = brn + glm::vec3(0, 0, box.getDimensions().z);
@ -963,12 +963,12 @@ void Model::renderDebugMeshBoxes(gpu::Batch& batch) {
{ 1.0f, 1.0f, 0.0f, 1.0f }, // yellow
{ 0.0f, 1.0f, 1.0f, 1.0f }, // cyan
{ 1.0f, 1.0f, 1.0f, 1.0f }, // white
{ 0.0f, 0.5f, 0.0f, 1.0f },
{ 0.0f, 0.0f, 0.5f, 1.0f },
{ 0.5f, 0.0f, 0.5f, 1.0f },
{ 0.5f, 0.5f, 0.0f, 1.0f },
{ 0.0f, 0.5f, 0.5f, 1.0f } };
DependencyManager::get<GeometryCache>()->updateVertices(_debugMeshBoxesID, points, color[colorNdx]);
DependencyManager::get<GeometryCache>()->renderVertices(batch, gpu::LINES, _debugMeshBoxesID);
colorNdx++;
@ -1003,7 +1003,7 @@ Extents Model::getUnscaledMeshExtents() const {
if (!isActive()) {
return Extents();
}
const Extents& extents = _geometry->getFBXGeometry().meshExtents;
// even though our caller asked for "unscaled" we need to include any fst scaling, translation, and rotation, which
@ -1011,7 +1011,7 @@ Extents Model::getUnscaledMeshExtents() const {
glm::vec3 minimum = glm::vec3(_geometry->getFBXGeometry().offset * glm::vec4(extents.minimum, 1.0f));
glm::vec3 maximum = glm::vec3(_geometry->getFBXGeometry().offset * glm::vec4(extents.maximum, 1.0f));
Extents scaledExtents = { minimum, maximum };
return scaledExtents;
}
@ -1020,12 +1020,12 @@ Extents Model::calculateScaledOffsetExtents(const Extents& extents) const {
glm::vec3 minimum = glm::vec3(_geometry->getFBXGeometry().offset * glm::vec4(extents.minimum, 1.0f));
glm::vec3 maximum = glm::vec3(_geometry->getFBXGeometry().offset * glm::vec4(extents.maximum, 1.0f));
Extents scaledOffsetExtents = { ((minimum + _offset) * _scale),
((maximum + _offset) * _scale) };
Extents rotatedExtents = scaledOffsetExtents.getRotated(_rotation);
Extents translatedExtents = { rotatedExtents.minimum + _translation,
rotatedExtents.maximum + _translation };
return translatedExtents;
@ -1084,7 +1084,7 @@ void Model::setURL(const QUrl& url, const QUrl& fallback, bool retainCurrent, bo
onInvalidate();
// if so instructed, keep the current geometry until the new one is loaded
_nextGeometry = DependencyManager::get<GeometryCache>()->getGeometry(url, fallback, delayLoad);
_nextLODHysteresis = NetworkGeometry::NO_HYSTERESIS;
if (!retainCurrent || !isActive() || (_nextGeometry && _nextGeometry->isLoaded())) {
@ -1094,14 +1094,14 @@ void Model::setURL(const QUrl& url, const QUrl& fallback, bool retainCurrent, bo
void Model::geometryRefreshed() {
QObject* sender = QObject::sender();
if (sender == _geometry) {
_readyWhenAdded = false; // reset our render items.
_needsReload = true;
invalidCalculatedMeshBoxes();
onInvalidate();
// if so instructed, keep the current geometry until the new one is loaded
_nextGeometry = DependencyManager::get<GeometryCache>()->getGeometry(_url);
_nextLODHysteresis = NetworkGeometry::NO_HYSTERESIS;
@ -1121,7 +1121,7 @@ const QSharedPointer<NetworkGeometry> Model::getCollisionGeometry(bool delayLoad
if (_collisionGeometry && _collisionGeometry->isLoaded()) {
return _collisionGeometry;
}
return QSharedPointer<NetworkGeometry>();
}
@ -1176,11 +1176,11 @@ public:
Blender(Model* model, int blendNumber, const QWeakPointer<NetworkGeometry>& geometry,
const QVector<FBXMesh>& meshes, const QVector<float>& blendshapeCoefficients);
virtual void run();
private:
QPointer<Model> _model;
int _blendNumber;
QWeakPointer<NetworkGeometry> _geometry;
@ -1254,10 +1254,10 @@ void Model::setScaleToFit(bool scaleToFit, float largestDimension, bool forceRes
}
return;
}
if (forceRescale || _scaleToFit != scaleToFit || glm::length(_scaleToFitDimensions) != largestDimension) {
_scaleToFit = scaleToFit;
// we only need to do this work if we're "turning on" scale to fit.
if (scaleToFit) {
Extents modelMeshExtents = getUnscaledMeshExtents();
@ -1278,7 +1278,7 @@ void Model::scaleToFit() {
// we didn't yet have an active mesh. We can only enter this scaleToFit() in this state
// if we now do have an active mesh, so we take this opportunity to actually determine
// the correct scale.
if (_scaleToFit && _scaleToFitDimensions.y == FAKE_DIMENSION_PLACEHOLDER
&& _scaleToFitDimensions.z == FAKE_DIMENSION_PLACEHOLDER) {
setScaleToFit(_scaleToFit, _scaleToFitDimensions.x);
}
@ -1313,7 +1313,7 @@ void Model::simulate(float deltaTime, bool fullUpdate) {
PROFILE_RANGE(__FUNCTION__);
fullUpdate = updateGeometry() || fullUpdate || (_scaleToFit && !_scaledToFit)
|| (_snapModelToRegistrationPoint && !_snappedToRegistrationPoint);
if (isActive() && fullUpdate) {
// NOTE: This is overly aggressive and we are invalidating the MeshBoxes when in fact they may not be invalid
// they really only become invalid if something about the transform to world space has changed. This is
@ -1440,7 +1440,7 @@ void Model::setBlendedVertices(int blendNumber, const QWeakPointer<NetworkGeomet
return;
}
_appliedBlendNumber = blendNumber;
const FBXGeometry& fbxGeometry = _geometry->getFBXGeometry();
int index = 0;
for (int i = 0; i < fbxGeometry.meshes.size(); i++) {
const FBXMesh& mesh = fbxGeometry.meshes.at(i);
@ -1461,7 +1461,7 @@ void Model::setGeometry(const QSharedPointer<NetworkGeometry>& newGeometry) {
if (_geometry == newGeometry) {
return;
}
if (_geometry) {
_geometry->disconnect(_geometry.data(), &Resource::onRefresh, this, &Model::geometryRefreshed);
}
@ -1474,10 +1474,10 @@ void Model::applyNextGeometry() {
deleteGeometry();
_dilatedTextures.clear();
_lodHysteresis = _nextLODHysteresis;
// we retain a reference to the base geometry so that its reference count doesn't fall to zero
setGeometry(_nextGeometry);
_meshGroupsKnown = false;
_readyWhenAdded = false; // in case any of our users are using scenes
_needsReload = false; // we are loaded now!
@ -1509,9 +1509,9 @@ AABox Model::getPartBounds(int meshIndex, int partIndex) {
return calculateScaledOffsetAABox(_geometry->getFBXGeometry().meshExtents);
}
}
if (_geometry->getFBXGeometry().meshes.size() > meshIndex) {
// FIX ME! - This is currently a hack because for some mesh parts our efforts to calculate the bounding
// box of the mesh part fails. It seems to create boxes that are not consistent with where the
// geometry actually renders. If instead we make all the parts share the bounds of the entire subMesh
@ -1536,7 +1536,7 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
if (!_readyWhenAdded) {
return; // bail asap
}
// We need to make sure we have valid offsets calculated before we can render
if (!_calculatedMeshPartOffsetValid) {
_mutex.lock();
@ -1561,13 +1561,13 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
// guard against partially loaded meshes
if (meshIndex >= networkMeshes.size() || meshIndex >= geometry.meshes.size() || meshIndex >= _meshStates.size() ) {
return;
}
const NetworkMesh& networkMesh = networkMeshes.at(meshIndex);
const FBXMesh& mesh = geometry.meshes.at(meshIndex);
const MeshState& state = _meshStates.at(meshIndex);
bool translucentMesh = translucent; // networkMesh.getTranslucentPartCount(mesh) == networkMesh.parts.size();
bool hasTangents = !mesh.tangents.isEmpty();
bool hasSpecular = mesh.hasSpecularTexture();
@ -1597,7 +1597,7 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
DependencyManager::get<DeferredLightingEffect>()->renderWireCube(batch, 1.0f, cubeColor);
}
#endif //def DEBUG_BOUNDING_PARTS
if (wireframe) {
translucentMesh = hasTangents = hasSpecular = hasLightmap = isSkinned = false;
}
@ -1614,14 +1614,14 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
// if our index is ever out of range for either meshes or networkMeshes, then skip it, and set our _meshGroupsKnown
// to false to rebuild our mesh groups.
if (meshIndex < 0 || meshIndex >= networkMeshes.size() || meshIndex > geometry.meshes.size()) {
_meshGroupsKnown = false; // regenerate these lists next time around.
_readyWhenAdded = false; // in case any of our users are using scenes
invalidCalculatedMeshBoxes(); // if we have to reload, we need to assume our mesh boxes are all invalid
return; // FIXME!
}
batch.setIndexBuffer(gpu::UINT32, (networkMesh._indexBuffer), 0);
int vertexCount = mesh.vertices.size();
if (vertexCount == 0) {
@ -1633,7 +1633,7 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
if (_transforms.empty()) {
_transforms.push_back(Transform());
}
if (isSkinned) {
const float* bones;
if (_cauterizeBones) {
@ -1682,7 +1682,7 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
qCDebug(renderutils) << "WARNING: material == nullptr!!!";
}
#endif
if (material != nullptr) {
// apply material properties
@ -1724,12 +1724,12 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
batch._glUniformMatrix4fv(locations->texcoordMatrices, 2, false, (const float*) &texcoordTransform);
}
if (!mesh.tangents.isEmpty()) {
NetworkTexture* normalMap = networkPart.normalTexture.data();
batch.setResourceTexture(1, (!normalMap || !normalMap->isLoaded()) ?
textureCache->getBlueTexture() : normalMap->getGPUTexture());
}
if (locations->specularTextureUnit >= 0) {
NetworkTexture* specularMap = networkPart.specularTexture.data();
batch.setResourceTexture(locations->specularTextureUnit, (!specularMap || !specularMap->isLoaded()) ?
@ -1747,18 +1747,18 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
float emissiveOffset = part.emissiveParams.x;
float emissiveScale = part.emissiveParams.y;
batch._glUniform2f(locations->emissiveParams, emissiveOffset, emissiveScale);
NetworkTexture* emissiveMap = networkPart.emissiveTexture.data();
batch.setResourceTexture(locations->emissiveTextureUnit, (!emissiveMap || !emissiveMap->isLoaded()) ?
textureCache->getGrayTexture() : emissiveMap->getGPUTexture());
}
if (translucent && locations->lightBufferUnit >= 0) {
DependencyManager::get<DeferredLightingEffect>()->setupTransparent(args, locations->lightBufferUnit);
}
}
}
qint64 offset;
{
// FIXME_STUTTER: We shouldn't have any lock here
@ -1798,7 +1798,7 @@ void Model::segregateMeshGroups() {
qDebug() << "WARNING!!!! Mesh Sizes don't match! We will not segregate mesh groups yet.";
return;
}
_transparentRenderItems.clear();
_opaqueRenderItems.clear();
@ -1807,18 +1807,6 @@ void Model::segregateMeshGroups() {
const NetworkMesh& networkMesh = networkMeshes.at(i);
const FBXMesh& mesh = geometry.meshes.at(i);
const MeshState& state = _meshStates.at(i);
bool translucentMesh = networkMesh.getTranslucentPartCount(mesh) == networkMesh.parts.size();
bool hasTangents = !mesh.tangents.isEmpty();
bool hasSpecular = mesh.hasSpecularTexture();
bool hasLightmap = mesh.hasEmissiveTexture();
bool isSkinned = state.clusterMatrices.size() > 1;
bool wireframe = isWireframe();
if (wireframe) {
translucentMesh = hasTangents = hasSpecular = hasLightmap = isSkinned = false;
}
// Create the render payloads
int totalParts = mesh.parts.size();
@ -1831,7 +1819,7 @@ void Model::segregateMeshGroups() {
}
}
_meshGroupsKnown = true;
}
}
void Model::pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasLightmap, bool hasTangents, bool hasSpecular, bool isSkinned, bool isWireframe, RenderArgs* args,
@ -1851,7 +1839,7 @@ void Model::pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, f
gpu::ShaderPointer program = (*pipeline).second._pipeline->getProgram();
locations = (*pipeline).second._locations.get();
// Setup the One pipeline
batch.setPipeline((*pipeline).second._pipeline);
@ -1865,7 +1853,7 @@ void Model::pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, f
}
if ((locations->normalFittingMapUnit > -1)) {
batch.setResourceTexture(locations->normalFittingMapUnit,
DependencyManager::get<TextureCache>()->getNormalFittingTexture());
}
}
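
The one substantive change in this last file is the block deleted from segregateMeshGroups(): the locals translucentMesh, hasTangents, hasSpecular, hasLightmap, isSkinned and wireframe were computed there but apparently never read by the payload-creation code that follows (renderPart() recomputes several of the same flags per mesh, as an earlier hunk shows), so deleting them removes unused-variable warnings. The remaining hunks in this file appear to be whitespace cleanup. A hypothetical illustration of the warning class being quieted:

    // Hypothetical example: a local computed from real data but never read afterwards
    // draws an unused-variable warning under -Wall; deleting it, as above, quiets it.
    void example(int clusterCount) {
        bool isSkinned = clusterCount > 1;  // never used below -> warning
    }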