render models in scene checkpoint

ZappoMan 2015-05-28 17:19:30 -07:00
parent 1c3398f27e
commit eb19b93433
8 changed files with 373 additions and 11 deletions

View file

@@ -489,6 +489,10 @@ void EntityTreeRenderer::applyZonePropertiesToScene(std::shared_ptr<ZoneEntityIt
void EntityTreeRenderer::render(RenderArgs* renderArgs) {
if (_tree && !_shuttingDown) {
renderArgs->_renderer = this;
checkPendingAddToScene(renderArgs);
Model::startScene(renderArgs->_renderSide);
ViewFrustum* frustum = (renderArgs->_renderMode == RenderArgs::SHADOW_RENDER_MODE) ?
@@ -503,7 +507,6 @@ void EntityTreeRenderer::render(RenderArgs* renderArgs) {
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat);
renderArgs->_renderer = this;
renderArgs->_batch = &batch;
_tree->lockForRead();
@@ -1081,16 +1084,38 @@ void EntityTreeRenderer::deletingEntity(const EntityItemID& entityID) {
void EntityTreeRenderer::addingEntity(const EntityItemID& entityID) {
checkAndCallPreload(entityID);
// here's where we add the entity payload to the scene
auto entity = static_cast<EntityTree*>(_tree)->findEntityByID(entityID);
addEntityToScene(entity);
}
void EntityTreeRenderer::addEntityToScene(EntityItemPointer entity) {
// here's where we add the entity payload to the scene
if (entity && entity->canRenderInScene()) {
render::PendingChanges pendingChanges;
auto scene = _viewState->getMain3DScene();
if (entity->addToScene(entity, scene, pendingChanges)) {
_entitiesInScene.insert(entity);
if (entity->readyToAddToScene()) {
render::PendingChanges pendingChanges;
auto scene = _viewState->getMain3DScene();
if (entity->addToScene(entity, scene, pendingChanges)) {
_entitiesInScene.insert(entity);
}
scene->enqueuePendingChanges(pendingChanges);
} else {
if (!_pendingAddToScene.contains(entity)) {
_pendingAddToScene << entity;
}
}
scene->enqueuePendingChanges(pendingChanges);
}
}
void EntityTreeRenderer::checkPendingAddToScene(RenderArgs* renderArgs) {
QSet<EntityItemPointer> addedToScene;
foreach (auto entity, _pendingAddToScene) {
if (entity->readyToAddToScene(renderArgs)) {
addEntityToScene(entity);
addedToScene << entity;
}
}
foreach (auto addedEntity, addedToScene) {
_pendingAddToScene.remove(addedEntity);
}
}
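
The two functions above implement a deferred add: an entity whose resources are not ready (for example, a model that has not finished loading) is parked in _pendingAddToScene and retried once per frame from render(). A minimal sketch of that pattern, using plain standard-library containers and hypothetical Entity/Scene/Renderer types rather than the actual hifi classes:

#include <memory>
#include <unordered_set>
#include <vector>

struct Scene { /* stand-in for render::Scene */ };

struct Entity {
    bool resourcesLoaded = false;                       // e.g. model geometry downloaded
    bool readyToAddToScene() const { return resourcesLoaded; }
    bool addToScene(Scene&) { return resourcesLoaded; } // build and enqueue payloads here
};
using EntityPointer = std::shared_ptr<Entity>;

class Renderer {
public:
    // Called when an entity appears; defer it if it is not ready yet.
    void addEntityToScene(const EntityPointer& entity) {
        if (!entity) {
            return;
        }
        if (entity->readyToAddToScene()) {
            if (entity->addToScene(_scene)) {
                _entitiesInScene.insert(entity);
            }
        } else {
            _pendingAddToScene.insert(entity);
        }
    }

    // Called once per frame before rendering; retries the deferred entities.
    void checkPendingAddToScene() {
        std::vector<EntityPointer> added;
        for (const auto& entity : _pendingAddToScene) {
            if (entity->readyToAddToScene()) {
                addEntityToScene(entity);
                added.push_back(entity);
            }
        }
        for (const auto& entity : added) {
            _pendingAddToScene.erase(entity);
        }
    }

private:
    Scene _scene;
    std::unordered_set<EntityPointer> _entitiesInScene;
    std::unordered_set<EntityPointer> _pendingAddToScene;
};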

View file

@@ -124,6 +124,10 @@ protected:
virtual Octree* createTree() { return new EntityTree(true); }
private:
void checkPendingAddToScene(RenderArgs* renderArgs);
void addEntityToScene(EntityItemPointer entity);
QSet<EntityItemPointer> _pendingAddToScene;
void applyZonePropertiesToScene(std::shared_ptr<ZoneEntityItem> zone);
void renderElementProxy(EntityTreeElement* entityTreeElement, RenderArgs* args);
void checkAndCallPreload(const EntityItemID& entityID);

View file

@@ -108,7 +108,35 @@ void RenderableModelEntityItem::remapTextures() {
_currentTextures = _textures;
}
bool RenderableModelEntityItem::readyToAddToScene(RenderArgs* renderArgs) {
if (!_model && renderArgs) {
// TODO: this getModel() appears to be about 3% of model render time. We should optimize
PerformanceTimer perfTimer("getModel");
EntityTreeRenderer* renderer = static_cast<EntityTreeRenderer*>(renderArgs->_renderer);
qDebug() << "RenderableModelEntityItem::readyToAddToScene().... renderer:" << renderer;
getModel(renderer);
}
bool ready = (bool)_model;
qDebug() << "RenderableModelEntityItem::readyToAddToScene().... id:" << getEntityItemID()
<< "ready:" << ready << "renderArgs:" << renderArgs;
return ready;
}
bool RenderableModelEntityItem::addToScene(EntityItemPointer self, std::shared_ptr<render::Scene> scene,
render::PendingChanges& pendingChanges) {
qDebug() << "RenderableModelEntityItem::addToScene().... id:" << getEntityItemID();
return false;
}
void RenderableModelEntityItem::removeFromScene(EntityItemPointer self, std::shared_ptr<render::Scene> scene,
render::PendingChanges& pendingChanges) {
qDebug() << "RenderableModelEntityItem::removeFromScene().... id:" << getEntityItemID();
}
void RenderableModelEntityItem::render(RenderArgs* args) {
qDebug() << "RenderableModelEntityItem::render().... id:" << getEntityItemID();
PerformanceTimer perfTimer("RMEIrender");
assert(getType() == EntityTypes::Model);
@@ -199,6 +227,10 @@ void RenderableModelEntityItem::render(RenderArgs* args) {
Model* RenderableModelEntityItem::getModel(EntityTreeRenderer* renderer) {
Model* result = NULL;
if (!renderer) {
return result;
}
// make sure our renderer is set up
if (!_myRenderer) {
@@ -206,7 +238,7 @@ Model* RenderableModelEntityItem::getModel(EntityTreeRenderer* renderer) {
}
assert(_myRenderer == renderer); // you should only ever render on one renderer
if (QThread::currentThread() != _myRenderer->thread()) {
if (!_myRenderer || QThread::currentThread() != _myRenderer->thread()) {
return _model;
}

View file

@@ -43,7 +43,12 @@ public:
virtual void somethingChangedNotification() { _needsInitialSimulation = true; }
virtual bool canRenderInScene() { return false; } // we don't yet play well with others
virtual bool canRenderInScene() { return true; }
virtual bool readyToAddToScene(RenderArgs* renderArgs = nullptr);
virtual bool addToScene(EntityItemPointer self, std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges);
virtual void removeFromScene(EntityItemPointer self, std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges);
virtual void render(RenderArgs* args);
virtual bool supportsDetailedRayIntersection() const { return true; }
virtual bool findDetailedRayIntersection(const glm::vec3& origin, const glm::vec3& direction,

View file

@@ -973,7 +973,7 @@ bool EntityItem::setProperties(const EntityItemProperties& properties) {
}
void EntityItem::recordCreationTime() {
assert(_created == UNKNOWN_CREATED_TIME);
//assert(_created == UNKNOWN_CREATED_TIME);
_created = usecTimestampNow();
_lastEdited = _created;
_lastUpdated = _created;

View file

@@ -158,6 +158,7 @@ public:
{ return 0; }
virtual bool canRenderInScene() { return false; } // does your entity render using Render Items and Payloads?
virtual bool readyToAddToScene(RenderArgs* renderArgs = nullptr) { return true; } // we assume you're ready to add
virtual bool addToScene(EntityItemPointer self, std::shared_ptr<render::Scene> scene,
render::PendingChanges& pendingChanges) { return false; } // by default entity items don't add to scene
virtual void removeFromScene(EntityItemPointer self, std::shared_ptr<render::Scene> scene,
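
The defaults above define the opt-in contract that RenderableModelEntityItem overrides earlier in this commit: canRenderInScene() gates participation in the new render engine, readyToAddToScene() lets an item defer until its resources exist, and addToScene()/removeFromScene() do the actual payload work. A hedged sketch of that contract using illustrative stand-in types (Item, ModelItem, SceneStub, ChangeSet), not the real hifi classes:

struct SceneStub {};   // stand-in for render::Scene
struct ChangeSet {};   // stand-in for render::PendingChanges

class Item {
public:
    virtual ~Item() = default;
    virtual bool canRenderInScene() { return false; }                   // opt-in
    virtual bool readyToAddToScene() { return true; }                   // assume ready
    virtual bool addToScene(SceneStub&, ChangeSet&) { return false; }   // default: adds nothing
    virtual void removeFromScene(SceneStub&, ChangeSet&) {}
};

class ModelItem : public Item {
public:
    bool canRenderInScene() override { return true; }
    bool readyToAddToScene() override { return _loaded; }               // e.g. geometry downloaded
    bool addToScene(SceneStub&, ChangeSet&) override { _inScene = true; return true; }
    void removeFromScene(SceneStub&, ChangeSet&) override { _inScene = false; }
    void finishLoading() { _loaded = true; }
private:
    bool _loaded = false;
    bool _inScene = false;
};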

View file

@@ -765,6 +765,29 @@ void Model::renderSetup(RenderArgs* args) {
}
}
bool Model::addToScene(std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges) {
bool somethingAdded = false;
// allow the attachments to add themselves to the scene
foreach (Model* attachment, _attachments) {
bool attachmentSomethingAdded = attachment->addToScene(scene, pendingChanges);
somethingAdded = somethingAdded || attachmentSomethingAdded;
}
// TODO --- need to do something here
return somethingAdded;
}
void Model::removeFromScene(std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges) {
// allow the attachments to remove themselves from the scene
foreach (Model* attachment, _attachments) {
attachment->removeFromScene(scene, pendingChanges);
}
// TODO --- need to do something here
}
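
A note on the accumulation in addToScene() above: each attachment's result is captured in a local before being OR-ed into somethingAdded, presumably so that short-circuit evaluation cannot skip the addToScene() call once one attachment has already succeeded. A small illustration of the difference, with a hypothetical Child type:

#include <vector>

struct Child {
    bool added = false;
    bool addToScene() { added = true; return true; }
};

bool addAll(std::vector<Child>& children) {
    bool somethingAdded = false;
    for (auto& child : children) {
        // capture first: the call runs for every child...
        bool childAdded = child.addToScene();
        somethingAdded = somethingAdded || childAdded;
        // ...whereas `somethingAdded = somethingAdded || child.addToScene();`
        // would stop calling addToScene() after the first success.
    }
    return somethingAdded;
}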
bool Model::render(RenderArgs* renderArgs, float alpha) {
PROFILE_RANGE(__FUNCTION__);
@@ -2068,7 +2091,235 @@ bool Model::renderInScene(float alpha, RenderArgs* args) {
return true;
}
class TransparentMeshPart {
public:
TransparentMeshPart(Model* model, int meshIndex, int partIndex) : model(model), meshIndex(meshIndex), partIndex(partIndex) { }
typedef render::Payload<TransparentMeshPart> Payload;
typedef Payload::DataPointer Pointer;
Model* model;
int meshIndex;
int partIndex;
};
namespace render {
template <> const ItemKey payloadGetKey(const TransparentMeshPart::Pointer& payload) {
return ItemKey::Builder::transparentShape();
}
template <> const Item::Bound payloadGetBound(const TransparentMeshPart::Pointer& payload) {
if (payload) {
return payload->model->getPartBounds(payload->meshIndex, payload->partIndex);
}
return render::Item::Bound();
}
template <> void payloadRender(const TransparentMeshPart::Pointer& payload, RenderArgs* args) {
if (args) {
args->_elementsTouched++;
return payload->model->renderPart(args, payload->meshIndex, payload->partIndex, true);
}
}
}
class OpaqueMeshPart {
public:
OpaqueMeshPart(Model* model, int meshIndex, int partIndex) : model(model), meshIndex(meshIndex), partIndex(partIndex) { }
typedef render::Payload<OpaqueMeshPart> Payload;
typedef Payload::DataPointer Pointer;
Model* model;
int meshIndex;
int partIndex;
};
namespace render {
template <> const ItemKey payloadGetKey(const OpaqueMeshPart::Pointer& payload) {
return ItemKey::Builder::opaqueShape();
}
template <> const Item::Bound payloadGetBound(const OpaqueMeshPart::Pointer& payload) {
if (payload) {
return payload->model->getPartBounds(payload->meshIndex, payload->partIndex);
}
return render::Item::Bound();
}
template <> void payloadRender(const OpaqueMeshPart::Pointer& payload, RenderArgs* args) {
if (args) {
args->_elementsTouched++;
return payload->model->renderPart(args, payload->meshIndex, payload->partIndex, false);
}
}
}
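
TransparentMeshPart and OpaqueMeshPart above are deliberately thin: each stores only a Model* plus mesh/part indices, and the per-type behaviour is supplied through specializations of the render:: free functions (payloadGetKey, payloadGetBound, payloadRender). The sketch below is a simplified, generic analogue of that idea, with illustrative names (ItemInterface, TypedPayload, payloadDescribe, payloadDraw) rather than the real render:: API:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Type-erased item the "scene" stores.
struct ItemInterface {
    virtual ~ItemInterface() = default;
    virtual std::string describe() const = 0;
    virtual void draw() const = 0;
};

// Free-function customization points, specialized per payload type.
template <typename T> std::string payloadDescribe(const std::shared_ptr<T>& payload);
template <typename T> void payloadDraw(const std::shared_ptr<T>& payload);

// Adapter that forwards the virtual interface to the free functions.
template <typename T>
struct TypedPayload : public ItemInterface {
    explicit TypedPayload(std::shared_ptr<T> data) : _data(std::move(data)) {}
    std::string describe() const override { return payloadDescribe(_data); }
    void draw() const override { payloadDraw(_data); }
    std::shared_ptr<T> _data;
};

// Concrete payload, analogous to OpaqueMeshPart: a lightweight handle whose
// heavy data stays owned by the model itself.
struct MeshPart {
    int meshIndex;
    int partIndex;
};

template <> std::string payloadDescribe(const std::shared_ptr<MeshPart>& p) {
    return "mesh " + std::to_string(p->meshIndex) + " part " + std::to_string(p->partIndex);
}
template <> void payloadDraw(const std::shared_ptr<MeshPart>& p) {
    std::cout << "drawing " << payloadDescribe(p) << "\n";
}

int main() {
    std::vector<std::shared_ptr<ItemInterface>> scene;
    scene.push_back(std::make_shared<TypedPayload<MeshPart>>(std::make_shared<MeshPart>(MeshPart{0, 2})));
    for (const auto& item : scene) {
        item->draw();   // dispatches to payloadDraw<MeshPart>
    }
    return 0;
}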
AABox Model::getPartBounds(int meshIndex, int partIndex) {
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const FBXMesh& mesh = geometry.meshes.at(meshIndex);
AABox partBox = mesh._mesh.evalPartBound(partIndex);
return partBox;
}
void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool translucent) {
auto textureCache = DependencyManager::get<TextureCache>();
const float DEFAULT_ALPHA_THRESHOLD = 0.5f;
int i = meshIndex;
int j = partIndex;
gpu::Batch& batch = *(args->_batch);
auto mode = args->_renderMode;
auto alphaThreshold = DEFAULT_ALPHA_THRESHOLD; // FIX ME
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const QVector<NetworkMesh>& networkMeshes = _geometry->getMeshes();
const NetworkMesh& networkMesh = networkMeshes.at(meshIndex);
const FBXMesh& mesh = geometry.meshes.at(meshIndex);
const MeshState& state = _meshStates.at(meshIndex);
int vertexCount = mesh.vertices.size(); // NOTE: This seems wrong, shouldn't it be the part's vertex count?
bool translucentMesh = networkMesh.getTranslucentPartCount(mesh) == networkMesh.parts.size();
bool hasTangents = !mesh.tangents.isEmpty();
bool hasSpecular = mesh.hasSpecularTexture();
bool hasLightmap = mesh.hasEmissiveTexture();
bool isSkinned = state.clusterMatrices.size() > 1;
bool wireframe = isWireframe();
if (wireframe) {
translucentMesh = hasTangents = hasSpecular = hasLightmap = isSkinned = false;
}
Locations* locations = nullptr;
pickPrograms(batch, mode, translucent, alphaThreshold, hasLightmap, hasTangents, hasSpecular, isSkinned, wireframe,
args, locations);
if (state.clusterMatrices.size() > 1) {
GLBATCH(glUniformMatrix4fv)(locations->clusterMatrices, state.clusterMatrices.size(), false,
(const float*)state.clusterMatrices.constData());
batch.setModelTransform(Transform());
} else {
batch.setModelTransform(Transform(state.clusterMatrices[0]));
}
if (mesh.blendshapes.isEmpty()) {
batch.setInputFormat(networkMesh._vertexFormat);
batch.setInputStream(0, *networkMesh._vertexStream);
} else {
batch.setInputFormat(networkMesh._vertexFormat);
batch.setInputBuffer(0, _blendedVertexBuffers[i], 0, sizeof(glm::vec3));
batch.setInputBuffer(1, _blendedVertexBuffers[i], vertexCount * sizeof(glm::vec3), sizeof(glm::vec3));
batch.setInputStream(2, *networkMesh._vertexStream);
}
if (mesh.colors.isEmpty()) {
GLBATCH(glColor4f)(1.0f, 1.0f, 1.0f, 1.0f);
}
qint64 offset = 0;
const NetworkMeshPart& networkPart = networkMesh.parts.at(j);
const FBXMeshPart& part = mesh.parts.at(j);
model::MaterialPointer material = part._material;
if ((networkPart.isTranslucent() || part.opacity != 1.0f) != translucent) {
offset += (part.quadIndices.size() + part.triangleIndices.size()) * sizeof(int);
//continue;
// FIX ME!!!
}
// apply material properties
if (mode == RenderArgs::SHADOW_RENDER_MODE) {
/// GLBATCH(glBindTexture)(GL_TEXTURE_2D, 0);
} else {
const bool wantDebug = false;
if (wantDebug) {
qCDebug(renderutils) << "Material Changed ---------------------------------------------";
qCDebug(renderutils) << "part INDEX:" << j;
qCDebug(renderutils) << "NEW part.materialID:" << part.materialID;
}
if (locations->materialBufferUnit >= 0) {
batch.setUniformBuffer(locations->materialBufferUnit, material->getSchemaBuffer());
}
Texture* diffuseMap = networkPart.diffuseTexture.data();
if (mesh.isEye && diffuseMap) {
diffuseMap = (_dilatedTextures[i][j] =
static_cast<DilatableNetworkTexture*>(diffuseMap)->getDilatedTexture(_pupilDilation)).data();
}
static bool showDiffuse = true;
if (showDiffuse && diffuseMap) {
batch.setUniformTexture(0, diffuseMap->getGPUTexture());
} else {
batch.setUniformTexture(0, textureCache->getWhiteTexture());
}
if (locations->texcoordMatrices >= 0) {
glm::mat4 texcoordTransform[2];
if (!part.diffuseTexture.transform.isIdentity()) {
part.diffuseTexture.transform.getMatrix(texcoordTransform[0]);
}
if (!part.emissiveTexture.transform.isIdentity()) {
part.emissiveTexture.transform.getMatrix(texcoordTransform[1]);
}
GLBATCH(glUniformMatrix4fv)(locations->texcoordMatrices, 2, false, (const float*) &texcoordTransform);
}
if (!mesh.tangents.isEmpty()) {
Texture* normalMap = networkPart.normalTexture.data();
batch.setUniformTexture(1, !normalMap ?
textureCache->getBlueTexture() : normalMap->getGPUTexture());
}
if (locations->specularTextureUnit >= 0) {
Texture* specularMap = networkPart.specularTexture.data();
batch.setUniformTexture(locations->specularTextureUnit, !specularMap ?
textureCache->getWhiteTexture() : specularMap->getGPUTexture());
}
if (args) {
args->_materialSwitches++;
}
// HACK: For an as-yet-unknown reason, this code, which should only need to run when the material changes, has to be called for every
// draw call that has an emissive map, so let's do it for now.
if (locations->emissiveTextureUnit >= 0) {
// assert(locations->emissiveParams >= 0); // we should have the emissiveParams defined in the shader
float emissiveOffset = part.emissiveParams.x;
float emissiveScale = part.emissiveParams.y;
GLBATCH(glUniform2f)(locations->emissiveParams, emissiveOffset, emissiveScale);
Texture* emissiveMap = networkPart.emissiveTexture.data();
batch.setUniformTexture(locations->emissiveTextureUnit, !emissiveMap ?
textureCache->getWhiteTexture() : emissiveMap->getGPUTexture());
}
}
if (part.quadIndices.size() > 0) {
batch.drawIndexed(gpu::QUADS, part.quadIndices.size(), offset);
offset += part.quadIndices.size() * sizeof(int);
}
if (part.triangleIndices.size() > 0) {
batch.drawIndexed(gpu::TRIANGLES, part.triangleIndices.size(), offset);
offset += part.triangleIndices.size() * sizeof(int);
}
if (args) {
const int INDICES_PER_TRIANGLE = 3;
const int INDICES_PER_QUAD = 4;
args->_trianglesRendered += part.triangleIndices.size() / INDICES_PER_TRIANGLE;
args->_quadsRendered += part.quadIndices.size() / INDICES_PER_QUAD;
}
}
void Model::segregateMeshGroups() {
//qDebug() << "Model::segregateMeshGroups() ------------------------------------------------";
_renderBuckets.clear();
const FBXGeometry& geometry = _geometry->getFBXGeometry();
@@ -2086,6 +2337,7 @@ void Model::segregateMeshGroups() {
const NetworkMesh& networkMesh = networkMeshes.at(i);
const FBXMesh& mesh = geometry.meshes.at(i);
const MeshState& state = _meshStates.at(i);
bool translucentMesh = networkMesh.getTranslucentPartCount(mesh) == networkMesh.parts.size();
bool hasTangents = !mesh.tangents.isEmpty();
@@ -2097,6 +2349,23 @@
if (wireframe) {
translucentMesh = hasTangents = hasSpecular = hasLightmap = isSkinned = false;
}
// Debug...
//qDebug() << "Mesh parts..." << mesh._mesh.getNumParts();
int totalParts = mesh._mesh.getNumParts();
for (int partIndex = 0; partIndex < totalParts; partIndex++) {
AABox boxPart = mesh._mesh.evalPartBound(partIndex);
// this is a good place to create our renderPayloads
if (translucentMesh) {
//qDebug() << "Transparent Mesh parts[" << partIndex << "].box=" << boxPart;
_transparentRenderItems << std::shared_ptr<TransparentMeshPart>(new TransparentMeshPart(this, i, partIndex));
} else {
//qDebug() << "Opaque Mesh parts[" << partIndex << "].box=" << boxPart;
_opaqueRenderItems << std::shared_ptr<OpaqueMeshPart>(new OpaqueMeshPart(this, i, partIndex));
}
}
QString materialID;

View file

@@ -45,6 +45,21 @@ class Shape;
#include "RenderArgs.h"
class ViewFrustum;
namespace render {
class Scene;
class PendingChanges;
}
class OpaqueMeshPart;
class TransparentMeshPart;
inline uint qHash(const std::shared_ptr<TransparentMeshPart>& a, uint seed) {
return qHash(a.get(), seed);
}
inline uint qHash(const std::shared_ptr<OpaqueMeshPart>& a, uint seed) {
return qHash(a.get(), seed);
}
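
The qHash overloads above are what make QSet<std::shared_ptr<...>> work: QSet needs a qHash(key, seed) overload for its key type, and while Qt supplies one for raw pointers, it does not (at least in the Qt 5 series used here) supply one for std::shared_ptr. Since operator== on shared_ptr already compares the stored pointers, hashing the raw pointer keeps the two consistent. A small hedged example, with Part as a hypothetical type:

#include <QSet>
#include <memory>

struct Part { int index; };

inline uint qHash(const std::shared_ptr<Part>& part, uint seed) {
    return qHash(part.get(), seed);     // reuse Qt's raw-pointer hash
}

int main() {
    QSet<std::shared_ptr<Part>> parts;
    auto part = std::make_shared<Part>(Part{1});
    parts << part;
    parts << part;                      // same pointer: duplicate insert is a no-op
    return parts.size() == 1 ? 0 : 1;
}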
/// A generic 3D model displaying geometry loaded from a URL.
class Model : public QObject, public PhysicsEntity {
@@ -105,6 +120,10 @@ public:
bool renderInScene(float alpha = 1.0f, RenderArgs* args = NULL);
static void endScene(RenderArgs* args);
// new Scene/Engine rendering support
bool addToScene(std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges);
void removeFromScene(std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges);
/// Sets the URL of the model to render.
/// \param fallback the URL of a fallback model to render if the requested model fails to load
/// \param retainCurrent if true, keep rendering the current model until the new one is loaded
@@ -219,6 +238,9 @@ public:
BoxFace& face, QString& extraInfo, bool pickAgainstTriangles = false);
bool convexHullContains(glm::vec3 point);
AABox getPartBounds(int meshIndex, int partIndex);
void renderPart(RenderArgs* args, int meshIndex, int partIndex, bool translucent);
protected:
QSharedPointer<NetworkGeometry> _geometry;
@@ -511,6 +533,10 @@ private:
RenderBucketMap _renderBuckets;
bool _renderCollisionHull;
QSet<std::shared_ptr<TransparentMeshPart>> _transparentRenderItems;
QSet<std::shared_ptr<OpaqueMeshPart>> _opaqueRenderItems;
};
Q_DECLARE_METATYPE(QPointer<Model>)