Merge remote-tracking branch 'upstream/master' into render_cursor

Brad Davis 2015-06-11 02:21:02 -07:00
commit fa27550df4
30 changed files with 301 additions and 808 deletions

View file

@ -18,3 +18,4 @@ Script.load("notifications.js");
Script.load("users.js");
Script.load("grab.js");
Script.load("pointer.js");
Script.load("directory.js");

examples/directory.js (new file, 92 lines added)
View file

@ -0,0 +1,92 @@
//
// directory.js
// examples
//
// Created by David Rowe on 8 Jun 2015
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
Script.include("libraries/globals.js");
var directory = (function () {

    var DIRECTORY_URL = "https://metaverse.highfidelity.com/directory",
        directoryWindow,
        DIRECTORY_BUTTON_URL = HIFI_PUBLIC_BUCKET + "images/tools/directory.svg",
        BUTTON_WIDTH = 50,
        BUTTON_HEIGHT = 50,
        BUTTON_ALPHA = 0.9,
        BUTTON_MARGIN = 8,
        directoryButton,
        EDIT_TOOLBAR_BUTTONS = 10, // Number of buttons in edit.js toolbar
        viewport;

    function updateButtonPosition() {
        Overlays.editOverlay(directoryButton, {
            x: viewport.x - BUTTON_WIDTH - BUTTON_MARGIN,
            y: (viewport.y - (EDIT_TOOLBAR_BUTTONS + 1) * (BUTTON_HEIGHT + BUTTON_MARGIN) - BUTTON_MARGIN) / 2 - 1
        });
    }

    function onMousePressEvent(event) {
        var clickedOverlay;

        clickedOverlay = Overlays.getOverlayAtPoint({ x: event.x, y: event.y });

        if (clickedOverlay === directoryButton) {
            if (directoryWindow.url !== DIRECTORY_URL) {
                directoryWindow.setURL(DIRECTORY_URL);
            }
            directoryWindow.setVisible(true);
            directoryWindow.raise();
        }
    }

    function onDomainChanged() {
        directoryWindow.setVisible(false);
    }

    function onScriptUpdate() {
        var oldViewport = viewport;

        viewport = Controller.getViewportDimensions();
        if (viewport.x !== oldViewport.x || viewport.y !== oldViewport.y) {
            updateButtonPosition();
        }
    }

    function setUp() {
        viewport = Controller.getViewportDimensions();

        directoryWindow = new WebWindow('Directory', DIRECTORY_URL, 900, 700, false);
        directoryWindow.setVisible(false);

        directoryButton = Overlays.addOverlay("image", {
            imageURL: DIRECTORY_BUTTON_URL,
            width: BUTTON_WIDTH,
            height: BUTTON_HEIGHT,
            x: viewport.x - BUTTON_WIDTH - BUTTON_MARGIN,
            y: BUTTON_MARGIN,
            alpha: BUTTON_ALPHA,
            visible: true
        });
        updateButtonPosition();

        Controller.mousePressEvent.connect(onMousePressEvent);
        Window.domainChanged.connect(onDomainChanged);
        Script.update.connect(onScriptUpdate);
    }

    function tearDown() {
        Overlays.deleteOverlay(directoryButton);
    }

    setUp();
    Script.scriptEnding.connect(tearDown);
}());

View file

@ -360,6 +360,10 @@
var elVoxelVolumeSizeZ = document.getElementById("property-voxel-volume-size-z");
var elVoxelSurfaceStyle = document.getElementById("property-voxel-surface-style");
var elHyperlinkHref = document.getElementById("property-hyperlink-href");
var elHyperlinkDescription = document.getElementById("property-hyperlink-description");
if (window.EventBridge !== undefined) {
EventBridge.scriptEventReceived.connect(function(data) {
@ -467,6 +471,9 @@
elScriptURL.value = properties.script;
elUserData.value = properties.userData;
elHyperlinkHref.value = properties.href;
elHyperlinkDescription.value = properties.description;
for (var i = 0; i < allSections.length; i++) {
for (var j = 0; j < allSections[i].length; j++) {
allSections[i][j].style.display = 'none';
@ -612,6 +619,8 @@
elLocked.addEventListener('change', createEmitCheckedPropertyUpdateFunction('locked'));
elName.addEventListener('change', createEmitTextPropertyUpdateFunction('name'));
elHyperlinkHref.addEventListener('change', createEmitTextPropertyUpdateFunction('href'));
elHyperlinkDescription.addEventListener('change', createEmitTextPropertyUpdateFunction('description'));
elVisible.addEventListener('change', createEmitCheckedPropertyUpdateFunction('visible'));
var positionChangeFunction = createEmitVec3PropertyUpdateFunction(
@ -850,7 +859,6 @@
elVoxelVolumeSizeZ.addEventListener('change', voxelVolumeSizeChangeFunction);
elVoxelSurfaceStyle.addEventListener('change', createEmitTextPropertyUpdateFunction('voxelSurfaceStyle'));
elMoveSelectionToGrid.addEventListener("click", function() {
EventBridge.emitWebEvent(JSON.stringify({
type: "action",
@ -937,6 +945,18 @@
<input type="text" id="property-name"></input>
</div>
</div>
<div class="property">
<div class="label">Hyperlink</div>
<div class="input-area">Href<br></div>
<div class="value">
<input id="property-hyperlink-href" class="url"></input>
</div>
<div class="input-area">Description<br></div> <div class="value">
<input id="property-hyperlink-description" class="url"></input>
</div>
</div>
<div class="property">
<span class="label">Locked</span>
<span class="value">

View file

@ -524,8 +524,16 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
_window->setVisible(true);
_glWidget->setFocusPolicy(Qt::StrongFocus);
_glWidget->setFocus();
#ifdef Q_OS_MAC
// OSX doesn't seem to provide for hiding the cursor only on the GL widget
_window->setCursor(Qt::BlankCursor);
#else
// On windows and linux, hiding the top level cursor also means it's invisible
// when hovering over the window menu, which is a pain, so only hide it for
// the GL surface
_glWidget->setCursor(Qt::BlankCursor);
#endif
// enable mouse tracking; otherwise, we only get drag events
_glWidget->setMouseTracking(true);
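As an aside, the platform split above can be reproduced with a few lines of stock Qt, presumably in support of the in-scene cursor this render_cursor branch draws. The sketch below is a self-contained stand-in (a bare QMainWindow and QOpenGLWidget rather than Interface's real widgets): blank the cursor window-wide on OS X, but only over the GL surface on Windows and Linux so the pointer stays visible over the window menu.

#include <QApplication>
#include <QMainWindow>
#include <QOpenGLWidget>

int main(int argc, char** argv) {
    QApplication app(argc, argv);
    QMainWindow window;
    auto* glWidget = new QOpenGLWidget(&window);   // stand-in for Interface's _glWidget
    window.setCentralWidget(glWidget);

#ifdef Q_OS_MAC
    // OS X does not honor a cursor set only on the GL widget, so blank it window-wide.
    window.setCursor(Qt::BlankCursor);
#else
    // On Windows and Linux, hide the cursor only over the GL surface so it stays
    // visible when hovering over the window menu.
    glWidget->setCursor(Qt::BlankCursor);
#endif

    glWidget->setMouseTracking(true);   // receive move events without a button held
    window.show();
    return app.exec();
}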

View file

@ -107,6 +107,9 @@ Avatar::Avatar() :
}
Avatar::~Avatar() {
for(auto attachment : _unusedAttachments) {
delete attachment;
}
}
const float BILLBOARD_LOD_DISTANCE = 40.0f;
@ -298,6 +301,11 @@ bool Avatar::addToScene(AvatarSharedPointer self, std::shared_ptr<render::Scene>
pendingChanges.resetItem(_renderItemID, avatarPayloadPointer);
_skeletonModel.addToScene(scene, pendingChanges);
getHead()->getFaceModel().addToScene(scene, pendingChanges);
for (auto attachmentModel : _attachmentModels) {
attachmentModel->addToScene(scene, pendingChanges);
}
return true;
}
@ -305,6 +313,9 @@ void Avatar::removeFromScene(AvatarSharedPointer self, std::shared_ptr<render::S
pendingChanges.removeItem(_renderItemID);
_skeletonModel.removeFromScene(scene, pendingChanges);
getHead()->getFaceModel().removeFromScene(scene, pendingChanges);
for (auto attachmentModel : _attachmentModels) {
attachmentModel->removeFromScene(scene, pendingChanges);
}
}
void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition, bool postLighting) {
@ -517,7 +528,7 @@ glm::quat Avatar::computeRotationFromBodyToWorldUp(float proportion) const {
return glm::angleAxis(angle * proportion, axis);
}
void Avatar::renderBody(RenderArgs* renderArgs, ViewFrustum* renderFrustum, bool postLighting, float glowLevel) {
void Avatar::fixupModelsInScene() {
// check to see if when we added our models to the scene they were ready, if they were not ready, then
// fix them up in the scene
render::ScenePointer scene = Application::getInstance()->getMain3DScene();
@ -530,8 +541,24 @@ void Avatar::renderBody(RenderArgs* renderArgs, ViewFrustum* renderFrustum, bool
getHead()->getFaceModel().removeFromScene(scene, pendingChanges);
getHead()->getFaceModel().addToScene(scene, pendingChanges);
}
for (auto attachmentModel : _attachmentModels) {
if (attachmentModel->needsFixupInScene()) {
attachmentModel->removeFromScene(scene, pendingChanges);
attachmentModel->addToScene(scene, pendingChanges);
}
}
for (auto attachmentModelToRemove : _attachmentsToRemove) {
attachmentModelToRemove->removeFromScene(scene, pendingChanges);
_unusedAttachments << attachmentModelToRemove;
}
_attachmentsToRemove.clear();
scene->enqueuePendingChanges(pendingChanges);
}
void Avatar::renderBody(RenderArgs* renderArgs, ViewFrustum* renderFrustum, bool postLighting, float glowLevel) {
fixupModelsInScene();
{
Glower glower(renderArgs, glowLevel);
@ -545,10 +572,6 @@ void Avatar::renderBody(RenderArgs* renderArgs, ViewFrustum* renderFrustum, bool
if (postLighting) {
getHand()->render(renderArgs, false);
} else {
// NOTE: we no longer call this here, because we've added all the model parts as renderable items in the scene
//_skeletonModel.render(renderArgs, 1.0f);
renderAttachments(renderArgs);
}
}
getHead()->render(renderArgs, 1.0f, renderFrustum, postLighting);
@ -572,22 +595,14 @@ void Avatar::simulateAttachments(float deltaTime) {
_skeletonModel.getJointCombinedRotation(jointIndex, jointRotation)) {
model->setTranslation(jointPosition + jointRotation * attachment.translation * _scale);
model->setRotation(jointRotation * attachment.rotation);
model->setScaleToFit(true, _scale * attachment.scale);
model->setScaleToFit(true, _scale * attachment.scale, true); // hack to force rescale
model->setSnapModelToCenter(false); // hack to force resnap
model->setSnapModelToCenter(true);
model->simulate(deltaTime);
}
}
}
void Avatar::renderAttachments(RenderArgs* args) {
// RenderArgs::RenderMode modelRenderMode = (renderMode == RenderArgs::SHADOW_RENDER_MODE) ?
// RenderArgs::SHADOW_RENDER_MODE : RenderArgs::DEFAULT_RENDER_MODE;
/*
foreach (Model* model, _attachmentModels) {
model->render(args, 1.0f);
}
*/
}
void Avatar::updateJointMappings() {
// no-op; joint mappings come from skeleton model
}
@ -945,12 +960,18 @@ void Avatar::setAttachmentData(const QVector<AttachmentData>& attachmentData) {
}
// make sure we have as many models as attachments
while (_attachmentModels.size() < attachmentData.size()) {
Model* model = new Model(this);
Model* model = nullptr;
if (_unusedAttachments.size() > 0) {
model = _unusedAttachments.takeFirst();
} else {
model = new Model(this);
}
model->init();
_attachmentModels.append(model);
}
while (_attachmentModels.size() > attachmentData.size()) {
delete _attachmentModels.takeLast();
auto attachmentModel = _attachmentModels.takeLast();
_attachmentsToRemove << attachmentModel;
}
// update the urls
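The attachment changes in this file replace delete-and-recreate with a small pool: models for dropped attachments are queued on _attachmentsToRemove, pulled out of the scene on the next fixupModelsInScene() pass, and parked in _unusedAttachments for reuse by setAttachmentData(). A minimal standalone sketch of that recycle pattern, using a hypothetical FakeModel stand-in rather than the real Model class:

#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for the engine's Model; only what the sketch needs.
struct FakeModel {
    int id = 0;
    bool inScene = false;
};

struct AttachmentPool {
    std::vector<FakeModel*> active;     // ~ _attachmentModels
    std::vector<FakeModel*> toRemove;   // ~ _attachmentsToRemove
    std::vector<FakeModel*> unused;     // ~ _unusedAttachments

    FakeModel* acquire(int id) {
        // Reuse a parked model when possible instead of allocating a new one.
        FakeModel* model;
        if (!unused.empty()) {
            model = unused.back();
            unused.pop_back();
        } else {
            model = new FakeModel();
        }
        model->id = id;
        model->inScene = true;          // ~ addToScene()
        active.push_back(model);
        return model;
    }

    void release(FakeModel* model) {
        // Drop it from the active list but defer the scene removal; the
        // render-side fixup pass finishes the job and parks the model for reuse.
        active.erase(std::remove(active.begin(), active.end(), model), active.end());
        toRemove.push_back(model);
    }

    void fixupInScene() {
        for (auto* model : toRemove) {
            model->inScene = false;     // ~ removeFromScene()
            unused.push_back(model);    // park for reuse
        }
        toRemove.clear();
    }
};

int main() {
    AttachmentPool pool;
    FakeModel* hat = pool.acquire(1);
    pool.release(hat);
    pool.fixupInScene();
    FakeModel* sword = pool.acquire(2); // reuses the parked model
    std::printf("reused: %s\n", hat == sword ? "yes" : "no");
    return 0;
}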

View file

@ -195,6 +195,8 @@ protected:
SkeletonModel _skeletonModel;
glm::vec3 _skeletonOffset;
QVector<Model*> _attachmentModels;
QVector<Model*> _attachmentsToRemove;
QVector<Model*> _unusedAttachments;
float _bodyYawDelta;
// These position histories and derivatives are in the world-frame.
@ -234,9 +236,9 @@ protected:
void renderDisplayName(RenderArgs* renderArgs);
virtual void renderBody(RenderArgs* renderArgs, ViewFrustum* renderFrustum, bool postLighting, float glowLevel = 0.0f);
virtual bool shouldRenderHead(const RenderArgs* renderArgs, const glm::vec3& cameraPosition) const;
virtual void fixupModelsInScene();
void simulateAttachments(float deltaTime);
virtual void renderAttachments(RenderArgs* args);
virtual void updateJointMappings();

View file

@ -1178,44 +1178,10 @@ void MyAvatar::renderBody(RenderArgs* renderArgs, ViewFrustum* renderFrustum, bo
// check to see if when we added our models to the scene they were ready, if they were not ready, then
// fix them up in the scene
render::ScenePointer scene = Application::getInstance()->getMain3DScene();
render::PendingChanges pendingChanges;
if (_skeletonModel.needsFixupInScene()) {
_skeletonModel.removeFromScene(scene, pendingChanges);
_skeletonModel.addToScene(scene, pendingChanges);
}
if (getHead()->getFaceModel().needsFixupInScene()) {
getHead()->getFaceModel().removeFromScene(scene, pendingChanges);
getHead()->getFaceModel().addToScene(scene, pendingChanges);
}
scene->enqueuePendingChanges(pendingChanges);
fixupModelsInScene();
Camera *camera = Application::getInstance()->getCamera();
const glm::vec3 cameraPos = camera->getPosition();
const glm::vec3 cameraPos = Application::getInstance()->getCamera()->getPosition();
// HACK: comment this block which possibly change the near and break the rendering 5/6/2015
// Only tweak the frustum near far if it's not shadow
/* if (renderMode != RenderArgs::SHADOW_RENDER_MODE) {
// Set near clip distance according to skeleton model dimensions if first person and there is no separate head model.
if (shouldRenderHead(cameraPos, renderMode) || !getHead()->getFaceModel().getURL().isEmpty()) {
renderFrustum->setNearClip(DEFAULT_NEAR_CLIP);
} else {
float clipDistance = _skeletonModel.getHeadClipDistance();
clipDistance = glm::length(getEyePosition()
+ camera->getOrientation() * glm::vec3(0.0f, 0.0f, -clipDistance) - cameraPos);
renderFrustum->setNearClip(clipDistance);
}
}*/
// Render the body's voxels and head
if (!postLighting) {
// NOTE: we no longer call this here, because we've added all the model parts as renderable items in the scene
//_skeletonModel.render(renderArgs, 1.0f);
renderAttachments(renderArgs);
}
// Render head so long as the camera isn't inside it
if (shouldRenderHead(renderArgs, cameraPos)) {
getHead()->render(renderArgs, 1.0f, renderFrustum, postLighting);
@ -1571,27 +1537,6 @@ void MyAvatar::updateMotionBehavior() {
_feetTouchFloor = menu->isOptionChecked(MenuOption::ShiftHipsForIdleAnimations);
}
void MyAvatar::renderAttachments(RenderArgs* args) {
if (Application::getInstance()->getCamera()->getMode() != CAMERA_MODE_FIRST_PERSON || args->_renderMode == RenderArgs::MIRROR_RENDER_MODE) {
Avatar::renderAttachments(args);
return;
}
const FBXGeometry& geometry = _skeletonModel.getGeometry()->getFBXGeometry();
QString headJointName = (geometry.headJointIndex == -1) ? QString() : geometry.joints.at(geometry.headJointIndex).name;
// RenderArgs::RenderMode modelRenderMode = (renderMode == RenderArgs::SHADOW_RENDER_MODE) ?
// RenderArgs::SHADOW_RENDER_MODE : RenderArgs::DEFAULT_RENDER_MODE;
// FIX ME - attachments need to be added to scene too...
/*
for (int i = 0; i < _attachmentData.size(); i++) {
const QString& jointName = _attachmentData.at(i).jointName;
if (jointName != headJointName && jointName != "Head") {
_attachmentModels.at(i)->render(args, 1.0f);
}
}
*/
}
//Renders sixense laser pointers for UI selection with controllers
void MyAvatar::renderLaserPointers() {
const float PALM_TIP_ROD_RADIUS = 0.002f;

View file

@ -197,9 +197,6 @@ public slots:
signals:
void transformChanged();
protected:
virtual void renderAttachments(RenderArgs* args);
private:
// These are made private for MyAvatar so that you will use the "use" methods instead

View file

@ -31,7 +31,7 @@ WindowScriptingInterface::WindowScriptingInterface() :
_formResult(QDialog::Rejected)
{
const DomainHandler& domainHandler = DependencyManager::get<NodeList>()->getDomainHandler();
connect(&domainHandler, &DomainHandler::hostnameChanged, this, &WindowScriptingInterface::domainChanged);
connect(&domainHandler, &DomainHandler::connectedToDomain, this, &WindowScriptingInterface::domainChanged);
connect(Application::getInstance(), &Application::svoImportRequested, this, &WindowScriptingInterface::svoImportRequested);
connect(Application::getInstance(), &Application::domainConnectionRefused, this, &WindowScriptingInterface::domainConnectionRefused);
}

View file

@ -9,15 +9,18 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "RenderableBoxEntityItem.h"
#include <glm/gtx/quaternion.hpp>
#include <gpu/GPUConfig.h>
#include <gpu/Batch.h>
#include <DeferredLightingEffect.h>
#include <ObjectMotionState.h>
#include <PerfStat.h>
#include "RenderableBoxEntityItem.h"
#include "RenderableDebugableEntityItem.h"
EntityItemPointer RenderableBoxEntityItem::factory(const EntityItemID& entityID, const EntityItemProperties& properties) {
return EntityItemPointer(new RenderableBoxEntityItem(entityID, properties));
@ -27,23 +30,11 @@ void RenderableBoxEntityItem::render(RenderArgs* args) {
PerformanceTimer perfTimer("RenderableBoxEntityItem::render");
Q_ASSERT(getType() == EntityTypes::Box);
glm::vec4 cubeColor(toGlm(getXColor()), getLocalRenderAlpha());
bool debugSimulationOwnership = args->_debugFlags & RenderArgs::RENDER_DEBUG_SIMULATION_OWNERSHIP;
bool highlightSimulationOwnership = false;
if (debugSimulationOwnership) {
auto nodeList = DependencyManager::get<NodeList>();
const QUuid& myNodeID = nodeList->getSessionUUID();
highlightSimulationOwnership = (getSimulatorID() == myNodeID);
}
Q_ASSERT(args->_batch);
gpu::Batch& batch = *args->_batch;
batch.setModelTransform(getTransformToCenter());
if (highlightSimulationOwnership) {
DependencyManager::get<DeferredLightingEffect>()->renderWireCube(batch, 1.0f, cubeColor);
} else {
DependencyManager::get<DeferredLightingEffect>()->renderSolidCube(batch, 1.0f, cubeColor);
}
DependencyManager::get<DeferredLightingEffect>()->renderSolidCube(batch, 1.0f, cubeColor);
RenderableDebugableEntityItem::render(this, args);
};

View file

@ -13,7 +13,6 @@
#define hifi_RenderableBoxEntityItem_h
#include <BoxEntityItem.h>
#include "RenderableDebugableEntityItem.h"
#include "RenderableEntityItem.h"
class RenderableBoxEntityItem : public BoxEntityItem {

View file

@ -10,58 +10,57 @@
//
#include "RenderableDebugableEntityItem.h"
#include <glm/gtx/quaternion.hpp>
#include <gpu/GPUConfig.h>
#include <gpu/Batch.h>
#include <DeferredLightingEffect.h>
#include <PhysicsEngine.h>
#include "RenderableDebugableEntityItem.h"
#include <ObjectMotionState.h>
void RenderableDebugableEntityItem::renderBoundingBox(EntityItem* entity, RenderArgs* args,
float puffedOut, glm::vec4& color) {
Q_ASSERT(args->_batch);
gpu::Batch& batch = *args->_batch;
batch.setModelTransform(entity->getTransformToCenter());
Transform transform = entity->getTransformToCenter();
//transform.postScale(entity->getDimensions());
batch.setModelTransform(transform);
DependencyManager::get<DeferredLightingEffect>()->renderWireCube(batch, 1.0f + puffedOut, color);
}
void RenderableDebugableEntityItem::renderHoverDot(EntityItem* entity, RenderArgs* args) {
const int SLICES = 8, STACKS = 8;
float radius = 0.05f;
glm::vec4 blueColor(0.0f, 0.0f, 1.0f, 1.0f);
Q_ASSERT(args->_batch);
gpu::Batch& batch = *args->_batch;
Transform transform = entity->getTransformToCenter();
// Cancel true dimensions and set scale to 2 * radius (diameter)
transform.postScale(2.0f * glm::vec3(radius, radius, radius) / entity->getDimensions());
batch.setModelTransform(transform);
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphere(batch, 0.5f, SLICES, STACKS, blueColor);
}
void RenderableDebugableEntityItem::render(EntityItem* entity, RenderArgs* args) {
bool debugSimulationOwnership = args->_debugFlags & RenderArgs::RENDER_DEBUG_SIMULATION_OWNERSHIP;
if (args->_debugFlags & RenderArgs::RENDER_DEBUG_SIMULATION_OWNERSHIP) {
Q_ASSERT(args->_batch);
gpu::Batch& batch = *args->_batch;
Transform transform = entity->getTransformToCenter();
transform.postScale(entity->getDimensions());
batch.setModelTransform(transform);
auto nodeList = DependencyManager::get<NodeList>();
const QUuid& myNodeID = nodeList->getSessionUUID();
bool highlightSimulationOwnership = (entity->getSimulatorID() == myNodeID);
if (highlightSimulationOwnership) {
glm::vec4 greenColor(0.0f, 1.0f, 0.2f, 1.0f);
renderBoundingBox(entity, args, 0.08f, greenColor);
}
if (debugSimulationOwnership) {
quint64 now = usecTimestampNow();
if (now - entity->getLastEditedFromRemote() < 0.1f * USECS_PER_SECOND) {
glm::vec4 redColor(1.0f, 0.0f, 0.0f, 1.0f);
renderBoundingBox(entity, args, 0.2f, redColor);
renderBoundingBox(entity, args, 0.16f, redColor);
}
if (now - entity->getLastBroadcast() < 0.2f * USECS_PER_SECOND) {
glm::vec4 yellowColor(1.0f, 1.0f, 0.2f, 1.0f);
renderBoundingBox(entity, args, 0.3f, yellowColor);
renderBoundingBox(entity, args, 0.24f, yellowColor);
}
ObjectMotionState* motionState = static_cast<ObjectMotionState*>(entity->getPhysicsInfo());
if (motionState && motionState->isActive()) {
renderHoverDot(entity, args);
glm::vec4 blueColor(0.0f, 0.0f, 1.0f, 1.0f);
renderBoundingBox(entity, args, 0.32f, blueColor);
}
}
}

View file

@ -17,7 +17,6 @@
class RenderableDebugableEntityItem {
public:
static void renderBoundingBox(EntityItem* entity, RenderArgs* args, float puffedOut, glm::vec4& color);
static void renderHoverDot(EntityItem* entity, RenderArgs* args);
static void render(EntityItem* entity, RenderArgs* args);
};

View file

@ -201,14 +201,6 @@ void RenderableModelEntityItem::render(RenderArgs* args) {
glm::vec3 position = getPosition();
glm::vec3 dimensions = getDimensions();
bool debugSimulationOwnership = args->_debugFlags & RenderArgs::RENDER_DEBUG_SIMULATION_OWNERSHIP;
bool highlightSimulationOwnership = false;
if (debugSimulationOwnership) {
auto nodeList = DependencyManager::get<NodeList>();
const QUuid& myNodeID = nodeList->getSessionUUID();
highlightSimulationOwnership = (getSimulatorID() == myNodeID);
}
if (hasModel()) {
if (_model) {
if (QUrl(getModelURL()) != _model->getURL()) {
@ -274,11 +266,6 @@ void RenderableModelEntityItem::render(RenderArgs* args) {
}
}
}
if (highlightSimulationOwnership) {
glm::vec4 greenColor(0.0f, 1.0f, 0.0f, 1.0f);
RenderableDebugableEntityItem::renderBoundingBox(this, args, 0.0f, greenColor);
}
} else {
glm::vec4 greenColor(0.0f, 1.0f, 0.0f, 1.0f);
RenderableDebugableEntityItem::renderBoundingBox(this, args, 0.0f, greenColor);

View file

@ -9,6 +9,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "RenderableSphereEntityItem.h"
#include <glm/gtx/quaternion.hpp>
#include <gpu/GPUConfig.h>
@ -18,7 +20,7 @@
#include <DeferredLightingEffect.h>
#include <PerfStat.h>
#include "RenderableSphereEntityItem.h"
#include "RenderableDebugableEntityItem.h"
EntityItemPointer RenderableSphereEntityItem::factory(const EntityItemID& entityID, const EntityItemProperties& properties) {
return EntityItemPointer(new RenderableSphereEntityItem(entityID, properties));
@ -39,4 +41,6 @@ void RenderableSphereEntityItem::render(RenderArgs* args) {
gpu::Batch& batch = *args->_batch;
batch.setModelTransform(getTransformToCenter());
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphere(batch, 0.5f, SLICES, STACKS, sphereColor);
RenderableDebugableEntityItem::render(this, args);
};

View file

@ -70,7 +70,9 @@ EntityItem::EntityItem(const EntityItemID& entityItemID) :
_dirtyFlags(0),
_element(nullptr),
_physicsInfo(nullptr),
_simulated(false)
_simulated(false),
_href(""),
_description("")
{
quint64 now = usecTimestampNow();
_lastSimulated = now;
@ -117,6 +119,8 @@ EntityPropertyFlags EntityItem::getEntityProperties(EncodeBitstreamParams& param
requestedProperties += PROP_MARKETPLACE_ID;
requestedProperties += PROP_NAME;
requestedProperties += PROP_SIMULATOR_ID;
requestedProperties += PROP_HREF;
requestedProperties += PROP_DESCRIPTION;
return requestedProperties;
}
@ -246,6 +250,9 @@ OctreeElement::AppendState EntityItem::appendEntityData(OctreePacketData* packet
APPEND_ENTITY_PROPERTY(PROP_MARKETPLACE_ID, getMarketplaceID());
APPEND_ENTITY_PROPERTY(PROP_NAME, getName());
APPEND_ENTITY_PROPERTY(PROP_COLLISION_SOUND_URL, getCollisionSoundURL());
APPEND_ENTITY_PROPERTY(PROP_HREF, getHref());
APPEND_ENTITY_PROPERTY(PROP_DESCRIPTION, getDescription());
appendSubclassData(packetData, params, entityTreeElementExtraEncodeData,
requestedProperties,
@ -573,6 +580,9 @@ int EntityItem::readEntityDataFromBuffer(const unsigned char* data, int bytesLef
READ_ENTITY_PROPERTY(PROP_NAME, QString, setName);
READ_ENTITY_PROPERTY(PROP_COLLISION_SOUND_URL, QString, setCollisionSoundURL);
READ_ENTITY_PROPERTY(PROP_HREF, QString, setHref);
READ_ENTITY_PROPERTY(PROP_DESCRIPTION, QString, setDescription);
bytesRead += readEntitySubclassDataFromBuffer(dataAt, (bytesLeftToRead - bytesRead), args, propertyFlags, overwriteLocalData);
////////////////////////////////////
@ -905,6 +915,8 @@ EntityItemProperties EntityItem::getProperties() const {
COPY_ENTITY_PROPERTY_TO_PROPERTIES(simulatorID, getSimulatorID);
COPY_ENTITY_PROPERTY_TO_PROPERTIES(marketplaceID, getMarketplaceID);
COPY_ENTITY_PROPERTY_TO_PROPERTIES(name, getName);
COPY_ENTITY_PROPERTY_TO_PROPERTIES(href, getHref);
COPY_ENTITY_PROPERTY_TO_PROPERTIES(description, getDescription);
properties._defaultSettings = false;
@ -963,6 +975,8 @@ bool EntityItem::setProperties(const EntityItemProperties& properties) {
SET_ENTITY_PROPERTY_FROM_PROPERTIES(userData, setUserData);
SET_ENTITY_PROPERTY_FROM_PROPERTIES(marketplaceID, setMarketplaceID);
SET_ENTITY_PROPERTY_FROM_PROPERTIES(name, setName);
SET_ENTITY_PROPERTY_FROM_PROPERTIES(href, setHref);
SET_ENTITY_PROPERTY_FROM_PROPERTIES(description, setDescription);
if (somethingChanged) {
uint64_t now = usecTimestampNow();

View file

@ -203,7 +203,14 @@ public:
inline const glm::quat& getRotation() const { return _transform.getRotation(); }
inline void setRotation(const glm::quat& rotation) { _transform.setRotation(rotation); }
// Hyperlink related getters and setters
QString getHref() const { return _href; }
void setHref(QString value) { _href = value; }
QString getDescription() const { return _description; }
void setDescription(QString value) { _description = value; }
/// Dimensions in meters (0.0 - TREE_SCALE)
inline const glm::vec3& getDimensions() const { return _transform.getScale(); }
virtual void setDimensions(const glm::vec3& value);
@ -415,6 +422,8 @@ protected:
quint64 _simulatorIDChangedTime; // when was _simulatorID last updated?
QString _marketplaceID;
QString _name;
QString _href; //Hyperlink href
QString _description; //Hyperlink description
// NOTE: Damping is applied like this: v *= pow(1 - damping, dt)
//

View file

@ -347,6 +347,9 @@ EntityPropertyFlags EntityItemProperties::getChangedProperties() const {
CHECK_PROPERTY_CHANGE(PROP_VOXEL_SURFACE_STYLE, voxelSurfaceStyle);
CHECK_PROPERTY_CHANGE(PROP_LINE_WIDTH, lineWidth);
CHECK_PROPERTY_CHANGE(PROP_LINE_POINTS, linePoints);
CHECK_PROPERTY_CHANGE(PROP_HREF, href);
CHECK_PROPERTY_CHANGE(PROP_DESCRIPTION, description);
changedProperties += _stage.getChangedProperties();
changedProperties += _atmosphere.getChangedProperties();
changedProperties += _skybox.getChangedProperties();
@ -439,7 +442,9 @@ QScriptValue EntityItemProperties::copyToScriptValue(QScriptEngine* engine, bool
COPY_PROPERTY_TO_QSCRIPTVALUE(voxelSurfaceStyle);
COPY_PROPERTY_TO_QSCRIPTVALUE(lineWidth);
COPY_PROPERTY_TO_QSCRIPTVALUE(linePoints);
COPY_PROPERTY_TO_QSCRIPTVALUE(href);
COPY_PROPERTY_TO_QSCRIPTVALUE(description);
// Sitting properties support
if (!skipDefaults) {
QScriptValue sittingPoints = engine->newObject();
@ -548,6 +553,9 @@ void EntityItemProperties::copyFromScriptValue(const QScriptValue& object, bool
COPY_PROPERTY_FROM_QSCRIPTVALUE(voxelSurfaceStyle, uint16_t, setVoxelSurfaceStyle);
COPY_PROPERTY_FROM_QSCRIPTVALUE(lineWidth, float, setLineWidth);
COPY_PROPERTY_FROM_QSCRIPTVALUE(linePoints, qVectorVec3, setLinePoints);
COPY_PROPERTY_FROM_QSCRIPTVALUE(href, QString, setHref);
COPY_PROPERTY_FROM_QSCRIPTVALUE(description, QString, setDescription);
if (!honorReadOnly) {
// this is used by the json reader to set things that we don't want javascript to able to affect.
@ -712,6 +720,8 @@ bool EntityItemProperties::encodeEntityEditPacket(PacketType command, EntityItem
APPEND_ENTITY_PROPERTY(PROP_LOCKED, properties.getLocked());
APPEND_ENTITY_PROPERTY(PROP_USER_DATA, properties.getUserData());
APPEND_ENTITY_PROPERTY(PROP_SIMULATOR_ID, properties.getSimulatorID());
APPEND_ENTITY_PROPERTY(PROP_HREF, properties.getHref());
APPEND_ENTITY_PROPERTY(PROP_DESCRIPTION, properties.getDescription());
if (properties.getType() == EntityTypes::Web) {
APPEND_ENTITY_PROPERTY(PROP_SOURCE_URL, properties.getSourceUrl());
@ -962,6 +972,8 @@ bool EntityItemProperties::decodeEntityEditPacket(const unsigned char* data, int
READ_ENTITY_PROPERTY_TO_PROPERTIES(PROP_LOCKED, bool, setLocked);
READ_ENTITY_PROPERTY_TO_PROPERTIES(PROP_USER_DATA, QString, setUserData);
READ_ENTITY_PROPERTY_TO_PROPERTIES(PROP_SIMULATOR_ID, QUuid, setSimulatorID);
READ_ENTITY_PROPERTY_TO_PROPERTIES(PROP_HREF, QString, setHref);
READ_ENTITY_PROPERTY_TO_PROPERTIES(PROP_DESCRIPTION, QString, setDescription);
if (properties.getType() == EntityTypes::Web) {
READ_ENTITY_PROPERTY_TO_PROPERTIES(PROP_SOURCE_URL, QString, setSourceUrl);
@ -1147,6 +1159,9 @@ void EntityItemProperties::markAllChanged() {
_lineWidthChanged = true;
_linePointsChanged = true;
_hrefChanged = true;
_descriptionChanged = true;
}
/// The maximum bounding cube for the entity, independent of its rotation.
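The CHECK_PROPERTY_CHANGE / COPY_PROPERTY_* / markAllChanged plumbing being extended here roughly expands to a value plus a changed flag per property, with only flagged values serialized or copied onward. A hand-rolled miniature of that pattern for the two new hyperlink fields (hypothetical types, not the real macros):

#include <cstdio>
#include <string>

// Each property keeps its value and a "changed" flag; getChangedProperties()-style
// code only forwards the flagged ones, and markAllChanged() forces a full resend.
struct HyperlinkPropsSketch {
    std::string href;
    bool hrefChanged = false;
    std::string description;
    bool descriptionChanged = false;

    void setHref(const std::string& value) { href = value; hrefChanged = true; }
    void setDescription(const std::string& value) { description = value; descriptionChanged = true; }
    void markAllChanged() { hrefChanged = true; descriptionChanged = true; }
};

int main() {
    HyperlinkPropsSketch props;
    props.setHref("hifi://welcome");
    if (props.hrefChanged) {
        std::printf("send href: %s\n", props.href.c_str());
    }
    if (!props.descriptionChanged) {
        std::printf("description unchanged, not sent\n");
    }
    return 0;
}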

View file

@ -148,6 +148,8 @@ public:
DEFINE_PROPERTY_REF(PROP_SOURCE_URL, SourceUrl, sourceUrl, QString);
DEFINE_PROPERTY(PROP_LINE_WIDTH, LineWidth, lineWidth, float);
DEFINE_PROPERTY_REF(LINE_POINTS, LinePoints, linePoints, QVector<glm::vec3>);
DEFINE_PROPERTY_REF(PROP_HREF, Href, href, QString);
DEFINE_PROPERTY_REF(PROP_DESCRIPTION, Description, description, QString);
static QString getBackgroundModeString(BackgroundMode mode);
@ -295,6 +297,8 @@ inline QDebug operator<<(QDebug debug, const EntityItemProperties& properties) {
DEBUG_PROPERTY_IF_CHANGED(debug, properties, VoxelVolumeSize, voxelVolumeSize, "");
DEBUG_PROPERTY_IF_CHANGED(debug, properties, VoxelData, voxelData, "");
DEBUG_PROPERTY_IF_CHANGED(debug, properties, VoxelSurfaceStyle, voxelSurfaceStyle, "");
DEBUG_PROPERTY_IF_CHANGED(debug, properties, Href, href, "");
DEBUG_PROPERTY_IF_CHANGED(debug, properties, Description, description, "");
properties.getStage().debugDump();
properties.getAtmosphere().debugDump();

View file

@ -117,6 +117,10 @@ enum EntityPropertyList {
//for lines
PROP_LINE_WIDTH,
PROP_LINE_POINTS,
// used by hyperlinks
PROP_HREF,
PROP_DESCRIPTION,
////////////////////////////////////////////////////////////////////////////////////////////////////
// ATTENTION: add new properties ABOVE this line
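PROP_HREF and PROP_DESCRIPTION are appended near the end of the list, just above the "add new properties ABOVE this line" marker, rather than grouped with related properties earlier in the enum. The enum's numeric values appear to double as wire identifiers, so inserting a value mid-enum would renumber every later property and break decoding of packets from older builds. A toy illustration of that append-only constraint, with hypothetical names:

// Toy protocol: each property's wire ID is its numeric enum value,
// so shipped values must stay stable.
enum PropsV1 { PROP_POSITION_X, PROP_COLOR_X, PROP_COUNT_V1 };

// Appending new properties keeps every existing value unchanged...
enum PropsV2Appended { PROP_POSITION_A, PROP_COLOR_A, PROP_HREF_A };

// ...while inserting in the middle silently renumbers everything after the insertion point.
enum PropsV2Inserted { PROP_POSITION_I, PROP_HREF_I, PROP_COLOR_I };

static_assert(PROP_COLOR_X == 1, "COLOR is property #1 in the shipped protocol");
static_assert(PROP_COLOR_A == 1, "append-only: COLOR keeps its ID");
static_assert(PROP_COLOR_I == 2, "a mid-enum insert shifts COLOR to a new ID");

int main() { return 0; }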

View file

@ -2646,34 +2646,6 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping,
}
}
geometry.palmDirection = parseVec3(mapping.value("palmDirection", "0, -1, 0").toString());
// process attachments
QVariantHash attachments = mapping.value("attach").toHash();
for (QVariantHash::const_iterator it = attachments.constBegin(); it != attachments.constEnd(); it++) {
FBXAttachment attachment;
attachment.jointIndex = modelIDs.indexOf(processID(it.key()));
attachment.scale = glm::vec3(1.0f, 1.0f, 1.0f);
QVariantList properties = it->toList();
if (properties.isEmpty()) {
attachment.url = it->toString();
} else {
attachment.url = properties.at(0).toString();
if (properties.size() >= 2) {
attachment.translation = parseVec3(properties.at(1).toString());
if (properties.size() >= 3) {
attachment.rotation = glm::quat(glm::radians(parseVec3(properties.at(2).toString())));
if (properties.size() >= 4) {
attachment.scale = parseVec3(properties.at(3).toString());
}
}
}
}
geometry.attachments.append(attachment);
}
// Add sitting points
QVariantHash sittingPoints = mapping.value("sit").toHash();

View file

@ -189,17 +189,6 @@ public:
Q_DECLARE_METATYPE(FBXAnimationFrame)
Q_DECLARE_METATYPE(QVector<FBXAnimationFrame>)
/// An attachment to an FBX document.
class FBXAttachment {
public:
int jointIndex;
QUrl url;
glm::vec3 translation;
glm::quat rotation;
glm::vec3 scale;
};
/// A point where an avatar can sit
class SittingPoint {
public:
@ -256,9 +245,7 @@ public:
Extents meshExtents;
QVector<FBXAnimationFrame> animationFrames;
QVector<FBXAttachment> attachments;
int getJointIndex(const QString& name) const { return jointIndices.value(name) - 1; }
QStringList getJointNames() const;

View file

@ -124,7 +124,9 @@ FSTReader::ModelType FSTReader::getTypeFromName(const QString& name) {
_namesToTypes["head"] = HEAD_MODEL ;
_namesToTypes["body"] = BODY_ONLY_MODEL;
_namesToTypes["body+head"] = HEAD_AND_BODY_MODEL;
_namesToTypes["attachment"] = ATTACHMENT_MODEL;
// NOTE: this is not yet implemented, but will be used to allow you to attach fully independent models to your avatar
_namesToTypes["attachment"] = ATTACHMENT_MODEL;
}
return _namesToTypes[name];
}

View file

@ -544,7 +544,6 @@ void fbxDebugDump(const FBXGeometry& fbxgeo) {
qCDebug(modelformat) << "---------------- fbxGeometry ----------------";
qCDebug(modelformat) << " hasSkeletonJoints =" << fbxgeo.hasSkeletonJoints;
qCDebug(modelformat) << " offset =" << fbxgeo.offset;
qCDebug(modelformat) << " attachments.count() = " << fbxgeo.attachments.count();
qCDebug(modelformat) << " meshes.count() =" << fbxgeo.meshes.count();
foreach (FBXMesh mesh, fbxgeo.meshes) {
qCDebug(modelformat) << " vertices.count() =" << mesh.vertices.count();

View file

@ -180,10 +180,6 @@ btCollisionShape* EntityMotionState::computeNewShape() {
return nullptr;
}
// RELIABLE_SEND_HACK: until we have truly reliable resends of non-moving updates
// we always resend packets for objects that have stopped moving, up to some max limit.
const int MAX_NUM_NON_MOVING_UPDATES = 5;
bool EntityMotionState::isCandidateForOwnership(const QUuid& sessionID) const {
if (!_body || !_entity) {
return false;
@ -495,6 +491,10 @@ void EntityMotionState::measureBodyAcceleration() {
glm::vec3 velocity = bulletToGLM(_body->getLinearVelocity());
_measuredAcceleration = (velocity / powf(1.0f - _body->getLinearDamping(), dt) - _lastVelocity) * invDt;
_lastVelocity = velocity;
if (numSubsteps > PHYSICS_ENGINE_MAX_NUM_SUBSTEPS && !_candidateForOwnership) {
_loopsSinceOwnershipBid = 0;
_loopsWithoutOwner = 0;
}
}
}
glm::vec3 EntityMotionState::getObjectLinearVelocityChange() const {

View file

@ -53,8 +53,6 @@ const uint32_t OUTGOING_DIRTY_PHYSICS_FLAGS = EntityItem::DIRTY_TRANSFORM | Enti
class OctreeEditPacketSender;
class PhysicsEngine;
extern const int MAX_NUM_NON_MOVING_UPDATES;
class ObjectMotionState : public btMotionState {
public:
// These properties of the PhysicsEngine are "global" within the context of all ObjectMotionStates

View file

@ -227,8 +227,7 @@ void PhysicsEngine::stepSimulation() {
// (3) synchronize outgoing motion states
// (4) send outgoing packets
const int MAX_NUM_SUBSTEPS = 4;
const float MAX_TIMESTEP = (float)MAX_NUM_SUBSTEPS * PHYSICS_ENGINE_FIXED_SUBSTEP;
const float MAX_TIMESTEP = (float)PHYSICS_ENGINE_MAX_NUM_SUBSTEPS * PHYSICS_ENGINE_FIXED_SUBSTEP;
float dt = 1.0e-6f * (float)(_clock.getTimeMicroseconds());
_clock.reset();
float timeStep = btMin(dt, MAX_TIMESTEP);
@ -245,7 +244,7 @@ void PhysicsEngine::stepSimulation() {
_characterController->preSimulation(timeStep);
}
int numSubsteps = _dynamicsWorld->stepSimulation(timeStep, MAX_NUM_SUBSTEPS, PHYSICS_ENGINE_FIXED_SUBSTEP);
int numSubsteps = _dynamicsWorld->stepSimulation(timeStep, PHYSICS_ENGINE_MAX_NUM_SUBSTEPS, PHYSICS_ENGINE_FIXED_SUBSTEP);
if (numSubsteps > 0) {
BT_PROFILE("postSimulation");
_numSubsteps += (uint32_t)numSubsteps;
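This hunk only re-expresses the local MAX_NUM_SUBSTEPS limit in terms of the shared PHYSICS_ENGINE_MAX_NUM_SUBSTEPS constant, so stepSimulation() and the new check in EntityMotionState::measureBodyAcceleration() agree on one value. The clamp itself is simple: never hand Bullet more wall-clock time than maxSubSteps * fixedTimeStep, since anything beyond that would be dropped anyway. A small sketch with assumed constant values (the real ones live in the physics headers):

#include <algorithm>
#include <cstdio>

// Assumed values for illustration; the real constants are defined elsewhere.
const float PHYSICS_ENGINE_FIXED_SUBSTEP = 1.0f / 60.0f;
const int PHYSICS_ENGINE_MAX_NUM_SUBSTEPS = 4;

// Mirror of the clamp in stepSimulation(): Bullet runs at most maxSubSteps
// fixed-size substeps per call, so longer frames are truncated.
float clampTimeStep(float dt) {
    const float MAX_TIMESTEP = (float)PHYSICS_ENGINE_MAX_NUM_SUBSTEPS * PHYSICS_ENGINE_FIXED_SUBSTEP;
    return std::min(dt, MAX_TIMESTEP);
}

int main() {
    std::printf("%.4f s\n", clampTimeStep(0.016f)); // normal frame: passes through
    std::printf("%.4f s\n", clampTimeStep(0.250f)); // long hitch: clamped to ~0.0667 s
    return 0;
}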

View file

@ -405,9 +405,6 @@ void Model::reset() {
if (_jointStates.isEmpty()) {
return;
}
foreach (Model* attachment, _attachments) {
attachment->reset();
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
for (int i = 0; i < _jointStates.size(); i++) {
_jointStates[i].setRotationInConstrainedFrame(geometry.joints.at(i).rotation, 0.0f);
@ -419,14 +416,7 @@ void Model::reset() {
}
bool Model::updateGeometry() {
// NOTE: this is a recursive call that walks all attachments, and their attachments
bool needFullUpdate = false;
for (int i = 0; i < _attachments.size(); i++) {
Model* model = _attachments.at(i);
if (model->updateGeometry()) {
needFullUpdate = true;
}
}
bool needToRebuild = false;
if (_nextGeometry) {
@ -445,6 +435,7 @@ bool Model::updateGeometry() {
QSharedPointer<NetworkGeometry> geometry = _geometry->getLODOrFallback(_lodDistance, _lodHysteresis);
if (_geometry != geometry) {
// NOTE: it is theoretically impossible to reach here after passing through the applyNextGeometry() call above.
// Which means we don't need to worry about calling deleteGeometry() below immediately after creating new geometry.
@ -499,12 +490,6 @@ bool Model::updateGeometry() {
}
_blendedVertexBuffers.push_back(buffer);
}
foreach (const FBXAttachment& attachment, fbxGeometry.attachments) {
Model* model = new Model(this);
model->init();
model->setURL(attachment.url);
_attachments.append(model);
}
needFullUpdate = true;
}
return needFullUpdate;
@ -827,71 +812,41 @@ void Model::renderSetup(RenderArgs* args) {
}
class TransparentMeshPart {
class MeshPartPayload {
public:
TransparentMeshPart(Model* model, int meshIndex, int partIndex) : model(model), meshIndex(meshIndex), partIndex(partIndex) { }
typedef render::Payload<TransparentMeshPart> Payload;
MeshPartPayload(bool transparent, Model* model, int meshIndex, int partIndex) :
transparent(transparent), model(model), url(model->getURL()), meshIndex(meshIndex), partIndex(partIndex) { }
typedef render::Payload<MeshPartPayload> Payload;
typedef Payload::DataPointer Pointer;
Model* model;
bool transparent;
Model* model;
QUrl url;
int meshIndex;
int partIndex;
};
namespace render {
template <> const ItemKey payloadGetKey(const TransparentMeshPart::Pointer& payload) {
template <> const ItemKey payloadGetKey(const MeshPartPayload::Pointer& payload) {
if (!payload->model->isVisible()) {
return ItemKey::Builder().withInvisible().build();
}
return ItemKey::Builder::transparentShape();
return payload->transparent ? ItemKey::Builder::transparentShape() : ItemKey::Builder::opaqueShape();
}
template <> const Item::Bound payloadGetBound(const TransparentMeshPart::Pointer& payload) {
template <> const Item::Bound payloadGetBound(const MeshPartPayload::Pointer& payload) {
if (payload) {
return payload->model->getPartBounds(payload->meshIndex, payload->partIndex);
}
return render::Item::Bound();
}
template <> void payloadRender(const TransparentMeshPart::Pointer& payload, RenderArgs* args) {
template <> void payloadRender(const MeshPartPayload::Pointer& payload, RenderArgs* args) {
if (args) {
return payload->model->renderPart(args, payload->meshIndex, payload->partIndex, true);
return payload->model->renderPart(args, payload->meshIndex, payload->partIndex, payload->transparent);
}
}
}
class OpaqueMeshPart {
public:
OpaqueMeshPart(Model* model, int meshIndex, int partIndex) : model(model), meshIndex(meshIndex), partIndex(partIndex) { }
typedef render::Payload<OpaqueMeshPart> Payload;
typedef Payload::DataPointer Pointer;
Model* model;
int meshIndex;
int partIndex;
};
namespace render {
template <> const ItemKey payloadGetKey(const OpaqueMeshPart::Pointer& payload) {
if (!payload->model->isVisible()) {
return ItemKey::Builder().withInvisible().build();
}
return ItemKey::Builder::opaqueShape();
}
template <> const Item::Bound payloadGetBound(const OpaqueMeshPart::Pointer& payload) {
if (payload) {
Item::Bound result = payload->model->getPartBounds(payload->meshIndex, payload->partIndex);
//qDebug() << "payloadGetBound(OpaqueMeshPart) " << result;
return result;
}
return render::Item::Bound();
}
template <> void payloadRender(const OpaqueMeshPart::Pointer& payload, RenderArgs* args) {
if (args) {
return payload->model->renderPart(args, payload->meshIndex, payload->partIndex, false);
}
}
/* template <> const model::MaterialKey& shapeGetMaterialKey(const OpaqueMeshPart::Pointer& payload) {
/* template <> const model::MaterialKey& shapeGetMaterialKey(const MeshPartPayload::Pointer& payload) {
return payload->model->getPartMaterial(payload->meshIndex, payload->partIndex);
}*/
}
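The refactor above collapses the near-duplicate TransparentMeshPart and OpaqueMeshPart payload classes into a single MeshPartPayload carrying a transparent flag, so one set of payloadGetKey / payloadGetBound / payloadRender specializations serves both buckets. Stripped of the render-engine types, the change amounts to replacing two classes that differ by one constant with one class and a field, roughly (hypothetical stand-in type):

#include <cstdio>
#include <memory>
#include <vector>

// Hypothetical stand-in for the render payload; only the fields the sketch needs.
struct MeshPartPayloadSketch {
    bool transparent;   // previously implied by which of the two classes was used
    int meshIndex;
    int partIndex;
};

// One bucketing function instead of two almost-identical template specializations.
const char* bucketFor(const MeshPartPayloadSketch& payload) {
    return payload.transparent ? "transparent" : "opaque";
}

int main() {
    std::vector<std::shared_ptr<MeshPartPayloadSketch>> items = {
        std::make_shared<MeshPartPayloadSketch>(MeshPartPayloadSketch{false, 0, 0}),
        std::make_shared<MeshPartPayloadSketch>(MeshPartPayloadSketch{true, 0, 1}),
    };
    for (const auto& item : items) {
        std::printf("mesh %d part %d -> %s\n", item->meshIndex, item->partIndex, bucketFor(*item));
    }
    return 0;
}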
@ -916,24 +871,19 @@ bool Model::addToScene(std::shared_ptr<render::Scene> scene, render::PendingChan
bool somethingAdded = false;
// allow the attachments to add to scene
foreach (Model* attachment, _attachments) {
bool attachementSomethingAdded = attachment->addToScene(scene, pendingChanges);
somethingAdded = somethingAdded || attachementSomethingAdded;
}
foreach (auto renderItem, _transparentRenderItems) {
auto item = scene->allocateID();
auto renderData = TransparentMeshPart::Pointer(renderItem);
auto renderPayload = render::PayloadPointer(new TransparentMeshPart::Payload(renderData));
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = render::PayloadPointer(new MeshPartPayload::Payload(renderData));
pendingChanges.resetItem(item, renderPayload);
_renderItems.insert(item, renderPayload);
somethingAdded = true;
}
foreach (auto renderItem, _opaqueRenderItems) {
auto item = scene->allocateID();
auto renderData = OpaqueMeshPart::Pointer(renderItem);
auto renderPayload = render::PayloadPointer(new OpaqueMeshPart::Payload(renderData));
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = render::PayloadPointer(new MeshPartPayload::Payload(renderData));
pendingChanges.resetItem(item, renderPayload);
_renderItems.insert(item, renderPayload);
somethingAdded = true;
@ -945,11 +895,6 @@ bool Model::addToScene(std::shared_ptr<render::Scene> scene, render::PendingChan
}
void Model::removeFromScene(std::shared_ptr<render::Scene> scene, render::PendingChanges& pendingChanges) {
// allow the attachments to remove to scene
foreach (Model* attachment, _attachments) {
attachment->removeFromScene(scene, pendingChanges);
}
foreach (auto item, _renderItems.keys()) {
pendingChanges.removeItem(item);
}
@ -957,210 +902,6 @@ void Model::removeFromScene(std::shared_ptr<render::Scene> scene, render::Pendin
_readyWhenAdded = false;
}
bool Model::render(RenderArgs* renderArgs, float alpha) {
return true; //
PROFILE_RANGE(__FUNCTION__);
// render the attachments
foreach (Model* attachment, _attachments) {
attachment->render(renderArgs, alpha);
}
if (_meshStates.isEmpty()) {
return false;
}
renderSetup(renderArgs);
return renderCore(renderArgs, alpha);
}
bool Model::renderCore(RenderArgs* args, float alpha) {
return true;
PROFILE_RANGE(__FUNCTION__);
if (!_viewState) {
return false;
}
auto mode = args->_renderMode;
// Let's introduce a gpu::Batch to capture all the calls to the graphics api
_renderBatch.clear();
gpu::Batch& batch = _renderBatch;
// Setup the projection matrix
if (args && args->_viewFrustum) {
glm::mat4 proj;
// If for easier debug depending on the pass
if (mode == RenderArgs::SHADOW_RENDER_MODE) {
args->_viewFrustum->evalProjectionMatrix(proj);
} else {
args->_viewFrustum->evalProjectionMatrix(proj);
}
batch.setProjectionTransform(proj);
}
// Capture the view matrix once for the rendering of this model
if (_transforms.empty()) {
_transforms.push_back(Transform());
}
_transforms[0] = _viewState->getViewTransform();
// apply entity translation offset to the viewTransform in one go (it's a preTranslate because viewTransform goes from world to eye space)
_transforms[0].preTranslate(-_translation);
batch.setViewTransform(_transforms[0]);
/*DependencyManager::get<TextureCache>()->setPrimaryDrawBuffers(
mode == RenderArgs::DEFAULT_RENDER_MODE || mode == RenderArgs::DIFFUSE_RENDER_MODE,
mode == RenderArgs::DEFAULT_RENDER_MODE || mode == RenderArgs::NORMAL_RENDER_MODE,
mode == RenderArgs::DEFAULT_RENDER_MODE);
*/
/*if (mode != RenderArgs::SHADOW_RENDER_MODE)*/ {
GLenum buffers[3];
int bufferCount = 0;
// if (mode == RenderArgs::DEFAULT_RENDER_MODE || mode == RenderArgs::DIFFUSE_RENDER_MODE) {
if (mode != RenderArgs::SHADOW_RENDER_MODE) {
buffers[bufferCount++] = GL_COLOR_ATTACHMENT0;
}
// if (mode == RenderArgs::DEFAULT_RENDER_MODE || mode == RenderArgs::NORMAL_RENDER_MODE) {
if (mode != RenderArgs::SHADOW_RENDER_MODE) {
buffers[bufferCount++] = GL_COLOR_ATTACHMENT1;
}
// if (mode == RenderArgs::DEFAULT_RENDER_MODE) {
if (mode != RenderArgs::SHADOW_RENDER_MODE) {
buffers[bufferCount++] = GL_COLOR_ATTACHMENT2;
}
GLBATCH(glDrawBuffers)(bufferCount, buffers);
// batch.setFramebuffer(DependencyManager::get<TextureCache>()->getPrimaryOpaqueFramebuffer());
}
const float DEFAULT_ALPHA_THRESHOLD = 0.5f;
//renderMeshes(batch, mode, translucent, alphaThreshold, hasTangents, hasSpecular, isSkinned, args, forceRenderMeshes);
int opaqueMeshPartsRendered = 0;
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, false, false, false, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, false, false, true, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, false, true, false, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, false, true, true, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, true, false, false, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, true, false, true, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, true, true, false, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, false, true, true, true, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, false, false, false, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, false, true, false, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, true, false, false, false, args, true);
opaqueMeshPartsRendered += renderMeshes(batch, mode, false, DEFAULT_ALPHA_THRESHOLD, true, true, true, false, false, args, true);
// render translucent meshes afterwards
//DependencyManager::get<TextureCache>()->setPrimaryDrawBuffers(false, true, true);
{
GLenum buffers[2];
int bufferCount = 0;
buffers[bufferCount++] = GL_COLOR_ATTACHMENT1;
buffers[bufferCount++] = GL_COLOR_ATTACHMENT2;
GLBATCH(glDrawBuffers)(bufferCount, buffers);
}
int translucentMeshPartsRendered = 0;
const float MOSTLY_OPAQUE_THRESHOLD = 0.75f;
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, false, false, false, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, false, false, true, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, false, true, false, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, false, true, true, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, true, false, false, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, true, false, true, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, true, true, false, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_OPAQUE_THRESHOLD, false, true, true, true, false, args, true);
{
GLenum buffers[1];
int bufferCount = 0;
buffers[bufferCount++] = GL_COLOR_ATTACHMENT0;
GLBATCH(glDrawBuffers)(bufferCount, buffers);
}
// if (mode == RenderArgs::DEFAULT_RENDER_MODE || mode == RenderArgs::DIFFUSE_RENDER_MODE) {
if (mode != RenderArgs::SHADOW_RENDER_MODE) {
// batch.setFramebuffer(DependencyManager::get<TextureCache>()->getPrimaryTransparentFramebuffer());
const float MOSTLY_TRANSPARENT_THRESHOLD = 0.0f;
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, false, false, false, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, false, false, true, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, false, true, false, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, false, true, true, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, true, false, false, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, true, false, true, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, true, true, false, false, args, true);
translucentMeshPartsRendered += renderMeshes(batch, mode, true, MOSTLY_TRANSPARENT_THRESHOLD, false, true, true, true, false, args, true);
// batch.setFramebuffer(DependencyManager::get<TextureCache>()->getPrimaryOpaqueFramebuffer());
}
GLBATCH(glDepthMask)(true);
GLBATCH(glDepthFunc)(GL_LESS);
GLBATCH(glDisable)(GL_CULL_FACE);
if (mode == RenderArgs::SHADOW_RENDER_MODE) {
GLBATCH(glCullFace)(GL_BACK);
}
GLBATCH(glActiveTexture)(GL_TEXTURE0 + 1);
GLBATCH(glBindTexture)(GL_TEXTURE_2D, 0);
GLBATCH(glActiveTexture)(GL_TEXTURE0 + 2);
GLBATCH(glBindTexture)(GL_TEXTURE_2D, 0);
GLBATCH(glActiveTexture)(GL_TEXTURE0 + 3);
GLBATCH(glBindTexture)(GL_TEXTURE_2D, 0);
GLBATCH(glActiveTexture)(GL_TEXTURE0);
GLBATCH(glBindTexture)(GL_TEXTURE_2D, 0);
// deactivate vertex arrays after drawing
GLBATCH(glDisableClientState)(GL_NORMAL_ARRAY);
GLBATCH(glDisableClientState)(GL_VERTEX_ARRAY);
GLBATCH(glDisableClientState)(GL_TEXTURE_COORD_ARRAY);
GLBATCH(glDisableClientState)(GL_COLOR_ARRAY);
GLBATCH(glDisableVertexAttribArray)(gpu::Stream::TANGENT);
GLBATCH(glDisableVertexAttribArray)(gpu::Stream::SKIN_CLUSTER_INDEX);
GLBATCH(glDisableVertexAttribArray)(gpu::Stream::SKIN_CLUSTER_WEIGHT);
// bind with 0 to switch back to normal operation
GLBATCH(glBindBuffer)(GL_ARRAY_BUFFER, 0);
GLBATCH(glBindBuffer)(GL_ELEMENT_ARRAY_BUFFER, 0);
GLBATCH(glBindTexture)(GL_TEXTURE_2D, 0);
// Back to no program
GLBATCH(glUseProgram)(0);
// Render!
{
PROFILE_RANGE("render Batch");
#if defined(ANDROID)
#else
glPushMatrix();
#endif
::gpu::GLBackend::renderBatch(batch, true); // force sync with gl state here
#if defined(ANDROID)
#else
glPopMatrix();
#endif
}
// restore all the default material settings
_viewState->setupWorldLight();
#ifdef WANT_DEBUG_MESHBOXES
renderDebugMeshBoxes();
#endif
return true;
}
void Model::renderDebugMeshBoxes() {
int colorNdx = 0;
_mutex.lock();
@ -1267,12 +1008,12 @@ Extents Model::calculateScaledOffsetExtents(const Extents& extents) const {
Extents translatedExtents = { rotatedExtents.minimum + _translation,
rotatedExtents.maximum + _translation };
return translatedExtents;
}
/// Returns the world space equivalent of some box in model space.
AABox Model::calculateScaledOffsetAABox(const AABox& box) const {
return AABox(calculateScaledOffsetExtents(Extents(box)));
}
@ -1341,9 +1082,10 @@ void Model::setURL(const QUrl& url, const QUrl& fallback, bool retainCurrent, bo
if (_url == url && _geometry && _geometry->getURL() == url) {
return;
}
_readyWhenAdded = false; // reset our render items.
_needsReload = true;
invalidCalculatedMeshBoxes();
_url = url;
@ -1532,7 +1274,7 @@ void Model::setScaleToFit(bool scaleToFit, const glm::vec3& dimensions) {
}
}
void Model::setScaleToFit(bool scaleToFit, float largestDimension) {
void Model::setScaleToFit(bool scaleToFit, float largestDimension, bool forceRescale) {
// NOTE: if the model is not active, then it means we don't actually know the true/natural dimensions of the
// mesh, and so we can't do the needed calculations for scaling to fit to a single largest dimension. In this
// case we will record that we do want to do this, but we will stick our desired single dimension into the
@ -1545,7 +1287,7 @@ void Model::setScaleToFit(bool scaleToFit, float largestDimension) {
return;
}
if (_scaleToFit != scaleToFit || glm::length(_scaleToFitDimensions) != largestDimension) {
if (forceRescale || _scaleToFit != scaleToFit || glm::length(_scaleToFitDimensions) != largestDimension) {
_scaleToFit = scaleToFit;
// we only need to do this work if we're "turning on" scale to fit.
@ -1555,7 +1297,7 @@ void Model::setScaleToFit(bool scaleToFit, float largestDimension) {
float maxScale = largestDimension / maxDimension;
glm::vec3 modelMeshDimensions = modelMeshExtents.maximum - modelMeshExtents.minimum;
glm::vec3 dimensions = modelMeshDimensions * maxScale;
_scaleToFitDimensions = dimensions;
_scaledToFit = false; // force rescaling
}
@ -1626,7 +1368,6 @@ void Model::simulate(float deltaTime, bool fullUpdate) {
}
void Model::simulateInternal(float deltaTime) {
// NOTE: this is a recursive call that walks all attachments, and their attachments
// update the world space transforms for all joints
// update animations
@ -1643,31 +1384,7 @@ void Model::simulateInternal(float deltaTime) {
_shapesAreDirty = !_shapes.isEmpty();
// update the attachment transforms and simulate them
const FBXGeometry& geometry = _geometry->getFBXGeometry();
for (int i = 0; i < _attachments.size(); i++) {
const FBXAttachment& attachment = geometry.attachments.at(i);
Model* model = _attachments.at(i);
glm::vec3 jointTranslation = _translation;
glm::quat jointRotation = _rotation;
if (_showTrueJointTransforms) {
getJointPositionInWorldFrame(attachment.jointIndex, jointTranslation);
getJointRotationInWorldFrame(attachment.jointIndex, jointRotation);
} else {
getVisibleJointPositionInWorldFrame(attachment.jointIndex, jointTranslation);
getVisibleJointRotationInWorldFrame(attachment.jointIndex, jointRotation);
}
model->setTranslation(jointTranslation + jointRotation * attachment.translation * _scale);
model->setRotation(jointRotation * attachment.rotation);
model->setScale(_scale * attachment.scale);
if (model->isActive()) {
model->simulateInternal(deltaTime);
}
}
glm::mat4 modelToWorld = glm::mat4_cast(_rotation);
for (int i = 0; i < _meshStates.size(); i++) {
MeshState& state = _meshStates[i];
@ -2005,10 +1722,6 @@ void Model::applyNextGeometry() {
}
void Model::deleteGeometry() {
foreach (Model* attachment, _attachments) {
delete attachment;
}
_attachments.clear();
_blendedVertexBuffers.clear();
_jointStates.clear();
_meshStates.clear();
@ -2082,7 +1795,6 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
glm::mat4 scale = glm::scale(partBounds.getDimensions());
glm::mat4 modelToWorldMatrix = translation * scale;
batch.setModelTransform(modelToWorldMatrix);
//qDebug() << "partBounds:" << partBounds;
DependencyManager::get<DeferredLightingEffect>()->renderWireCube(batch, 1.0f, cubeColor);
}
#endif //def DEBUG_BOUNDING_PARTS
@ -2172,16 +1884,18 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
// guard against partially loaded meshes
if (partIndex >= networkMesh.parts.size() || partIndex >= mesh.parts.size()) {
return;
return;
}
const NetworkMeshPart& networkPart = networkMesh.parts.at(partIndex);
const FBXMeshPart& part = mesh.parts.at(partIndex);
model::MaterialPointer material = part._material;
#ifdef WANT_DEBUG
if (material == nullptr) {
// qCDebug(renderutils) << "WARNING: material == nullptr!!!";
qCDebug(renderutils) << "WARNING: material == nullptr!!!";
}
#endif
if (material != nullptr) {
@ -2283,8 +1997,6 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
}
void Model::segregateMeshGroups() {
_renderBuckets.clear();
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const QVector<NetworkMesh>& networkMeshes = _geometry->getMeshes();
@ -2294,6 +2006,9 @@ void Model::segregateMeshGroups() {
qDebug() << "WARNING!!!! Mesh Sizes don't match! We will not segregate mesh groups yet.";
return;
}
_transparentRenderItems.clear();
_opaqueRenderItems.clear();
// Run through all of the meshes, and place them into their segregated, but unsorted buckets
for (int i = 0; i < networkMeshes.size(); i++) {
@ -2318,62 +2033,15 @@ void Model::segregateMeshGroups() {
for (int partIndex = 0; partIndex < totalParts; partIndex++) {
// this is a good place to create our renderPayloads
if (translucentMesh) {
_transparentRenderItems << std::shared_ptr<TransparentMeshPart>(new TransparentMeshPart(this, i, partIndex));
_transparentRenderItems << std::shared_ptr<MeshPartPayload>(new MeshPartPayload(true, this, i, partIndex));
} else {
_opaqueRenderItems << std::shared_ptr<OpaqueMeshPart>(new OpaqueMeshPart(this, i, partIndex));
_opaqueRenderItems << std::shared_ptr<MeshPartPayload>(new MeshPartPayload(false, this, i, partIndex));
}
}
QString materialID;
// create a material name from all the parts. If there's one part, this will be a single material and its
// true name. If however the mesh has multiple parts the name will be all the part's materials mashed together
// which will result in those parts being sorted away from single material parts.
QString lastPartMaterialID;
foreach(FBXMeshPart part, mesh.parts) {
if (part.materialID != lastPartMaterialID) {
materialID += part.materialID;
}
lastPartMaterialID = part.materialID;
}
const bool wantDebug = false;
if (wantDebug) {
qCDebug(renderutils) << "materialID:" << materialID << "parts:" << mesh.parts.size();
}
RenderKey key(translucentMesh, hasLightmap, hasTangents, hasSpecular, isSkinned, wireframe);
// reuse or create the bucket corresponding to that key and insert the mesh as unsorted
_renderBuckets[key.getRaw()]._unsortedMeshes.insertMulti(materialID, i);
}
for(auto& b : _renderBuckets) {
foreach(auto i, b.second._unsortedMeshes) {
b.second._meshes.append(i);
}
b.second._unsortedMeshes.clear();
}
_meshGroupsKnown = true;
}
QVector<int>* Model::pickMeshList(bool translucent, float alphaThreshold, bool hasLightmap, bool hasTangents, bool hasSpecular, bool isSkinned, bool isWireframe) {
PROFILE_RANGE(__FUNCTION__);
// depending on which parameters we were called with, pick the correct mesh group to render
QVector<int>* whichList = NULL;
RenderKey key(translucent, hasLightmap, hasTangents, hasSpecular, isSkinned, isWireframe);
auto bucket = _renderBuckets.find(key.getRaw());
if (bucket != _renderBuckets.end()) {
whichList = &(*bucket).second._meshes;
}
return whichList;
}
void Model::pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasLightmap, bool hasTangents, bool hasSpecular, bool isSkinned, bool isWireframe, RenderArgs* args,
Locations*& locations) {
@ -2402,212 +2070,6 @@ void Model::pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, f
}
}
int Model::renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
bool hasLightmap, bool hasTangents, bool hasSpecular, bool isSkinned, bool isWireframe, RenderArgs* args,
bool forceRenderSomeMeshes) {
PROFILE_RANGE(__FUNCTION__);
int meshPartsRendered = 0;
// Pick the mesh list with the requested render flags
QVector<int>* whichList = pickMeshList(translucent, alphaThreshold, hasLightmap, hasTangents, hasSpecular, isSkinned, isWireframe);
if (!whichList) {
return 0;
}
QVector<int>& list = *whichList;
// If this list has nothing to render, then don't bother proceeding. This saves us from binding shader programs unnecessarily
if (list.empty()) {
return 0;
}
Locations* locations = nullptr;
pickPrograms(batch, mode, translucent, alphaThreshold, hasLightmap, hasTangents, hasSpecular, isSkinned, isWireframe,
args, locations);
meshPartsRendered = renderMeshesFromList(list, batch, mode, translucent, alphaThreshold,
args, locations, forceRenderSomeMeshes);
return meshPartsRendered;
}
int Model::renderMeshesFromList(QVector<int>& list, gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold, RenderArgs* args,
Locations* locations, bool forceRenderMeshes) {
PROFILE_RANGE(__FUNCTION__);
auto textureCache = DependencyManager::get<TextureCache>();
QString lastMaterialID;
int meshPartsRendered = 0;
updateVisibleJointStates();
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const QVector<NetworkMesh>& networkMeshes = _geometry->getMeshes();
// i is the "index" from the original networkMeshes QVector...
foreach (int i, list) {
// if our index is ever out of range for either meshes or networkMeshes, then skip it, and set our _meshGroupsKnown
// to false to rebuild our mesh groups.
if (i < 0 || i >= networkMeshes.size() || i > geometry.meshes.size()) {
_meshGroupsKnown = false; // regenerate these lists next time around.
_readyWhenAdded = false; // in case any of our users are using scenes
invalidCalculatedMeshBoxes(); // if we have to reload, we need to assume our mesh boxes are all invalid
continue;
}
// exit early if the translucency doesn't match what we're drawing
const NetworkMesh& networkMesh = networkMeshes.at(i);
const FBXMesh& mesh = geometry.meshes.at(i);
batch.setIndexBuffer(gpu::UINT32, (networkMesh._indexBuffer), 0);
int vertexCount = mesh.vertices.size();
if (vertexCount == 0) {
// sanity check
continue;
}
// if we got here, then check to see if this mesh is in view
if (args) {
bool shouldRender = true;
if (args->_viewFrustum) {
shouldRender = forceRenderMeshes ||
args->_viewFrustum->boxInFrustum(_calculatedMeshBoxes.at(i)) != ViewFrustum::OUTSIDE;
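// beyond frustum culling, ask the view state for an LOD decision based on the mesh's largest dimension and its distance from the camera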
if (shouldRender && !forceRenderMeshes) {
float distance = args->_viewFrustum->distanceToCamera(_calculatedMeshBoxes.at(i).calcCenter());
shouldRender = !_viewState ? false : _viewState->shouldRenderMesh(_calculatedMeshBoxes.at(i).getLargestDimension(),
distance);
}
}
if (!shouldRender) {
continue; // skip this mesh
}
}
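// skinned meshes upload the full cluster matrix array and leave the model transform at identity; rigid meshes use their single cluster matrix as the model transform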
const MeshState& state = _meshStates.at(i);
if (state.clusterMatrices.size() > 1) {
GLBATCH(glUniformMatrix4fv)(locations->clusterMatrices, state.clusterMatrices.size(), false,
(const float*)state.clusterMatrices.constData());
batch.setModelTransform(Transform());
} else {
batch.setModelTransform(Transform(state.clusterMatrices[0]));
}
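// blendshaped meshes read their first two vertex attributes from the per-model _blendedVertexBuffers; the remaining attributes still come from the network mesh's vertex stream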
if (mesh.blendshapes.isEmpty()) {
batch.setInputFormat(networkMesh._vertexFormat);
batch.setInputStream(0, *networkMesh._vertexStream);
} else {
batch.setInputFormat(networkMesh._vertexFormat);
batch.setInputBuffer(0, _blendedVertexBuffers[i], 0, sizeof(glm::vec3));
batch.setInputBuffer(1, _blendedVertexBuffers[i], vertexCount * sizeof(glm::vec3), sizeof(glm::vec3));
batch.setInputStream(2, *networkMesh._vertexStream);
}
if (mesh.colors.isEmpty()) {
GLBATCH(glColor4f)(1.0f, 1.0f, 1.0f, 1.0f);
}
qint64 offset = 0;
for (int j = 0; j < networkMesh.parts.size(); j++) {
const NetworkMeshPart& networkPart = networkMesh.parts.at(j);
const FBXMeshPart& part = mesh.parts.at(j);
model::MaterialPointer material = part._material;
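// parts whose translucency doesn't match this pass are skipped, but the index-buffer offset still advances so later parts draw from the correct position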
if ((networkPart.isTranslucent() || part.opacity != 1.0f) != translucent) {
offset += (part.quadIndices.size() + part.triangleIndices.size()) * sizeof(int);
continue;
}
// apply material properties
if (mode == RenderArgs::SHADOW_RENDER_MODE) {
/// GLBATCH(glBindTexture)(GL_TEXTURE_2D, 0);
} else {
if (lastMaterialID != part.materialID) {
const bool wantDebug = false;
if (wantDebug) {
qCDebug(renderutils) << "Material Changed ---------------------------------------------";
qCDebug(renderutils) << "part INDEX:" << j;
qCDebug(renderutils) << "NEW part.materialID:" << part.materialID;
}
if (locations->materialBufferUnit >= 0) {
batch.setUniformBuffer(locations->materialBufferUnit, material->getSchemaBuffer());
}
Texture* diffuseMap = networkPart.diffuseTexture.data();
if (mesh.isEye && diffuseMap) {
diffuseMap = (_dilatedTextures[i][j] =
static_cast<DilatableNetworkTexture*>(diffuseMap)->getDilatedTexture(_pupilDilation)).data();
}
static bool showDiffuse = true;
if (showDiffuse && diffuseMap) {
batch.setUniformTexture(0, diffuseMap->getGPUTexture());
} else {
batch.setUniformTexture(0, textureCache->getWhiteTexture());
}
if (locations->texcoordMatrices >= 0) {
glm::mat4 texcoordTransform[2];
if (!part.diffuseTexture.transform.isIdentity()) {
part.diffuseTexture.transform.getMatrix(texcoordTransform[0]);
}
if (!part.emissiveTexture.transform.isIdentity()) {
part.emissiveTexture.transform.getMatrix(texcoordTransform[1]);
}
GLBATCH(glUniformMatrix4fv)(locations->texcoordMatrices, 2, false, (const float*) &texcoordTransform);
}
if (!mesh.tangents.isEmpty()) {
Texture* normalMap = networkPart.normalTexture.data();
batch.setUniformTexture(1, !normalMap ?
textureCache->getBlueTexture() : normalMap->getGPUTexture());
}
if (locations->specularTextureUnit >= 0) {
Texture* specularMap = networkPart.specularTexture.data();
batch.setUniformTexture(locations->specularTextureUnit, !specularMap ?
textureCache->getWhiteTexture() : specularMap->getGPUTexture());
}
}
// HACK: For an as-yet-unknown reason, this code, which should only run when the material changes, needs to be called for every
// draw call that has an emissive map, so do it unconditionally for now.
if (locations->emissiveTextureUnit >= 0) {
// assert(locations->emissiveParams >= 0); // we should have the emissiveParams defined in the shader
float emissiveOffset = part.emissiveParams.x;
float emissiveScale = part.emissiveParams.y;
GLBATCH(glUniform2f)(locations->emissiveParams, emissiveOffset, emissiveScale);
Texture* emissiveMap = networkPart.emissiveTexture.data();
batch.setUniformTexture(locations->emissiveTextureUnit, !emissiveMap ?
textureCache->getWhiteTexture() : emissiveMap->getGPUTexture());
}
lastMaterialID = part.materialID;
}
meshPartsRendered++;
if (part.quadIndices.size() > 0) {
batch.drawIndexed(gpu::QUADS, part.quadIndices.size(), offset);
offset += part.quadIndices.size() * sizeof(int);
}
if (part.triangleIndices.size() > 0) {
batch.drawIndexed(gpu::TRIANGLES, part.triangleIndices.size(), offset);
offset += part.triangleIndices.size() * sizeof(int);
}
}
}
return meshPartsRendered;
}
ModelBlender::ModelBlender() :
_pendingBlenders(0) {

View file

@ -51,17 +51,11 @@ namespace render {
class PendingChanges;
typedef unsigned int ItemID;
}
class OpaqueMeshPart;
class TransparentMeshPart;
class MeshPartPayload;
inline uint qHash(const std::shared_ptr<TransparentMeshPart>& a, uint seed) {
inline uint qHash(const std::shared_ptr<MeshPartPayload>& a, uint seed) {
return qHash(a.get(), seed);
}
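// hashing by the raw pointer lets shared_ptr payloads be stored in QSet/QHash containers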
inline uint qHash(const std::shared_ptr<OpaqueMeshPart>& a, uint seed) {
return qHash(a.get(), seed);
}
/// A generic 3D model displaying geometry loaded from a URL.
class Model : public QObject, public PhysicsEntity {
@ -77,7 +71,7 @@ public:
virtual ~Model();
/// enables/disables scale to fit behavior, the model will be automatically scaled to the specified largest dimension
void setScaleToFit(bool scaleToFit, float largestDimension = 0.0f);
void setScaleToFit(bool scaleToFit, float largestDimension = 0.0f, bool forceRescale = false);
bool getScaleToFit() const { return _scaleToFit; } /// is scale to fit enabled
bool getIsScaledToFit() const { return _scaledToFit; } /// is model scaled to fit
const glm::vec3& getScaleToFitDimensions() const { return _scaleToFitDimensions; } /// the dimensions model is scaled to
@ -350,8 +344,6 @@ private:
QVector<QVector<QSharedPointer<Texture> > > _dilatedTextures;
QVector<Model*> _attachments;
QSet<WeakAnimationHandlePointer> _animationHandles;
QList<AnimationHandlePointer> _runningAnimations;
@ -401,18 +393,8 @@ private:
int _debugMeshBoxesID = GeometryCache::UNKNOWN_ID;
// helper functions used by render() or renderInScene()
bool renderCore(RenderArgs* args, float alpha);
int renderMeshes(gpu::Batch& batch, RenderArgs::RenderMode mode, bool translucent, float alphaThreshold,
bool hasLightmap, bool hasTangents, bool hasSpecular, bool isSkinned, bool isWireframe, RenderArgs* args = NULL,
bool forceRenderMeshes = false);
void setupBatchTransform(gpu::Batch& batch, RenderArgs* args);
QVector<int>* pickMeshList(bool translucent, float alphaThreshold, bool hasLightmap, bool hasTangents, bool hasSpecular, bool isSkinned, bool isWireframe);
int renderMeshesFromList(QVector<int>& list, gpu::Batch& batch, RenderArgs::RenderMode mode, bool translucent, float alphaThreshold,
RenderArgs* args, Locations* locations,
bool forceRenderSomeMeshes = false);
static void pickPrograms(gpu::Batch& batch, RenderArgs::RenderMode mode, bool translucent, float alphaThreshold,
bool hasLightmap, bool hasTangents, bool hasSpecular, bool isSkinned, bool isWireframe, RenderArgs* args,
Locations*& locations);
@ -523,35 +505,15 @@ private:
};
static RenderPipelineLib _renderPipelineLib;
class RenderBucket {
public:
QVector<int> _meshes;
QMap<QString, int> _unsortedMeshes;
};
typedef std::unordered_map<int, RenderBucket> BaseRenderBucketMap;
class RenderBucketMap : public BaseRenderBucketMap {
public:
typedef RenderKey Key;
};
RenderBucketMap _renderBuckets;
bool _renderCollisionHull;
QSet<std::shared_ptr<TransparentMeshPart>> _transparentRenderItems;
QSet<std::shared_ptr<OpaqueMeshPart>> _opaqueRenderItems;
QSet<std::shared_ptr<MeshPartPayload>> _transparentRenderItems;
QSet<std::shared_ptr<MeshPartPayload>> _opaqueRenderItems;
QMap<render::ItemID, render::PayloadPointer> _renderItems;
bool _readyWhenAdded = false;
bool _needsReload = true;
private:
// FIX ME - We want to get rid of this interface for rendering...
// right now the only remaining users are Avatar attachments.
// that usage has been temporarily disabled...
bool render(RenderArgs* renderArgs, float alpha = 1.0f);
};
Q_DECLARE_METATYPE(QPointer<Model>)

View file

@ -15,6 +15,7 @@
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
const int PHYSICS_ENGINE_MAX_NUM_SUBSTEPS = 4;
const float PHYSICS_ENGINE_FIXED_SUBSTEP = 1.0f / 60.0f;
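// i.e. the engine integrates with fixed 1/60 s substeps, taking at most 4 substeps per update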
// return incremental rotation (Bullet-style) caused by angularVelocity over timeStep