Merge branch 'master' of https://github.com/highfidelity/hifi into 20339_nitpick_v1.3

This commit is contained in:
NissimHadar 2018-12-28 11:41:27 -08:00
commit 04e872769d
72 changed files with 1687 additions and 972 deletions

View file

@ -11,7 +11,7 @@ setup_memory_debugger()
# link in the shared libraries
link_hifi_libraries(
audio avatars octree gpu graphics fbx hfm entities
audio avatars octree gpu graphics shaders fbx hfm entities
networking animation recording shared script-engine embedded-webserver
controllers physics plugins midi image
)

View file

@ -1,16 +1,2 @@
{
"RenderMainView": {
"RenderShadowTask": {
"Enabled": {
"enabled": true
}
},
"RenderDeferredTask": {
"AmbientOcclusion": {
"Enabled": {
"enabled": true
}
}
}
}
}

View file

@ -48,6 +48,7 @@
#include "DeferredLightingEffect.h"
#include "PickManager.h"
#include "LightingModel.h"
#include "AmbientOcclusionEffect.h"
#include "RenderShadowTask.h"
#include "AntialiasingEffect.h"
@ -393,13 +394,9 @@ Menu::Menu() {
connect(action, &QAction::triggered, [action] {
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
if (renderConfig) {
auto mainViewShadowTaskConfig = renderConfig->getConfig<RenderShadowTask>("RenderMainView.RenderShadowTask");
if (mainViewShadowTaskConfig) {
if (action->isChecked()) {
mainViewShadowTaskConfig->setPreset("Enabled");
} else {
mainViewShadowTaskConfig->setPreset("None");
}
auto lightingModelConfig = renderConfig->getConfig<MakeLightingModel>("RenderMainView.LightingModel");
if (lightingModelConfig) {
lightingModelConfig->setShadow(action->isChecked());
}
}
});
@ -408,15 +405,11 @@ Menu::Menu() {
connect(action, &QAction::triggered, [action] {
auto renderConfig = qApp->getRenderEngine()->getConfiguration();
if (renderConfig) {
auto mainViewAmbientOcclusionConfig = renderConfig->getConfig<AmbientOcclusionEffect>("RenderMainView.AmbientOcclusion");
if (mainViewAmbientOcclusionConfig) {
if (action->isChecked()) {
mainViewAmbientOcclusionConfig->setPreset("Enabled");
} else {
mainViewAmbientOcclusionConfig->setPreset("None");
}
auto lightingModelConfig = renderConfig->getConfig<MakeLightingModel>("RenderMainView.LightingModel");
if (lightingModelConfig) {
lightingModelConfig->setAmbientOcclusion(action->isChecked());
}
}
}
});
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::WorldAxes);

View file

@ -13,6 +13,7 @@
#include <RenderDeferredTask.h>
#include <RenderForwardTask.h>
#include <RenderViewTask.h>
#include <glm/gtx/transform.hpp>
#include <gpu/Context.h>
@ -270,14 +271,8 @@ public:
void SecondaryCameraRenderTask::build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cullFunctor, bool isDeferred) {
const auto cachedArg = task.addJob<SecondaryCameraJob>("SecondaryCamera");
const auto items = task.addJob<RenderFetchCullSortTask>("FetchCullSort", cullFunctor, render::ItemKey::TAG_BITS_1, render::ItemKey::TAG_BITS_1);
assert(items.canCast<RenderFetchCullSortTask::Output>());
if (isDeferred) {
const render::Varying cascadeSceneBBoxes;
const auto renderInput = RenderDeferredTask::Input(items, cascadeSceneBBoxes).asVarying();
task.addJob<RenderDeferredTask>("RenderDeferredTask", renderInput, false);
} else {
task.addJob<RenderForwardTask>("Forward", items);
}
task.addJob<RenderViewTask>("RenderSecondView", cullFunctor, isDeferred, render::ItemKey::TAG_BITS_1, render::ItemKey::TAG_BITS_1);
task.addJob<EndSecondaryCameraFrame>("EndSecondaryCamera", cachedArg);
}

View file

@ -178,9 +178,11 @@ void Base3DOverlay::setProperties(const QVariantMap& originalProperties) {
}
if (properties["isDashedLine"].isValid()) {
qDebug() << "isDashed is deprecated and will be removed in RC79!";
setIsDashedLine(properties["isDashedLine"].toBool());
}
if (properties["dashed"].isValid()) {
qDebug() << "dashed is deprecated and will be removed in RC79!";
setIsDashedLine(properties["dashed"].toBool());
}
if (properties["ignorePickIntersection"].isValid()) {
@ -223,7 +225,7 @@ void Base3DOverlay::setProperties(const QVariantMap& originalProperties) {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.
@ -259,6 +261,7 @@ QVariant Base3DOverlay::getProperty(const QString& property) {
return !_isSolid;
}
if (property == "isDashedLine" || property == "dashed") {
qDebug() << "isDashedLine/dashed are deprecated and will be removed in RC79!";
return _isDashedLine;
}
if (property == "ignorePickIntersection" || property == "ignoreRayIntersection") {

View file

@ -399,7 +399,7 @@ void Circle3DOverlay::setProperties(const QVariantMap& properties) {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -156,7 +156,7 @@ void Cube3DOverlay::setProperties(const QVariantMap& properties) {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -142,7 +142,7 @@ void Grid3DOverlay::setProperties(const QVariantMap& properties) {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -222,7 +222,7 @@ void Image3DOverlay::setProperties(const QVariantMap& properties) {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -286,7 +286,7 @@ void Line3DOverlay::setProperties(const QVariantMap& originalProperties) {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -382,7 +382,7 @@ vectorType ModelOverlay::mapJoints(mapFunction<itemType> function) const {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -138,7 +138,7 @@ const render::ShapeKey Rectangle3DOverlay::getShapeKey() {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -158,7 +158,7 @@ void Shape3DOverlay::setProperties(const QVariantMap& properties) {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -59,7 +59,7 @@ Sphere3DOverlay::Sphere3DOverlay(const Sphere3DOverlay* Sphere3DOverlay) :
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -224,7 +224,7 @@ void Text3DOverlay::setProperties(const QVariantMap& properties) {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -555,7 +555,7 @@ void Web3DOverlay::setProperties(const QVariantMap& properties) {
 * @property {boolean} isSolid=false - Synonyms: <code>solid</code>, <code>isFilled</code>, and <code>filled</code>.
* Antonyms: <code>isWire</code> and <code>wire</code>.
* @property {boolean} isDashedLine=false - If <code>true</code>, a dashed line is drawn on the overlay's edges. Synonym:
* <code>dashed</code>.
* <code>dashed</code>. Deprecated.
* @property {boolean} ignorePickIntersection=false - If <code>true</code>, picks ignore the overlay. <code>ignoreRayIntersection</code> is a synonym.
* @property {boolean} drawInFront=false - If <code>true</code>, the overlay is rendered in front of other overlays that don't
* have <code>drawInFront</code> set to <code>true</code>, and in front of entities.

View file

@ -27,8 +27,6 @@
// Sphere entities should fit inside a cube entity of the same size, so a sphere that has dimensions 1x1x1
// is a half unit sphere. However, the geometry cache renders a UNIT sphere, so we need to scale down.
static const float SPHERE_ENTITY_SCALE = 0.5f;
static const unsigned int SUN_SHADOW_CASCADE_COUNT{ 4 };
static const float SUN_SHADOW_MAX_DISTANCE{ 40.0f };
using namespace render;
using namespace render::entities;
@ -43,7 +41,6 @@ void ZoneEntityRenderer::onRemoveFromSceneTyped(const TypedEntityPointer& entity
if (!LightStage::isIndexInvalid(_sunIndex)) {
_stage->removeLight(_sunIndex);
_sunIndex = INVALID_INDEX;
_shadowIndex = INVALID_INDEX;
}
if (!LightStage::isIndexInvalid(_ambientIndex)) {
_stage->removeLight(_ambientIndex);
@ -74,36 +71,6 @@ void ZoneEntityRenderer::onRemoveFromSceneTyped(const TypedEntityPointer& entity
}
void ZoneEntityRenderer::doRender(RenderArgs* args) {
#if 0
if (ZoneEntityItem::getDrawZoneBoundaries()) {
switch (_entity->getShapeType()) {
case SHAPE_TYPE_BOX:
case SHAPE_TYPE_SPHERE:
{
PerformanceTimer perfTimer("zone->renderPrimitive");
static const glm::vec4 DEFAULT_COLOR(1.0f, 1.0f, 1.0f, 1.0f);
if (!updateModelTransform()) {
break;
}
auto geometryCache = DependencyManager::get<GeometryCache>();
gpu::Batch& batch = *args->_batch;
batch.setModelTransform(_modelTransform);
if (_entity->getShapeType() == SHAPE_TYPE_SPHERE) {
geometryCache->renderWireSphereInstance(args, batch, DEFAULT_COLOR);
} else {
geometryCache->renderWireCubeInstance(args, batch, DEFAULT_COLOR);
}
}
break;
// Compound shapes are handled by the _model member
case SHAPE_TYPE_COMPOUND:
default:
// Not handled
break;
}
}
#endif
if (!_stage) {
_stage = args->_scene->getStage<LightStage>();
assert(_stage);
@ -130,7 +97,6 @@ void ZoneEntityRenderer::doRender(RenderArgs* args) {
// Do we need to allocate the light in the stage ?
if (LightStage::isIndexInvalid(_sunIndex)) {
_sunIndex = _stage->addLight(_sunLight);
_shadowIndex = _stage->addShadow(_sunIndex, SUN_SHADOW_MAX_DISTANCE, SUN_SHADOW_CASCADE_COUNT);
} else {
_stage->updateLightArrayBuffer(_sunIndex);
}

View file

@ -99,7 +99,6 @@ private:
ComponentMode _bloomMode { COMPONENT_MODE_INHERIT };
indexed_container::Index _sunIndex { LightStage::INVALID_INDEX };
indexed_container::Index _shadowIndex { LightStage::INVALID_INDEX };
indexed_container::Index _ambientIndex { LightStage::INVALID_INDEX };
BackgroundStagePointer _backgroundStage;

View file

@ -19,20 +19,6 @@
#include <glm/glm.hpp>
#if defined(Q_OS_ANDROID)
#define FBX_PACK_NORMALS 0
#else
#define FBX_PACK_NORMALS 1
#endif
#if FBX_PACK_NORMALS
using NormalType = glm::uint32;
#define FBX_NORMAL_ELEMENT gpu::Element::VEC4F_NORMALIZED_XYZ10W2
#else
using NormalType = glm::vec3;
#define FBX_NORMAL_ELEMENT gpu::Element::VEC3F_XYZ
#endif
// See comment in FBXSerializer::parseFBX().
static const int FBX_HEADER_BYTES_BEFORE_VERSION = 23;
static const QByteArray FBX_BINARY_PROLOG("Kaydara FBX Binary ");

View file

@ -11,27 +11,13 @@
#include "FBXSerializer.h"
#include <iostream>
#include <QBuffer>
#include <QDataStream>
#include <QIODevice>
#include <QStringList>
#include <QTextStream>
#include <QtDebug>
#include <QtEndian>
#include <QFileInfo>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/transform.hpp>
#include <FaceshiftConstants.h>
#include <GeometryUtil.h>
#include <GLMHelpers.h>
#include <NumericalConstants.h>
#include <OctalCode.h>
#include <gpu/Format.h>
#include <LogHandler.h>
#include <hfm/ModelFormatLogging.h>
@ -1640,14 +1626,9 @@ HFMModel* FBXSerializer::extractHFMModel(const QVariantHash& mapping, const QStr
}
}
}
buildModelMesh(extracted.mesh, url);
hfmModel.meshes.append(extracted.mesh);
int meshIndex = hfmModel.meshes.size() - 1;
if (extracted.mesh._mesh) {
extracted.mesh._mesh->displayName = QString("%1#/mesh/%2").arg(url).arg(meshIndex).toStdString();
extracted.mesh._mesh->modelName = modelIDsToNames.value(modelID).toStdString();
}
meshIDsToMeshIndices.insert(it.key(), meshIndex);
}
@ -1715,22 +1696,6 @@ HFMModel* FBXSerializer::extractHFMModel(const QVariantHash& mapping, const QStr
}
}
}
{
int i = 0;
for (const auto& mesh : hfmModel.meshes) {
auto name = hfmModel.getModelNameOfMesh(i++);
if (!name.isEmpty()) {
if (mesh._mesh) {
mesh._mesh->modelName = name.toStdString();
if (!mesh._mesh->displayName.size()) {
mesh._mesh->displayName = QString("#%1").arg(name).toStdString();
}
} else {
qDebug() << "modelName but no mesh._mesh" << name;
}
}
}
}
auto offsets = getJointRotationOffsets(mapping);
hfmModel.jointRotationOffsets.clear();

View file

@ -111,9 +111,6 @@ public:
static ExtractedMesh extractMesh(const FBXNode& object, unsigned int& meshIndex, bool deduplicate = true);
QHash<QString, ExtractedMesh> meshes;
static void buildModelMesh(HFMMesh& extractedMesh, const QString& url);
static glm::vec3 normalizeDirForPacking(const glm::vec3& dir);
HFMTexture getTexture(const QString& textureID);

View file

@ -42,16 +42,6 @@
using vec2h = glm::tvec2<glm::detail::hdata>;
#define HFM_PACK_COLORS 1
#if HFM_PACK_COLORS
using ColorType = glm::uint32;
#define FBX_COLOR_ELEMENT gpu::Element::COLOR_RGBA_32
#else
using ColorType = glm::vec3;
#define FBX_COLOR_ELEMENT gpu::Element::VEC3F_XYZ
#endif
class Vertex {
public:
int originalIndex;
@ -556,364 +546,3 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
return data.extracted;
}
glm::vec3 FBXSerializer::normalizeDirForPacking(const glm::vec3& dir) {
auto maxCoord = glm::max(fabsf(dir.x), glm::max(fabsf(dir.y), fabsf(dir.z)));
if (maxCoord > 1e-6f) {
return dir / maxCoord;
}
return dir;
}
void FBXSerializer::buildModelMesh(HFMMesh& extractedMesh, const QString& url) {
unsigned int totalSourceIndices = 0;
foreach(const HFMMeshPart& part, extractedMesh.parts) {
totalSourceIndices += (part.quadTrianglesIndices.size() + part.triangleIndices.size());
}
static int repeatMessageID = LogHandler::getInstance().newRepeatedMessageID();
if (!totalSourceIndices) {
HIFI_FCDEBUG_ID(modelformat(), repeatMessageID, "buildModelMesh failed -- no indices, url = " << url);
return;
}
if (extractedMesh.vertices.size() == 0) {
HIFI_FCDEBUG_ID(modelformat(), repeatMessageID, "buildModelMesh failed -- no vertices, url = " << url);
return;
}
HFMMesh& hfmMesh = extractedMesh;
graphics::MeshPointer mesh(new graphics::Mesh());
int numVerts = extractedMesh.vertices.size();
if (!hfmMesh.normals.empty() && hfmMesh.tangents.empty()) {
// Fill with a dummy value to force tangents to be present if there are normals
hfmMesh.tangents.reserve(hfmMesh.normals.size());
std::fill_n(std::back_inserter(hfmMesh.tangents), hfmMesh.normals.size(), Vectors::UNIT_X);
}
// Same thing with blend shapes
for (auto& blendShape : hfmMesh.blendshapes) {
if (!blendShape.normals.empty() && blendShape.tangents.empty()) {
// Fill with a dummy value to force tangents to be present if there are normals
blendShape.tangents.reserve(blendShape.normals.size());
std::fill_n(std::back_inserter(blendShape.tangents), blendShape.normals.size(), Vectors::UNIT_X);
}
}
// evaluate all attribute elements and data sizes
// Position is a vec3
const auto positionElement = gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ);
const int positionsSize = numVerts * positionElement.getSize();
// Normal and tangent are always there together packed in normalized xyz32bits word (times 2)
const auto normalElement = FBX_NORMAL_ELEMENT;
const int normalsSize = hfmMesh.normals.size() * normalElement.getSize();
const int tangentsSize = hfmMesh.tangents.size() * normalElement.getSize();
// If there are normals then there should be tangents
assert(normalsSize <= tangentsSize);
if (tangentsSize > normalsSize) {
qWarning() << "Unexpected tangents in " << url;
}
const auto normalsAndTangentsSize = normalsSize + tangentsSize;
// Color attrib
const auto colorElement = FBX_COLOR_ELEMENT;
const int colorsSize = hfmMesh.colors.size() * colorElement.getSize();
// Texture coordinates are stored in 2 half floats
const auto texCoordsElement = gpu::Element(gpu::VEC2, gpu::HALF, gpu::UV);
const int texCoordsSize = hfmMesh.texCoords.size() * texCoordsElement.getSize();
const int texCoords1Size = hfmMesh.texCoords1.size() * texCoordsElement.getSize();
// Support for 4 skinning clusters:
// 4 Indices are uint8 ideally, uint16 if more than 256.
const auto clusterIndiceElement = (hfmMesh.clusters.size() < UINT8_MAX ? gpu::Element(gpu::VEC4, gpu::UINT8, gpu::XYZW) : gpu::Element(gpu::VEC4, gpu::UINT16, gpu::XYZW));
// 4 Weights are normalized 16bits
const auto clusterWeightElement = gpu::Element(gpu::VEC4, gpu::NUINT16, gpu::XYZW);
// Cluster indices and weights must be the same sizes
const int NUM_CLUSTERS_PER_VERT = 4;
const int numVertClusters = (hfmMesh.clusterIndices.size() == hfmMesh.clusterWeights.size() ? hfmMesh.clusterIndices.size() / NUM_CLUSTERS_PER_VERT : 0);
const int clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize();
const int clusterWeightsSize = numVertClusters * clusterWeightElement.getSize();
// Decide on where to put what sequentially in a big buffer:
const int positionsOffset = 0;
const int normalsAndTangentsOffset = positionsOffset + positionsSize;
const int colorsOffset = normalsAndTangentsOffset + normalsAndTangentsSize;
const int texCoordsOffset = colorsOffset + colorsSize;
const int texCoords1Offset = texCoordsOffset + texCoordsSize;
const int clusterIndicesOffset = texCoords1Offset + texCoords1Size;
const int clusterWeightsOffset = clusterIndicesOffset + clusterIndicesSize;
const int totalVertsSize = clusterWeightsOffset + clusterWeightsSize;
// Copy all vertex data in a single buffer
auto vertBuffer = std::make_shared<gpu::Buffer>();
vertBuffer->resize(totalVertsSize);
// First positions
vertBuffer->setSubData(positionsOffset, positionsSize, (const gpu::Byte*) extractedMesh.vertices.data());
// Interleave normals and tangents
if (normalsSize > 0) {
std::vector<NormalType> normalsAndTangents;
normalsAndTangents.reserve(hfmMesh.normals.size() + hfmMesh.tangents.size());
for (auto normalIt = hfmMesh.normals.constBegin(), tangentIt = hfmMesh.tangents.constBegin();
normalIt != hfmMesh.normals.constEnd();
++normalIt, ++tangentIt) {
#if FBX_PACK_NORMALS
const auto normal = normalizeDirForPacking(*normalIt);
const auto tangent = normalizeDirForPacking(*tangentIt);
const auto packedNormal = glm::packSnorm3x10_1x2(glm::vec4(normal, 0.0f));
const auto packedTangent = glm::packSnorm3x10_1x2(glm::vec4(tangent, 0.0f));
#else
const auto packedNormal = *normalIt;
const auto packedTangent = *tangentIt;
#endif
normalsAndTangents.push_back(packedNormal);
normalsAndTangents.push_back(packedTangent);
}
vertBuffer->setSubData(normalsAndTangentsOffset, normalsAndTangentsSize, (const gpu::Byte*) normalsAndTangents.data());
}
// Pack colors
if (colorsSize > 0) {
#if HFM_PACK_COLORS
std::vector<ColorType> colors;
colors.reserve(hfmMesh.colors.size());
for (const auto& color : hfmMesh.colors) {
colors.push_back(glm::packUnorm4x8(glm::vec4(color, 1.0f)));
}
vertBuffer->setSubData(colorsOffset, colorsSize, (const gpu::Byte*) colors.data());
#else
vertBuffer->setSubData(colorsOffset, colorsSize, (const gpu::Byte*) hfmMesh.colors.constData());
#endif
}
// Pack Texcoords 0 and 1 (if exists)
if (texCoordsSize > 0) {
QVector<vec2h> texCoordData;
texCoordData.reserve(hfmMesh.texCoords.size());
for (auto& texCoordVec2f : hfmMesh.texCoords) {
vec2h texCoordVec2h;
texCoordVec2h.x = glm::detail::toFloat16(texCoordVec2f.x);
texCoordVec2h.y = glm::detail::toFloat16(texCoordVec2f.y);
texCoordData.push_back(texCoordVec2h);
}
vertBuffer->setSubData(texCoordsOffset, texCoordsSize, (const gpu::Byte*) texCoordData.constData());
}
if (texCoords1Size > 0) {
QVector<vec2h> texCoordData;
texCoordData.reserve(hfmMesh.texCoords1.size());
for (auto& texCoordVec2f : hfmMesh.texCoords1) {
vec2h texCoordVec2h;
texCoordVec2h.x = glm::detail::toFloat16(texCoordVec2f.x);
texCoordVec2h.y = glm::detail::toFloat16(texCoordVec2f.y);
texCoordData.push_back(texCoordVec2h);
}
vertBuffer->setSubData(texCoords1Offset, texCoords1Size, (const gpu::Byte*) texCoordData.constData());
}
// Clusters data
if (clusterIndicesSize > 0) {
if (hfmMesh.clusters.size() < UINT8_MAX) {
// yay! we can fit the clusterIndices within 8-bits
int32_t numIndices = hfmMesh.clusterIndices.size();
QVector<uint8_t> clusterIndices;
clusterIndices.resize(numIndices);
for (int32_t i = 0; i < numIndices; ++i) {
assert(hfmMesh.clusterIndices[i] <= UINT8_MAX);
clusterIndices[i] = (uint8_t)(hfmMesh.clusterIndices[i]);
}
vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) clusterIndices.constData());
} else {
vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) hfmMesh.clusterIndices.constData());
}
}
if (clusterWeightsSize > 0) {
vertBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (const gpu::Byte*) hfmMesh.clusterWeights.constData());
}
// Now we decide on how to interleave the attributes and provide the vertices among buffers:
// Aka the Vertex format and the vertexBufferStream
auto vertexFormat = std::make_shared<gpu::Stream::Format>();
auto vertexBufferStream = std::make_shared<gpu::BufferStream>();
// Decision time:
// if blendshapes then keep position and normals/tangents as separated channel buffers from interleaved attributes
// else everything is interleaved in one buffer
// Default case is no blend shapes
gpu::BufferPointer attribBuffer;
int totalAttribBufferSize = totalVertsSize;
gpu::uint8 posChannel = 0;
gpu::uint8 tangentChannel = posChannel;
gpu::uint8 attribChannel = posChannel;
bool interleavePositions = true;
bool interleaveNormalsTangents = true;
// Define the vertex format, compute the offset for each attributes as we append them to the vertex format
gpu::Offset bufOffset = 0;
if (positionsSize) {
vertexFormat->setAttribute(gpu::Stream::POSITION, posChannel, positionElement, bufOffset);
bufOffset += positionElement.getSize();
if (!interleavePositions) {
bufOffset = 0;
}
}
if (normalsSize) {
vertexFormat->setAttribute(gpu::Stream::NORMAL, tangentChannel, normalElement, bufOffset);
bufOffset += normalElement.getSize();
vertexFormat->setAttribute(gpu::Stream::TANGENT, tangentChannel, normalElement, bufOffset);
bufOffset += normalElement.getSize();
if (!interleaveNormalsTangents) {
bufOffset = 0;
}
}
// Pack normal and Tangent with the rest of attributes if no blend shapes
if (colorsSize) {
vertexFormat->setAttribute(gpu::Stream::COLOR, attribChannel, colorElement, bufOffset);
bufOffset += colorElement.getSize();
}
if (texCoordsSize) {
vertexFormat->setAttribute(gpu::Stream::TEXCOORD, attribChannel, texCoordsElement, bufOffset);
bufOffset += texCoordsElement.getSize();
}
if (texCoords1Size) {
vertexFormat->setAttribute(gpu::Stream::TEXCOORD1, attribChannel, texCoordsElement, bufOffset);
bufOffset += texCoordsElement.getSize();
} else if (texCoordsSize) {
vertexFormat->setAttribute(gpu::Stream::TEXCOORD1, attribChannel, texCoordsElement, bufOffset - texCoordsElement.getSize());
}
if (clusterIndicesSize) {
vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_INDEX, attribChannel, clusterIndiceElement, bufOffset);
bufOffset += clusterIndiceElement.getSize();
}
if (clusterWeightsSize) {
vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT, attribChannel, clusterWeightElement, bufOffset);
bufOffset += clusterWeightElement.getSize();
}
// Finally, allocate and fill the attribBuffer interleaving the attributes as needed:
{
auto vPositionOffset = 0;
auto vPositionSize = (interleavePositions ? positionsSize / numVerts : 0);
auto vNormalsAndTangentsOffset = vPositionOffset + vPositionSize;
auto vNormalsAndTangentsSize = (interleaveNormalsTangents ? normalsAndTangentsSize / numVerts : 0);
auto vColorOffset = vNormalsAndTangentsOffset + vNormalsAndTangentsSize;
auto vColorSize = colorsSize / numVerts;
auto vTexcoord0Offset = vColorOffset + vColorSize;
auto vTexcoord0Size = texCoordsSize / numVerts;
auto vTexcoord1Offset = vTexcoord0Offset + vTexcoord0Size;
auto vTexcoord1Size = texCoords1Size / numVerts;
auto vClusterIndiceOffset = vTexcoord1Offset + vTexcoord1Size;
auto vClusterIndiceSize = clusterIndicesSize / numVerts;
auto vClusterWeightOffset = vClusterIndiceOffset + vClusterIndiceSize;
auto vClusterWeightSize = clusterWeightsSize / numVerts;
auto vStride = vClusterWeightOffset + vClusterWeightSize;
std::vector<gpu::Byte> dest;
dest.resize(totalAttribBufferSize);
auto vDest = dest.data();
auto source = vertBuffer->getData();
for (int i = 0; i < numVerts; i++) {
if (vPositionSize) memcpy(vDest + vPositionOffset, source + positionsOffset + i * vPositionSize, vPositionSize);
if (vNormalsAndTangentsSize) memcpy(vDest + vNormalsAndTangentsOffset, source + normalsAndTangentsOffset + i * vNormalsAndTangentsSize, vNormalsAndTangentsSize);
if (vColorSize) memcpy(vDest + vColorOffset, source + colorsOffset + i * vColorSize, vColorSize);
if (vTexcoord0Size) memcpy(vDest + vTexcoord0Offset, source + texCoordsOffset + i * vTexcoord0Size, vTexcoord0Size);
if (vTexcoord1Size) memcpy(vDest + vTexcoord1Offset, source + texCoords1Offset + i * vTexcoord1Size, vTexcoord1Size);
if (vClusterIndiceSize) memcpy(vDest + vClusterIndiceOffset, source + clusterIndicesOffset + i * vClusterIndiceSize, vClusterIndiceSize);
if (vClusterWeightSize) memcpy(vDest + vClusterWeightOffset, source + clusterWeightsOffset + i * vClusterWeightSize, vClusterWeightSize);
vDest += vStride;
}
auto attribBuffer = std::make_shared<gpu::Buffer>();
attribBuffer->setData(totalAttribBufferSize, dest.data());
vertexBufferStream->addBuffer(attribBuffer, 0, vStride);
}
// Mesh vertex format and vertex stream is ready
mesh->setVertexFormatAndStream(vertexFormat, vertexBufferStream);
// Index and Part Buffers
unsigned int totalIndices = 0;
foreach(const HFMMeshPart& part, extractedMesh.parts) {
totalIndices += (part.quadTrianglesIndices.size() + part.triangleIndices.size());
}
if (! totalIndices) {
qCDebug(modelformat) << "buildModelMesh failed -- no indices, url = " << url;
return;
}
auto indexBuffer = std::make_shared<gpu::Buffer>();
indexBuffer->resize(totalIndices * sizeof(int));
int indexNum = 0;
int offset = 0;
std::vector< graphics::Mesh::Part > parts;
if (extractedMesh.parts.size() > 1) {
indexNum = 0;
}
foreach(const HFMMeshPart& part, extractedMesh.parts) {
graphics::Mesh::Part modelPart(indexNum, 0, 0, graphics::Mesh::TRIANGLES);
if (part.quadTrianglesIndices.size()) {
indexBuffer->setSubData(offset,
part.quadTrianglesIndices.size() * sizeof(int),
(gpu::Byte*) part.quadTrianglesIndices.constData());
offset += part.quadTrianglesIndices.size() * sizeof(int);
indexNum += part.quadTrianglesIndices.size();
modelPart._numIndices += part.quadTrianglesIndices.size();
}
if (part.triangleIndices.size()) {
indexBuffer->setSubData(offset,
part.triangleIndices.size() * sizeof(int),
(gpu::Byte*) part.triangleIndices.constData());
offset += part.triangleIndices.size() * sizeof(int);
indexNum += part.triangleIndices.size();
modelPart._numIndices += part.triangleIndices.size();
}
parts.push_back(modelPart);
}
gpu::BufferView indexBufferView(indexBuffer, gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::XYZ));
mesh->setIndexBuffer(indexBufferView);
if (parts.size()) {
auto pb = std::make_shared<gpu::Buffer>();
pb->setData(parts.size() * sizeof(graphics::Mesh::Part), (const gpu::Byte*) parts.data());
gpu::BufferView pbv(pb, gpu::Element(gpu::VEC4, gpu::UINT32, gpu::XYZW));
mesh->setPartBuffer(pbv);
} else {
qCDebug(modelformat) << "buildModelMesh failed -- no parts, url = " << url;
return;
}
// graphics::Box box =
mesh->evalPartBound(0);
extractedMesh._mesh = mesh;
}

View file

@ -890,7 +890,6 @@ bool GLTFSerializer::buildGeometry(HFMModel& hfmModel, const QUrl& url) {
}
mesh.meshIndex = hfmModel.meshes.size();
FBXSerializer::buildModelMesh(mesh, url.toString());
}
}

View file

@ -831,9 +831,6 @@ HFMModel::Pointer OBJSerializer::read(const QByteArray& data, const QVariantHash
hfmModel.meshExtents.addPoint(vertex);
}
// Build the single mesh.
FBXSerializer::buildModelMesh(mesh, _url.toString());
// hfmDebugDump(hfmModel);
} catch(const std::exception& e) {
qCDebug(modelformat) << "OBJSerializer fail: " << e.what();

View file

@ -29,9 +29,9 @@ void GLBackend::do_setProjectionTransform(const Batch& batch, size_t paramOffset
}
void GLBackend::do_setProjectionJitter(const Batch& batch, size_t paramOffset) {
_transform._projectionJitter.x = batch._params[paramOffset]._float;
_transform._projectionJitter.y = batch._params[paramOffset+1]._float;
_transform._invalidProj = true;
_transform._projectionJitter.x = batch._params[paramOffset]._float;
_transform._projectionJitter.y = batch._params[paramOffset+1]._float;
_transform._invalidProj = true;
}
void GLBackend::do_setViewportTransform(const Batch& batch, size_t paramOffset) {

View file

@ -25,6 +25,30 @@
#include <graphics/Geometry.h>
#include <graphics/Material.h>
#if defined(Q_OS_ANDROID)
#define HFM_PACK_NORMALS 0
#else
#define HFM_PACK_NORMALS 1
#endif
#if HFM_PACK_NORMALS
using NormalType = glm::uint32;
#define HFM_NORMAL_ELEMENT gpu::Element::VEC4F_NORMALIZED_XYZ10W2
#else
using NormalType = glm::vec3;
#define HFM_NORMAL_ELEMENT gpu::Element::VEC3F_XYZ
#endif
#define HFM_PACK_COLORS 1
#if HFM_PACK_COLORS
using ColorType = glm::uint32;
#define HFM_COLOR_ELEMENT gpu::Element::COLOR_RGBA_32
#else
using ColorType = glm::vec3;
#define HFM_COLOR_ELEMENT gpu::Element::VEC3F_XYZ
#endif
const int MAX_NUM_PIXELS_FOR_FBX_TEXTURE = 2048 * 2048;
// High Fidelity Model namespace

View file

@ -1,8 +1,6 @@
set(TARGET_NAME model-baker)
setup_hifi_library()
link_hifi_libraries(shared task)
link_hifi_libraries(shared task gpu graphics)
include_hifi_library_headers(gpu)
include_hifi_library_headers(graphics)
include_hifi_library_headers(hfm)

View file

@ -11,17 +11,89 @@
#include "Baker.h"
#include <shared/HifiTypes.h>
#include "BakerTypes.h"
#include "BuildGraphicsMeshTask.h"
namespace baker {
// Fans out the fields of an hfm::Model that downstream baking jobs need:
// the mesh list, the model's original URL, and the mesh-index -> model-name map.
class GetModelPartsTask {
public:
    using Input = hfm::Model::Pointer;
    // Output slots: 0 = meshes, 1 = original URL, 2 = mesh-index to model-name map
    using Output = VaryingSet3<std::vector<hfm::Mesh>, hifi::URL, MeshIndicesToModelNames>;
    using JobModel = Job::ModelIO<GetModelPartsTask, Input, Output>;

    void run(const BakeContextPointer& context, const Input& input, Output& output) {
        auto& hfmModelIn = input;
        // Copy (QVector -> std::vector) so later jobs operate on standard containers
        output.edit0() = hfmModelIn->meshes.toStdVector();
        output.edit1() = hfmModelIn->originalURL;
        output.edit2() = hfmModelIn->meshIndicesToModelNames;
    }
};
class BuildMeshesTask {
public:
using Input = VaryingSet4<std::vector<hfm::Mesh>, std::vector<graphics::MeshPointer>, TangentsPerMesh, BlendshapesPerMesh>;
using Output = std::vector<hfm::Mesh>;
using JobModel = Job::ModelIO<BuildMeshesTask, Input, Output>;
void run(const BakeContextPointer& context, const Input& input, Output& output) {
auto& meshesIn = input.get0();
int numMeshes = (int)meshesIn.size();
auto& graphicsMeshesIn = input.get1();
auto& tangentsPerMeshIn = input.get2();
auto& blendshapesPerMeshIn = input.get3();
auto meshesOut = meshesIn;
for (int i = 0; i < numMeshes; i++) {
auto& meshOut = meshesOut[i];
meshOut._mesh = graphicsMeshesIn[i];
meshOut.tangents = QVector<glm::vec3>::fromStdVector(tangentsPerMeshIn[i]);
meshOut.blendshapes = QVector<hfm::Blendshape>::fromStdVector(blendshapesPerMeshIn[i]);
}
output = meshesOut;
}
};
// Writes the rebuilt mesh list back into the (shared, mutated in place) hfm::Model
// and forwards the model pointer as the final output of the bake.
class BuildModelTask {
public:
    // Input slots: 0 = the model being baked, 1 = the finished meshes
    using Input = VaryingSet2<hfm::Model::Pointer, std::vector<hfm::Mesh>>;
    using Output = hfm::Model::Pointer;
    using JobModel = Job::ModelIO<BuildModelTask, Input, Output>;

    void run(const BakeContextPointer& context, const Input& input, Output& output) {
        auto hfmModelOut = input.get0();
        // std::vector -> QVector, the container hfm::Model stores
        hfmModelOut->meshes = QVector<hfm::Mesh>::fromStdVector(input.get1());
        output = hfmModelOut;
    }
};
class BakerEngineBuilder {
public:
using Unused = int;
using Input = hfm::Model::Pointer;
using Output = hfm::Model::Pointer;
using JobModel = Task::ModelIO<BakerEngineBuilder, Input, Output>;
void build(JobModel& model, const Varying& in, Varying& out) {
out = in;
void build(JobModel& model, const Varying& hfmModelIn, Varying& hfmModelOut) {
// Split up the inputs from hfm::Model
const auto modelPartsIn = model.addJob<GetModelPartsTask>("GetModelParts", hfmModelIn);
const auto meshesIn = modelPartsIn.getN<GetModelPartsTask::Output>(0);
const auto url = modelPartsIn.getN<GetModelPartsTask::Output>(1);
const auto meshIndicesToModelNames = modelPartsIn.getN<GetModelPartsTask::Output>(2);
// Build the graphics::MeshPointer for each hfm::Mesh
const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames).asVarying();
const auto buildGraphicsMeshOutputs = model.addJob<BuildGraphicsMeshTask>("BuildGraphicsMesh", buildGraphicsMeshInputs);
const auto graphicsMeshes = buildGraphicsMeshOutputs.getN<BuildGraphicsMeshTask::Output>(0);
// TODO: Move tangent/blendshape validation/calculation to an earlier step
const auto tangentsPerMesh = buildGraphicsMeshOutputs.getN<BuildGraphicsMeshTask::Output>(1);
const auto blendshapesPerMesh = buildGraphicsMeshOutputs.getN<BuildGraphicsMeshTask::Output>(2);
// Combine the outputs into a new hfm::Model
const auto buildMeshesInputs = BuildMeshesTask::Input(meshesIn, graphicsMeshes, tangentsPerMesh, blendshapesPerMesh).asVarying();
const auto meshesOut = model.addJob<BuildMeshesTask>("BuildMeshes", buildMeshesInputs);
const auto buildModelInputs = BuildModelTask::Input(hfmModelIn, meshesOut).asVarying();
hfmModelOut = model.addJob<BuildModelTask>("BuildModel", buildModelInputs);
}
};

View file

@ -17,7 +17,6 @@
#include "Engine.h"
namespace baker {
class Baker {
public:
Baker(const hfm::Model::Pointer& hfmModel);

View file

@ -0,0 +1,25 @@
//
// BakerTypes.h
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2018/12/10.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_BakerTypes_h
#define hifi_BakerTypes_h
#include <hfm/HFM.h>
namespace baker {
    // Per-vertex tangents of a single mesh
    using MeshTangents = std::vector<glm::vec3>;
    // Tangent lists for every mesh of a model, indexed by mesh
    using TangentsPerMesh = std::vector<std::vector<glm::vec3>>;
    // Blendshapes of a single mesh
    using Blendshapes = std::vector<hfm::Blendshape>;
    // Blendshape lists for every mesh of a model, indexed by mesh
    using BlendshapesPerMesh = std::vector<std::vector<hfm::Blendshape>>;
    // Maps a mesh index to the name of the model node it came from
    using MeshIndicesToModelNames = QHash<int, QString>;
};
#endif // hifi_BakerTypes_h

View file

@ -0,0 +1,412 @@
//
// BuildGraphicsMeshTask.h
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2018/12/06.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "BuildGraphicsMeshTask.h"
#include <glm/gtc/packing.hpp>
#include <LogHandler.h>
#include "ModelBakerLogging.h"
using vec2h = glm::tvec2<glm::detail::hdata>;
// Rescale a direction so its largest absolute component becomes 1, which keeps
// every component inside the [-1, 1] range expected by snorm packing.
// Near-zero directions are returned unchanged to avoid dividing by ~0.
glm::vec3 normalizeDirForPacking(const glm::vec3& dir) {
    const float largestComponent = glm::max(glm::max(fabsf(dir.x), fabsf(dir.y)), fabsf(dir.z));
    if (largestComponent <= 1e-6f) {
        // Degenerate direction: nothing meaningful to scale
        return dir;
    }
    return dir / largestComponent;
}
// Convert one hfm::Mesh into a renderable graphics::Mesh.
// Steps: validate indices/vertices, back-fill missing tangents (mesh and blendshapes),
// pack all vertex attributes into one staging buffer, interleave them per vertex into
// the final attribute buffer, then build the index and part buffers.
// Outputs:
//   graphicsMeshPointer - set to the built mesh on success, left untouched (null) on failure
//   meshTangents        - the tangents actually used (source tangents, or UNIT_X dummies)
//   blendshapes         - copy of the source blendshapes, with dummy tangents back-filled
// Fix over previous revision: removed the dead `gpu::BufferPointer attribBuffer;` local,
// which was never used and was shadowed by the buffer created in the interleave scope below.
void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, baker::MeshTangents& meshTangents, baker::Blendshapes& blendshapes) {
    auto graphicsMesh = std::make_shared<graphics::Mesh>();

    // Bail out early if the source has no indices at all
    unsigned int totalSourceIndices = 0;
    foreach(const HFMMeshPart& part, hfmMesh.parts) {
        totalSourceIndices += (part.quadTrianglesIndices.size() + part.triangleIndices.size());
    }

    static int repeatMessageID = LogHandler::getInstance().newRepeatedMessageID();

    if (!totalSourceIndices) {
        HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask failed -- no indices");
        return;
    }

    if (hfmMesh.vertices.size() == 0) {
        HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask failed -- no vertices");
        return;
    }

    int numVerts = hfmMesh.vertices.size();

    if (!hfmMesh.normals.empty() && hfmMesh.tangents.empty()) {
        // Fill with a dummy value to force tangents to be present if there are normals
        meshTangents.reserve(hfmMesh.normals.size());
        std::fill_n(std::back_inserter(meshTangents), hfmMesh.normals.size(), Vectors::UNIT_X);
    } else {
        meshTangents = hfmMesh.tangents.toStdVector();
    }

    // Same thing with blend shapes
    blendshapes = hfmMesh.blendshapes.toStdVector();
    for (auto& blendShape : blendshapes) {
        if (!blendShape.normals.empty() && blendShape.tangents.empty()) {
            // Fill with a dummy value to force tangents to be present if there are normals
            blendShape.tangents.reserve(blendShape.normals.size());
            std::fill_n(std::back_inserter(blendShape.tangents), blendShape.normals.size(), Vectors::UNIT_X);
        }
    }

    // Evaluate all attribute elements and data sizes

    // Position is a vec3
    const auto positionElement = gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ);
    const int positionsSize = numVerts * positionElement.getSize();

    // Normal and tangent are always there together packed in normalized xyz32bits word (times 2)
    const auto normalElement = HFM_NORMAL_ELEMENT;
    const int normalsSize = hfmMesh.normals.size() * normalElement.getSize();
    const int tangentsSize = (int)meshTangents.size() * normalElement.getSize();
    // If there are normals then there should be tangents
    // NOTE(review): the assert allows tangentsSize > normalsSize but the message below
    // reports that case as unexpected -- confirm intended invariant
    assert(normalsSize <= tangentsSize);
    if (tangentsSize > normalsSize) {
        HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask -- Unexpected tangents in file");
    }
    const auto normalsAndTangentsSize = normalsSize + tangentsSize;

    // Color attrib
    const auto colorElement = HFM_COLOR_ELEMENT;
    const int colorsSize = hfmMesh.colors.size() * colorElement.getSize();

    // Texture coordinates are stored in 2 half floats
    const auto texCoordsElement = gpu::Element(gpu::VEC2, gpu::HALF, gpu::UV);
    const int texCoordsSize = hfmMesh.texCoords.size() * texCoordsElement.getSize();
    const int texCoords1Size = hfmMesh.texCoords1.size() * texCoordsElement.getSize();

    // Support for 4 skinning clusters:
    // 4 Indices are uint8 ideally, uint16 if more than 256.
    const auto clusterIndiceElement = (hfmMesh.clusters.size() < UINT8_MAX ? gpu::Element(gpu::VEC4, gpu::UINT8, gpu::XYZW) : gpu::Element(gpu::VEC4, gpu::UINT16, gpu::XYZW));
    // 4 Weights are normalized 16bits
    const auto clusterWeightElement = gpu::Element(gpu::VEC4, gpu::NUINT16, gpu::XYZW);

    // Cluster indices and weights must be the same sizes; otherwise treat as unskinned
    const int NUM_CLUSTERS_PER_VERT = 4;
    const int numVertClusters = (hfmMesh.clusterIndices.size() == hfmMesh.clusterWeights.size() ? hfmMesh.clusterIndices.size() / NUM_CLUSTERS_PER_VERT : 0);
    const int clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize();
    const int clusterWeightsSize = numVertClusters * clusterWeightElement.getSize();

    // Decide on where to put what sequentially in a big buffer:
    const int positionsOffset = 0;
    const int normalsAndTangentsOffset = positionsOffset + positionsSize;
    const int colorsOffset = normalsAndTangentsOffset + normalsAndTangentsSize;
    const int texCoordsOffset = colorsOffset + colorsSize;
    const int texCoords1Offset = texCoordsOffset + texCoordsSize;
    const int clusterIndicesOffset = texCoords1Offset + texCoords1Size;
    const int clusterWeightsOffset = clusterIndicesOffset + clusterIndicesSize;
    const int totalVertsSize = clusterWeightsOffset + clusterWeightsSize;

    // Copy all vertex data in a single staging buffer, attribute-major (all positions,
    // then all normals+tangents, etc.); interleaving happens later
    auto vertBuffer = std::make_shared<gpu::Buffer>();
    vertBuffer->resize(totalVertsSize);

    // First positions
    vertBuffer->setSubData(positionsOffset, positionsSize, (const gpu::Byte*) hfmMesh.vertices.data());

    // Interleave normals and tangents (normal, tangent, normal, tangent, ...)
    if (normalsSize > 0) {
        std::vector<NormalType> normalsAndTangents;

        normalsAndTangents.reserve(hfmMesh.normals.size() + (int)meshTangents.size());
        auto normalIt = hfmMesh.normals.constBegin();
        auto tangentIt = meshTangents.cbegin();
        for (;
            normalIt != hfmMesh.normals.constEnd();
            ++normalIt, ++tangentIt) {
#if HFM_PACK_NORMALS
            const auto normal = normalizeDirForPacking(*normalIt);
            const auto tangent = normalizeDirForPacking(*tangentIt);
            const auto packedNormal = glm::packSnorm3x10_1x2(glm::vec4(normal, 0.0f));
            const auto packedTangent = glm::packSnorm3x10_1x2(glm::vec4(tangent, 0.0f));
#else
            const auto packedNormal = *normalIt;
            const auto packedTangent = *tangentIt;
#endif
            normalsAndTangents.push_back(packedNormal);
            normalsAndTangents.push_back(packedTangent);
        }
        vertBuffer->setSubData(normalsAndTangentsOffset, normalsAndTangentsSize, (const gpu::Byte*) normalsAndTangents.data());
    }

    // Pack colors (RGBA8 when HFM_PACK_COLORS, alpha forced to 1)
    if (colorsSize > 0) {
#if HFM_PACK_COLORS
        std::vector<ColorType> colors;

        colors.reserve(hfmMesh.colors.size());
        for (const auto& color : hfmMesh.colors) {
            colors.push_back(glm::packUnorm4x8(glm::vec4(color, 1.0f)));
        }
        vertBuffer->setSubData(colorsOffset, colorsSize, (const gpu::Byte*) colors.data());
#else
        vertBuffer->setSubData(colorsOffset, colorsSize, (const gpu::Byte*) hfmMesh.colors.constData());
#endif
    }

    // Pack Texcoords 0 and 1 (if exists) as half floats
    if (texCoordsSize > 0) {
        QVector<vec2h> texCoordData;
        texCoordData.reserve(hfmMesh.texCoords.size());
        for (auto& texCoordVec2f : hfmMesh.texCoords) {
            vec2h texCoordVec2h;

            texCoordVec2h.x = glm::detail::toFloat16(texCoordVec2f.x);
            texCoordVec2h.y = glm::detail::toFloat16(texCoordVec2f.y);
            texCoordData.push_back(texCoordVec2h);
        }
        vertBuffer->setSubData(texCoordsOffset, texCoordsSize, (const gpu::Byte*) texCoordData.constData());
    }
    if (texCoords1Size > 0) {
        QVector<vec2h> texCoordData;
        texCoordData.reserve(hfmMesh.texCoords1.size());
        for (auto& texCoordVec2f : hfmMesh.texCoords1) {
            vec2h texCoordVec2h;

            texCoordVec2h.x = glm::detail::toFloat16(texCoordVec2f.x);
            texCoordVec2h.y = glm::detail::toFloat16(texCoordVec2f.y);
            texCoordData.push_back(texCoordVec2h);
        }
        vertBuffer->setSubData(texCoords1Offset, texCoords1Size, (const gpu::Byte*) texCoordData.constData());
    }

    // Clusters data
    if (clusterIndicesSize > 0) {
        if (hfmMesh.clusters.size() < UINT8_MAX) {
            // yay! we can fit the clusterIndices within 8-bits
            int32_t numIndices = hfmMesh.clusterIndices.size();
            QVector<uint8_t> clusterIndices;
            clusterIndices.resize(numIndices);
            for (int32_t i = 0; i < numIndices; ++i) {
                assert(hfmMesh.clusterIndices[i] <= UINT8_MAX);
                clusterIndices[i] = (uint8_t)(hfmMesh.clusterIndices[i]);
            }
            vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) clusterIndices.constData());
        } else {
            vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) hfmMesh.clusterIndices.constData());
        }
    }

    if (clusterWeightsSize > 0) {
        vertBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (const gpu::Byte*) hfmMesh.clusterWeights.constData());
    }

    // Now we decide on how to interleave the attributes and provide the vertices among buffers:
    // Aka the Vertex format and the vertexBufferStream
    auto vertexFormat = std::make_shared<gpu::Stream::Format>();
    auto vertexBufferStream = std::make_shared<gpu::BufferStream>();

    // Decision time:
    // if blendshapes then keep position and normals/tangents as separated channel buffers from interleaved attributes
    // else everything is interleaved in one buffer
    // (currently only the fully-interleaved single-channel path is taken; the flags
    // below are fixed, they exist to keep the offset bookkeeping explicit)

    // Default case is no blend shapes
    int totalAttribBufferSize = totalVertsSize;
    gpu::uint8 posChannel = 0;
    gpu::uint8 tangentChannel = posChannel;
    gpu::uint8 attribChannel = posChannel;
    bool interleavePositions = true;
    bool interleaveNormalsTangents = true;

    // Define the vertex format, compute the offset for each attribute as we append them to the vertex format
    gpu::Offset bufOffset = 0;
    if (positionsSize) {
        vertexFormat->setAttribute(gpu::Stream::POSITION, posChannel, positionElement, bufOffset);
        bufOffset += positionElement.getSize();
        if (!interleavePositions) {
            bufOffset = 0;
        }
    }
    if (normalsSize) {
        vertexFormat->setAttribute(gpu::Stream::NORMAL, tangentChannel, normalElement, bufOffset);
        bufOffset += normalElement.getSize();
        vertexFormat->setAttribute(gpu::Stream::TANGENT, tangentChannel, normalElement, bufOffset);
        bufOffset += normalElement.getSize();
        if (!interleaveNormalsTangents) {
            bufOffset = 0;
        }
    }

    // Pack normal and Tangent with the rest of attributes if no blend shapes
    if (colorsSize) {
        vertexFormat->setAttribute(gpu::Stream::COLOR, attribChannel, colorElement, bufOffset);
        bufOffset += colorElement.getSize();
    }
    if (texCoordsSize) {
        vertexFormat->setAttribute(gpu::Stream::TEXCOORD, attribChannel, texCoordsElement, bufOffset);
        bufOffset += texCoordsElement.getSize();
    }
    if (texCoords1Size) {
        vertexFormat->setAttribute(gpu::Stream::TEXCOORD1, attribChannel, texCoordsElement, bufOffset);
        bufOffset += texCoordsElement.getSize();
    } else if (texCoordsSize) {
        // No second UV set: alias TEXCOORD1 onto the TEXCOORD0 data
        vertexFormat->setAttribute(gpu::Stream::TEXCOORD1, attribChannel, texCoordsElement, bufOffset - texCoordsElement.getSize());
    }
    if (clusterIndicesSize) {
        vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_INDEX, attribChannel, clusterIndiceElement, bufOffset);
        bufOffset += clusterIndiceElement.getSize();
    }
    if (clusterWeightsSize) {
        vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT, attribChannel, clusterWeightElement, bufOffset);
        bufOffset += clusterWeightElement.getSize();
    }

    // Finally, allocate and fill the attribBuffer interleaving the attributes as needed:
    {
        // Per-vertex byte offsets/sizes of each attribute within one interleaved vertex
        auto vPositionOffset = 0;
        auto vPositionSize = (interleavePositions ? positionsSize / numVerts : 0);

        auto vNormalsAndTangentsOffset = vPositionOffset + vPositionSize;
        auto vNormalsAndTangentsSize = (interleaveNormalsTangents ? normalsAndTangentsSize / numVerts : 0);

        auto vColorOffset = vNormalsAndTangentsOffset + vNormalsAndTangentsSize;
        auto vColorSize = colorsSize / numVerts;

        auto vTexcoord0Offset = vColorOffset + vColorSize;
        auto vTexcoord0Size = texCoordsSize / numVerts;

        auto vTexcoord1Offset = vTexcoord0Offset + vTexcoord0Size;
        auto vTexcoord1Size = texCoords1Size / numVerts;

        auto vClusterIndiceOffset = vTexcoord1Offset + vTexcoord1Size;
        auto vClusterIndiceSize = clusterIndicesSize / numVerts;

        auto vClusterWeightOffset = vClusterIndiceOffset + vClusterIndiceSize;
        auto vClusterWeightSize = clusterWeightsSize / numVerts;

        auto vStride = vClusterWeightOffset + vClusterWeightSize;

        std::vector<gpu::Byte> dest;
        dest.resize(totalAttribBufferSize);
        auto vDest = dest.data();

        auto source = vertBuffer->getData();

        // Gather: copy each attribute of vertex i from the attribute-major staging
        // buffer into its slot of the i-th interleaved vertex
        for (int i = 0; i < numVerts; i++) {
            if (vPositionSize) memcpy(vDest + vPositionOffset, source + positionsOffset + i * vPositionSize, vPositionSize);
            if (vNormalsAndTangentsSize) memcpy(vDest + vNormalsAndTangentsOffset, source + normalsAndTangentsOffset + i * vNormalsAndTangentsSize, vNormalsAndTangentsSize);
            if (vColorSize) memcpy(vDest + vColorOffset, source + colorsOffset + i * vColorSize, vColorSize);
            if (vTexcoord0Size) memcpy(vDest + vTexcoord0Offset, source + texCoordsOffset + i * vTexcoord0Size, vTexcoord0Size);
            if (vTexcoord1Size) memcpy(vDest + vTexcoord1Offset, source + texCoords1Offset + i * vTexcoord1Size, vTexcoord1Size);
            if (vClusterIndiceSize) memcpy(vDest + vClusterIndiceOffset, source + clusterIndicesOffset + i * vClusterIndiceSize, vClusterIndiceSize);
            if (vClusterWeightSize) memcpy(vDest + vClusterWeightOffset, source + clusterWeightsOffset + i * vClusterWeightSize, vClusterWeightSize);

            vDest += vStride;
        }

        auto attribBuffer = std::make_shared<gpu::Buffer>();
        attribBuffer->setData(totalAttribBufferSize, dest.data());
        vertexBufferStream->addBuffer(attribBuffer, 0, vStride);
    }

    // Mesh vertex format and vertex stream is ready
    graphicsMesh->setVertexFormatAndStream(vertexFormat, vertexBufferStream);

    // Index and Part Buffers
    unsigned int totalIndices = 0;
    foreach(const HFMMeshPart& part, hfmMesh.parts) {
        totalIndices += (part.quadTrianglesIndices.size() + part.triangleIndices.size());
    }

    if (!totalIndices) {
        HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask failed -- no indices");
        return;
    }

    auto indexBuffer = std::make_shared<gpu::Buffer>();
    indexBuffer->resize(totalIndices * sizeof(int));

    int indexNum = 0;
    int offset = 0;

    std::vector< graphics::Mesh::Part > parts;
    if (hfmMesh.parts.size() > 1) {
        indexNum = 0;
    }
    // Each HFM part contributes its (already triangulated) quad indices followed by
    // its triangle indices; a graphics::Mesh::Part records the start index and count
    foreach(const HFMMeshPart& part, hfmMesh.parts) {
        graphics::Mesh::Part modelPart(indexNum, 0, 0, graphics::Mesh::TRIANGLES);

        if (part.quadTrianglesIndices.size()) {
            indexBuffer->setSubData(offset,
                part.quadTrianglesIndices.size() * sizeof(int),
                (gpu::Byte*) part.quadTrianglesIndices.constData());
            offset += part.quadTrianglesIndices.size() * sizeof(int);
            indexNum += part.quadTrianglesIndices.size();
            modelPart._numIndices += part.quadTrianglesIndices.size();
        }

        if (part.triangleIndices.size()) {
            indexBuffer->setSubData(offset,
                part.triangleIndices.size() * sizeof(int),
                (gpu::Byte*) part.triangleIndices.constData());
            offset += part.triangleIndices.size() * sizeof(int);
            indexNum += part.triangleIndices.size();
            modelPart._numIndices += part.triangleIndices.size();
        }

        parts.push_back(modelPart);
    }

    gpu::BufferView indexBufferView(indexBuffer, gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::XYZ));
    graphicsMesh->setIndexBuffer(indexBufferView);

    if (parts.size()) {
        auto pb = std::make_shared<gpu::Buffer>();
        pb->setData(parts.size() * sizeof(graphics::Mesh::Part), (const gpu::Byte*) parts.data());
        gpu::BufferView pbv(pb, gpu::Element(gpu::VEC4, gpu::UINT32, gpu::XYZW));
        graphicsMesh->setPartBuffer(pbv);
    } else {
        HIFI_FCDEBUG_ID(model_baker(), repeatMessageID, "BuildGraphicsMeshTask failed -- no parts");
        return;
    }

    graphicsMesh->evalPartBound(0);

    // Success: publish the built mesh
    graphicsMeshPointer = graphicsMesh;
}
// For every input hfm::Mesh, attempt to build its graphics::Mesh (plus the tangents
// and blendshapes actually used), then give each successfully built mesh a display
// name derived from the model URL and, when available, its model node name.
void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const Input& input, Output& output) {
    const auto& hfmMeshes = input.get0();
    const auto& modelURL = input.get1();
    const auto& indexToModelName = input.get2();
    auto& outGraphicsMeshes = output.edit0();
    auto& outTangents = output.edit1();
    auto& outBlendshapes = output.edit2();

    const int meshCount = (int)hfmMeshes.size();
    for (int meshIndex = 0; meshIndex < meshCount; meshIndex++) {
        // One (possibly empty) entry per input mesh in every output list
        outGraphicsMeshes.emplace_back();
        outTangents.emplace_back();
        outBlendshapes.emplace_back();

        // Try to create the graphics::Mesh; on failure the pointer stays null
        auto& graphicsMesh = outGraphicsMeshes.back();
        buildGraphicsMesh(hfmMeshes[meshIndex], graphicsMesh, outTangents.back(), outBlendshapes.back());

        // Choose a name for the mesh
        if (graphicsMesh) {
            graphicsMesh->displayName = modelURL.toString().toStdString() + "#/mesh/" + std::to_string(meshIndex);
            if (indexToModelName.find(meshIndex) != indexToModelName.cend()) {
                graphicsMesh->modelName = indexToModelName[meshIndex].toStdString();
            }
        }
    }
}

View file

@ -0,0 +1,30 @@
//
// BuildGraphicsMeshTask.h
// model-baker/src/model-baker
//
// Created by Sabrina Shanman on 2018/12/06.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_BuildGraphicsMeshTask_h
#define hifi_BuildGraphicsMeshTask_h
#include <hfm/HFM.h>
#include <shared/HifiTypes.h>
#include "Engine.h"
#include "BakerTypes.h"
// Baker job: builds a graphics::MeshPointer for each hfm::Mesh of a model.
class BuildGraphicsMeshTask {
public:
    // Input slots: 0 = source meshes, 1 = model URL (used for mesh display names),
    // 2 = mesh-index to model-name map
    using Input = baker::VaryingSet3<std::vector<hfm::Mesh>, hifi::URL, baker::MeshIndicesToModelNames>;
    // Output slots: 0 = built graphics meshes (null entries on failure),
    // 1 = tangents actually used per mesh, 2 = blendshapes (with back-filled tangents) per mesh
    using Output = baker::VaryingSet3<std::vector<graphics::MeshPointer>, std::vector<baker::MeshTangents>, std::vector<baker::Blendshapes>>;
    using JobModel = baker::Job::ModelIO<BuildGraphicsMeshTask, Input, Output>;

    void run(const baker::BakeContextPointer& context, const Input& input, Output& output);
};
#endif // hifi_BuildGraphicsMeshTask_h

View file

@ -0,0 +1,14 @@
//
// ModelBakerLogging.cpp
// libraries/baker/src/baker
//
// Created by Sabrina Shanman on 2018/12/12.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "ModelBakerLogging.h"
// Defines the "hifi.model_baker" logging category declared in ModelBakerLogging.h
Q_LOGGING_CATEGORY(model_baker, "hifi.model_baker")

View file

@ -0,0 +1,19 @@
//
// ModelBakerLogging.h
// libraries/baker/src/baker
//
// Created by Sabrina Shanman on 2018/12/06.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_ModelBakerLogging_h
#define hifi_ModelBakerLogging_h
#include <QLoggingCategory>
Q_DECLARE_LOGGING_CATEGORY(model_baker)
#endif // hifi_ModelBakerLogging_h

View file

@ -303,8 +303,6 @@ AmbientOcclusionEffect::AmbientOcclusionEffect() {
}
void AmbientOcclusionEffect::configure(const Config& config) {
DependencyManager::get<DeferredLightingEffect>()->setAmbientOcclusionEnabled(config.isEnabled());
bool shouldUpdateBlurs = false;
bool shouldUpdateTechnique = false;
@ -591,14 +589,21 @@ void AmbientOcclusionEffect::updateJitterSamples() {
}
}
void AmbientOcclusionEffect::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs) {
void AmbientOcclusionEffect::run(const render::RenderContextPointer& renderContext, const Input& input, Output& output) {
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
RenderArgs* args = renderContext->args;
const auto& frameTransform = inputs.get0();
const auto& linearDepthFramebuffer = inputs.get2();
const auto& lightingModel = input.get0();
if (!lightingModel->isAmbientOcclusionEnabled()) {
output.edit0().reset();
return;
}
const auto& frameTransform = input.get1();
const auto& linearDepthFramebuffer = input.get3();
const int resolutionLevel = _aoParametersBuffer->getResolutionLevel();
const auto depthResolutionLevel = getDepthResolutionLevel();
@ -631,8 +636,8 @@ void AmbientOcclusionEffect::run(const render::RenderContextPointer& renderConte
auto occlusionFBO = _framebuffer->getOcclusionFramebuffer();
auto occlusionBlurredFBO = _framebuffer->getOcclusionBlurredFramebuffer();
outputs.edit0() = _framebuffer;
outputs.edit1() = _aoParametersBuffer;
output.edit0() = _framebuffer;
output.edit1() = _aoParametersBuffer;
auto occlusionPipeline = getOcclusionPipeline();
auto bilateralBlurPipeline = getBilateralBlurPipeline();

View file

@ -17,6 +17,7 @@
#include "render/DrawTask.h"
#include "LightingModel.h"
#include "DeferredFrameTransform.h"
#include "DeferredFramebuffer.h"
#include "SurfaceGeometryPass.h"
@ -152,15 +153,15 @@ signals:
class AmbientOcclusionEffect {
public:
using Inputs = render::VaryingSet3<DeferredFrameTransformPointer, DeferredFramebufferPointer, LinearDepthFramebufferPointer>;
using Outputs = render::VaryingSet2<AmbientOcclusionFramebufferPointer, gpu::BufferView>;
using Input = render::VaryingSet4<LightingModelPointer, DeferredFrameTransformPointer, DeferredFramebufferPointer, LinearDepthFramebufferPointer>;
using Output = render::VaryingSet2<AmbientOcclusionFramebufferPointer, gpu::BufferView>;
using Config = AmbientOcclusionEffectConfig;
using JobModel = render::Job::ModelIO<AmbientOcclusionEffect, Inputs, Outputs, Config>;
using JobModel = render::Job::ModelIO<AmbientOcclusionEffect, Input, Output, Config>;
AmbientOcclusionEffect();
void configure(const Config& config);
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& outputs);
void run(const render::RenderContextPointer& renderContext, const Input& input, Output& output);
// Class describing the uniform buffer with all the parameters common to the AO shaders
class AOParameters : public AmbientOcclusionParams {

View file

@ -0,0 +1,50 @@
//
// Created by Samuel Gateau on 2018/12/06
// Copyright 2013-2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "AssembleLightingStageTask.h"
#include <render/DrawTask.h>
// Snapshot the current frame of each lighting-related stage (Light, Background,
// Haze, Bloom) from the scene into shared Frame copies for downstream jobs.
void FetchCurrentFrames::run(const render::RenderContextPointer& renderContext, Output& output) {
    const auto& scene = renderContext->_scene;

    auto lights = scene->getStage<LightStage>();
    assert(lights);
    output.edit0() = std::make_shared<LightStage::Frame>(lights->_currentFrame);

    auto backgrounds = scene->getStage<BackgroundStage>();
    assert(backgrounds);
    output.edit1() = std::make_shared<BackgroundStage::Frame>(backgrounds->_currentFrame);

    auto hazes = scene->getStage<HazeStage>();
    assert(hazes);
    output.edit2() = std::make_shared<HazeStage::Frame>(hazes->_currentFrame);

    auto blooms = scene->getStage<BloomStage>();
    assert(blooms);
    output.edit3() = std::make_shared<BloomStage::Frame>(blooms->_currentFrame);
}
// Wires the lighting-stage assembly: renders zones from the META bucket, registers
// the culled lights, and captures the current frame of every lighting stage.
void AssembleLightingStageTask::build(JobModel& task, const render::Varying& input, render::Varying& output) {
    const auto& upstream = input.get<Input>();
    const auto& sortedBuckets = upstream.get0();
    const auto& lightItems = sortedBuckets[RenderFetchCullSortTask::LIGHT];
    const auto& metaItems = sortedBuckets[RenderFetchCullSortTask::META];

    // Clear the Light, Haze, Bloom and Skybox stages and render zones from the general metas bucket
    const auto zones = task.addJob<ZoneRendererTask>("ZoneRenderer", metaItems);

    // DrawLight just adds the lights to the current list of lights to deal with; not really a gpu job for now
    task.addJob<render::DrawLight>("DrawLight", lightItems);

    // Fetch the current frame stacks from all the stages
    const auto currentStageFrames = task.addJob<FetchCurrentFrames>("FetchCurrentFrames");

    output = Output(currentStageFrames, zones);
}

View file

@ -0,0 +1,44 @@
//
// Created by Samuel Gateau on 2018/12/06
// Copyright 2013-2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AssembleLightingStageTask_h
#define hifi_AssembleLightingStageTask_h
#include <render/RenderFetchCullSortTask.h>
#include "LightingModel.h"
#include "LightStage.h"
#include "BackgroundStage.h"
#include "HazeStage.h"
#include "BloomStage.h"
#include "ZoneRenderer.h"
// Job that copies the current frame of the Light, Background, Haze and Bloom
// stages out of the scene (see the matching run() in the .cpp).
class FetchCurrentFrames {
public:
    // Output slots: 0 = light frame, 1 = background frame, 2 = haze frame, 3 = bloom frame
    using Output = render::VaryingSet4<LightStage::FramePointer, BackgroundStage::FramePointer, HazeStage::FramePointer, BloomStage::FramePointer>;
    using JobModel = render::Job::ModelO<FetchCurrentFrames, Output>;

    FetchCurrentFrames() {}

    void run(const render::RenderContextPointer& renderContext, Output& output);
};
// Task that assembles the lighting stages from the fetch/cull/sort output:
// renders zones, registers lights, and gathers the current stage frames.
class AssembleLightingStageTask {
public:
    using Input = RenderFetchCullSortTask::Output;
    // Output slots: 0 = current stage frames, 1 = zone renderer output
    using Output = render::VaryingSet2<FetchCurrentFrames::Output, ZoneRendererTask::Output>;
    using JobModel = render::Task::ModelIO<AssembleLightingStageTask, Input, Output, render::Task::Config>;

    AssembleLightingStageTask() {}

    void build(JobModel& task, const render::Varying& input, render::Varying& output);
};
#endif

View file

@ -403,7 +403,7 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
auto& ambientOcclusionFramebuffer = inputs.get3();
auto& velocityFramebuffer = inputs.get4();
auto& frameTransform = inputs.get5();
auto& lightFrame = inputs.get6();
auto& shadowFrame = inputs.get6();
gpu::doInBatch("DebugDeferredBuffer::run", args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
@ -439,16 +439,14 @@ void DebugDeferredBuffer::run(const RenderContextPointer& renderContext, const I
batch.setResourceTexture(Textures::DebugTexture0, velocityFramebuffer->getVelocityTexture());
}
auto lightStage = renderContext->_scene->getStage<LightStage>();
assert(lightStage);
assert(lightStage->getNumLights() > 0);
auto lightAndShadow = lightStage->getCurrentKeyLightAndShadow(*lightFrame);
const auto& globalShadow = lightAndShadow.second;
if (globalShadow) {
batch.setResourceTexture(Textures::Shadow, globalShadow->map);
batch.setUniformBuffer(UBOs::ShadowParams, globalShadow->getBuffer());
batch.setUniformBuffer(UBOs::DeferredFrameTransform, frameTransform->getFrameTransformBuffer());
batch.setUniformBuffer(UBOs::DebugDeferredParams, _parameters);
if (!shadowFrame->_objects.empty()) {
const auto& globalShadow = shadowFrame->_objects[0];
if (globalShadow) {
batch.setResourceTexture(Textures::Shadow, globalShadow->map);
batch.setUniformBuffer(UBOs::ShadowParams, globalShadow->getBuffer());
batch.setUniformBuffer(UBOs::DeferredFrameTransform, frameTransform->getFrameTransformBuffer());
batch.setUniformBuffer(UBOs::DebugDeferredParams, _parameters);
}
}
if (linearDepthTarget) {

View file

@ -46,7 +46,7 @@ public:
AmbientOcclusionFramebufferPointer,
VelocityFramebufferPointer,
DeferredFrameTransformPointer,
LightStage::FramePointer>;
LightStage::ShadowFramePointer>;
using Config = DebugDeferredBufferConfig;
using JobModel = render::Job::ModelI<DebugDeferredBuffer, Inputs, Config>;

View file

@ -374,11 +374,11 @@ void RenderDeferredSetup::run(const render::RenderContextPointer& renderContext,
const DeferredFramebufferPointer& deferredFramebuffer,
const LightingModelPointer& lightingModel,
const LightStage::FramePointer& lightFrame,
const LightStage::ShadowFramePointer& shadowFrame,
const HazeStage::FramePointer& hazeFrame,
const SurfaceGeometryFramebufferPointer& surfaceGeometryFramebuffer,
const AmbientOcclusionFramebufferPointer& ambientOcclusionFramebuffer,
const SubsurfaceScatteringResourcePointer& subsurfaceScatteringResource,
bool renderShadows) {
const SubsurfaceScatteringResourcePointer& subsurfaceScatteringResource) {
auto args = renderContext->args;
auto& batch = (*args->_batch);
@ -404,7 +404,7 @@ void RenderDeferredSetup::run(const render::RenderContextPointer& renderContext,
batch.setResourceTexture(ru::Texture::DeferredDepth, deferredFramebuffer->getPrimaryDepthTexture());
// FIXME: Different render modes should have different tasks
if (args->_renderMode == RenderArgs::DEFAULT_RENDER_MODE && deferredLightingEffect->isAmbientOcclusionEnabled() && ambientOcclusionFramebuffer) {
if (lightingModel->isAmbientOcclusionEnabled() && ambientOcclusionFramebuffer) {
batch.setResourceTexture(ru::Texture::DeferredObscurance, ambientOcclusionFramebuffer->getOcclusionTexture());
} else {
// need to assign the white texture if ao is off
@ -429,24 +429,23 @@ void RenderDeferredSetup::run(const render::RenderContextPointer& renderContext,
batch.setResourceTexture(ru::Texture::SsscSpecularBeckmann, subsurfaceScatteringResource->getScatteringSpecular());
}
// Global directional light and ambient pass
// Global directional light, maybe shadow and ambient pass
auto lightStage = renderContext->_scene->getStage<LightStage>();
assert(lightStage);
assert(lightStage->getNumLights() > 0);
auto lightAndShadow = lightStage->getCurrentKeyLightAndShadow(*lightFrame);
const auto& globalShadow = lightAndShadow.second;
auto keyLight = lightStage->getCurrentKeyLight(*lightFrame);
// Bind the shadow buffers
if (globalShadow) {
batch.setResourceTexture(ru::Texture::Shadow, globalShadow->map);
// Check if keylight casts shadows
bool keyLightCastShadows{ false };
LightStage::ShadowPointer globalShadow;
if (lightingModel->isShadowEnabled() && shadowFrame && !shadowFrame->_objects.empty()) {
globalShadow = shadowFrame->_objects.front();
if (globalShadow) {
keyLightCastShadows = true;
}
}
auto program = deferredLightingEffect->_directionalSkyboxLight;
LightLocationsPtr locations = deferredLightingEffect->_directionalSkyboxLightLocations;
auto keyLight = lightAndShadow.first;
// Global Ambient light
graphics::LightPointer ambientLight;
if (lightStage && lightFrame->_ambientLights.size()) {
ambientLight = lightStage->getLight(lightFrame->_ambientLights.front());
@ -454,18 +453,10 @@ void RenderDeferredSetup::run(const render::RenderContextPointer& renderContext,
bool hasAmbientMap = (ambientLight != nullptr);
// Setup the global directional pass pipeline
auto program = deferredLightingEffect->_directionalSkyboxLight;
LightLocationsPtr locations = deferredLightingEffect->_directionalSkyboxLightLocations;
{
// Check if keylight casts shadows
bool keyLightCastShadows { false };
if (renderShadows && lightStage && lightFrame->_sunLights.size()) {
graphics::LightPointer keyLight = lightStage->getLight(lightFrame->_sunLights.front());
if (keyLight) {
keyLightCastShadows = keyLight->getCastShadows();
}
}
if (deferredLightingEffect->_shadowMapEnabled && keyLightCastShadows) {
if (keyLightCastShadows) {
// If the keylight has an ambient Map then use the Skybox version of the pass
// otherwise use the ambient sphere version
@ -488,7 +479,8 @@ void RenderDeferredSetup::run(const render::RenderContextPointer& renderContext,
}
}
if (locations->shadowTransform && globalShadow) {
if (keyLightCastShadows && globalShadow) {
batch.setResourceTexture(ru::Texture::Shadow, globalShadow->map);
batch.setUniformBuffer(ru::Buffer::ShadowParams, globalShadow->getBuffer());
}
@ -510,10 +502,7 @@ void RenderDeferredSetup::run(const render::RenderContextPointer& renderContext,
batch.draw(gpu::TRIANGLE_STRIP, 4);
deferredLightingEffect->unsetKeyLightBatch(batch);
for (auto i = 0; i < SHADOW_CASCADE_MAX_COUNT; i++) {
batch.setResourceTexture(ru::Texture::Shadow +i, nullptr);
}
batch.setResourceTexture(ru::Texture::Shadow, nullptr);
}
}
@ -606,9 +595,7 @@ void RenderDeferredCleanup::run(const render::RenderContextPointer& renderContex
}
}
RenderDeferred::RenderDeferred(bool renderShadows):
_renderShadows(renderShadows)
{
// Eagerly initializes the shared DeferredLightingEffect at construction so
// its resources are ready before the job's first run.
RenderDeferred::RenderDeferred() {
    DependencyManager::get<DeferredLightingEffect>()->init();
}
@ -616,18 +603,21 @@ void RenderDeferred::configure(const Config& config) {
}
void RenderDeferred::run(const RenderContextPointer& renderContext, const Inputs& inputs) {
auto args = renderContext->args;
auto deferredTransform = inputs.get0();
auto deferredFramebuffer = inputs.get1();
auto lightingModel = inputs.get2();
auto surfaceGeometryFramebuffer = inputs.get3();
auto ssaoFramebuffer = inputs.get4();
auto subsurfaceScatteringResource = inputs.get5();
auto lightClusters = inputs.get6();
auto args = renderContext->args;
auto extraRenderBuffers = inputs.get2();
auto surfaceGeometryFramebuffer = extraRenderBuffers.get0();
auto ssaoFramebuffer = extraRenderBuffers.get1();
auto subsurfaceScatteringResource = extraRenderBuffers.get2();
const auto& lightFrame = inputs.get7();
const auto& hazeFrame = inputs.get8();
auto lightingModel = inputs.get3();
auto lightClusters = inputs.get4();
const auto& lightFrame = inputs.get5();
const auto& shadowFrame = inputs.get6();
const auto& hazeFrame = inputs.get7();
if (!_gpuTimer) {
_gpuTimer = std::make_shared < gpu::RangeTimer>(__FUNCTION__);
@ -638,7 +628,7 @@ void RenderDeferred::run(const RenderContextPointer& renderContext, const Inputs
args->_batch = &batch;
_gpuTimer->begin(batch);
setupJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, lightFrame, hazeFrame, surfaceGeometryFramebuffer, ssaoFramebuffer, subsurfaceScatteringResource, _renderShadows);
setupJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, lightFrame, shadowFrame, hazeFrame, surfaceGeometryFramebuffer, ssaoFramebuffer, subsurfaceScatteringResource);
lightsJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, surfaceGeometryFramebuffer, lightClusters);
@ -696,7 +686,6 @@ void DefaultLightingSetup::run(const RenderContextPointer& renderContext) {
// Add the global light to the light stage (for later shadow rendering)
// Set this light to be the default
_defaultLightID = lightStage->addLight(lp, true);
lightStage->addShadow(_defaultLightID);
}
auto backgroundStage = renderContext->_scene->getStage<BackgroundStage>();

View file

@ -55,16 +55,9 @@ public:
static void setupLocalLightsBatch(gpu::Batch& batch, const LightClustersPointer& lightClusters);
static void unsetLocalLightsBatch(gpu::Batch& batch);
void setShadowMapEnabled(bool enable) { _shadowMapEnabled = enable; };
void setAmbientOcclusionEnabled(bool enable) { _ambientOcclusionEnabled = enable; }
bool isAmbientOcclusionEnabled() const { return _ambientOcclusionEnabled; }
private:
DeferredLightingEffect() = default;
bool _shadowMapEnabled{ true }; // note that this value is overwritten in the ::configure method
bool _ambientOcclusionEnabled{ false };
graphics::MeshPointer _pointLightMesh;
graphics::MeshPointer getPointLightMesh();
graphics::MeshPointer _spotLightMesh;
@ -146,11 +139,11 @@ public:
const DeferredFramebufferPointer& deferredFramebuffer,
const LightingModelPointer& lightingModel,
const LightStage::FramePointer& lightFrame,
const LightStage::ShadowFramePointer& shadowFrame,
const HazeStage::FramePointer& hazeFrame,
const SurfaceGeometryFramebufferPointer& surfaceGeometryFramebuffer,
const AmbientOcclusionFramebufferPointer& ambientOcclusionFramebuffer,
const SubsurfaceScatteringResourcePointer& subsurfaceScatteringResource,
bool renderShadows);
const SubsurfaceScatteringResourcePointer& subsurfaceScatteringResource);
};
class RenderDeferredLocals {
@ -167,7 +160,6 @@ public:
gpu::BufferView _localLightsBuffer;
RenderDeferredLocals();
};
@ -182,14 +174,14 @@ using RenderDeferredConfig = render::GPUJobConfig;
class RenderDeferred {
public:
using Inputs = render::VaryingSet9<
DeferredFrameTransformPointer, DeferredFramebufferPointer, LightingModelPointer, SurfaceGeometryFramebufferPointer,
AmbientOcclusionFramebufferPointer, SubsurfaceScatteringResourcePointer, LightClustersPointer, LightStage::FramePointer, HazeStage::FramePointer>;
using ExtraDeferredBuffer = render::VaryingSet3<SurfaceGeometryFramebufferPointer, AmbientOcclusionFramebufferPointer, SubsurfaceScatteringResourcePointer>;
using Inputs = render::VaryingSet8<
DeferredFrameTransformPointer, DeferredFramebufferPointer, ExtraDeferredBuffer, LightingModelPointer, LightClustersPointer, LightStage::FramePointer, LightStage::ShadowFramePointer, HazeStage::FramePointer>;
using Config = RenderDeferredConfig;
using JobModel = render::Job::ModelI<RenderDeferred, Inputs, Config>;
RenderDeferred(bool renderShadows = false);
RenderDeferred();
void configure(const Config& config);
@ -203,7 +195,6 @@ protected:
gpu::RangeTimerPointer _gpuTimer;
private:
bool _renderShadows { false };
};
class DefaultLightingSetup {

View file

@ -543,7 +543,7 @@ void LightClusteringPass::configure(const Config& config) {
_freeze = config.freeze;
}
void LightClusteringPass::run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& output) {
void LightClusteringPass::run(const render::RenderContextPointer& renderContext, const Input& inputs, Output& output) {
auto args = renderContext->args;
auto deferredTransform = inputs.get0();
@ -638,10 +638,9 @@ void DebugLightClusters::run(const render::RenderContextPointer& renderContext,
}
auto deferredTransform = inputs.get0();
auto deferredFramebuffer = inputs.get1();
auto lightingModel = inputs.get2();
auto linearDepthTarget = inputs.get3();
auto lightClusters = inputs.get4();
auto lightingModel = inputs.get1();
auto linearDepthTarget = inputs.get2();
auto lightClusters = inputs.get3();
auto args = renderContext->args;

View file

@ -167,16 +167,16 @@ protected:
class LightClusteringPass {
public:
using Inputs = render::VaryingSet4<DeferredFrameTransformPointer, LightingModelPointer, LightStage::FramePointer, LinearDepthFramebufferPointer>;
using Outputs = LightClustersPointer;
using Input = render::VaryingSet4<DeferredFrameTransformPointer, LightingModelPointer, LightStage::FramePointer, LinearDepthFramebufferPointer>;
using Output = LightClustersPointer;
using Config = LightClusteringPassConfig;
using JobModel = render::Job::ModelIO<LightClusteringPass, Inputs, Outputs, Config>;
using JobModel = render::Job::ModelIO<LightClusteringPass, Input, Output, Config>;
LightClusteringPass();
void configure(const Config& config);
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& output);
void run(const render::RenderContextPointer& renderContext, const Input& input, Output& output);
protected:
LightClustersPointer _lightClusters;
@ -213,7 +213,7 @@ protected:
class DebugLightClusters {
public:
using Inputs = render::VaryingSet5 < DeferredFrameTransformPointer, DeferredFramebufferPointer, LightingModelPointer, LinearDepthFramebufferPointer, LightClustersPointer>;
using Inputs = render::VaryingSet4 < DeferredFrameTransformPointer, LightingModelPointer, LinearDepthFramebufferPointer, LightClustersPointer>;
using Config = DebugLightClustersConfig;
using JobModel = render::Job::ModelI<DebugLightClusters, Inputs, Config>;

View file

@ -152,6 +152,11 @@ LightStage::Shadow::Shadow(graphics::LightPointer light, float maxDistance, unsi
setMaxDistance(maxDistance);
}
// Rebind this shadow to a (possibly different) light.
// The pointer is taken by value; the shadow stores its own reference.
void LightStage::Shadow::setLight(graphics::LightPointer light) {
    _light = light;
}
void LightStage::Shadow::setMaxDistance(float value) {
// This overlaping factor isn't really used directly for blending of shadow cascades. It
// just there to be sure the cascades do overlap. The blending width used is relative
@ -345,27 +350,9 @@ LightStage::Index LightStage::addLight(const LightPointer& light, const bool sho
return lightId;
}
LightStage::Index LightStage::addShadow(Index lightIndex, float maxDistance, unsigned int cascadeCount) {
auto light = getLight(lightIndex);
Index shadowId = INVALID_INDEX;
if (light) {
assert(_descs[lightIndex].shadowId == INVALID_INDEX);
shadowId = _shadows.newElement(std::make_shared<Shadow>(light, maxDistance, cascadeCount));
_descs[lightIndex].shadowId = shadowId;
}
return shadowId;
}
LightStage::LightPointer LightStage::removeLight(Index index) {
LightPointer removedLight = _lights.freeElement(index);
if (removedLight) {
auto shadowId = _descs[index].shadowId;
// Remove shadow if one exists for this light
if (shadowId != INVALID_INDEX) {
auto removedShadow = _shadows.freeElement(shadowId);
assert(removedShadow);
assert(removedShadow->getLight() == removedLight);
}
_lightMap.erase(removedLight);
_descs[index] = Desc();
}
@ -389,35 +376,6 @@ LightStage::LightPointer LightStage::getCurrentAmbientLight(const LightStage::Fr
return _lights.get(keyLightId);
}
// Resolve the shadow attached to the frame's key (sun) light, falling back
// to the stage's default light when the frame has no sun light queued.
LightStage::ShadowPointer LightStage::getCurrentKeyShadow(const LightStage::Frame& frame) const {
    const Index sunLightId = frame._sunLights.empty() ? _defaultLightId : frame._sunLights.front();
    auto keyShadow = getShadow(sunLightId);
    // A shadow, when present, must belong to the light it was looked up for.
    assert(keyShadow == nullptr || keyShadow->getLight() == getLight(sunLightId));
    return keyShadow;
}
// Resolve both the key (sun) light and its shadow for the frame, using the
// same default-light fallback as getCurrentKeyShadow.
LightStage::LightAndShadow LightStage::getCurrentKeyLightAndShadow(const LightStage::Frame& frame) const {
    const Index sunLightId = frame._sunLights.empty() ? _defaultLightId : frame._sunLights.front();
    auto keyLight = getLight(sunLightId);
    auto keyShadow = getShadow(sunLightId);
    // Sanity: the shadow (if any) references exactly this light.
    assert(keyShadow == nullptr || keyShadow->getLight() == keyLight);
    return LightAndShadow(keyLight, keyShadow);
}
// Map a light id to its associated shadow id; returns INVALID_INDEX when
// the light id does not reference a live light.
LightStage::Index LightStage::getShadowId(Index lightId) const {
    if (!checkLightId(lightId)) {
        return INVALID_INDEX;
    }
    return _descs[lightId].shadowId;
}
void LightStage::updateLightArrayBuffer(Index lightId) {
auto lightSize = sizeof(graphics::Light::LightSchema);
if (!_lightArrayBuffer) {

View file

@ -76,6 +76,8 @@ public:
Shadow(graphics::LightPointer light, float maxDistance, unsigned int cascadeCount = 1);
void setLight(graphics::LightPointer light);
void setKeylightFrustum(const ViewFrustum& viewFrustum,
float nearDepth = 1.0f, float farDepth = 1000.0f);
void setKeylightCascadeFrustum(unsigned int cascadeIndex, const ViewFrustum& viewFrustum,
@ -93,10 +95,15 @@ public:
const graphics::LightPointer& getLight() const { return _light; }
gpu::TexturePointer map;
#include "Shadows_shared.slh"
class Schema : public ShadowParameters {
public:
Schema();
};
protected:
#include "Shadows_shared.slh"
using Cascades = std::vector<Cascade>;
@ -106,25 +113,17 @@ public:
float _maxDistance;
Cascades _cascades;
class Schema : public ShadowParameters {
public:
Schema();
};
UniformBufferView _schemaBuffer = nullptr;
};
using ShadowPointer = std::shared_ptr<Shadow>;
using Shadows = render::indexed_container::IndexedPointerVector<Shadow>;
Index findLight(const LightPointer& light) const;
Index addLight(const LightPointer& light, const bool shouldSetAsDefault = false);
Index getDefaultLight() { return _defaultLightId; }
Index addShadow(Index lightIndex, float maxDistance = 20.0f, unsigned int cascadeCount = 1U);
LightPointer removeLight(Index index);
bool checkLightId(Index index) const { return _lights.checkIndex(index); }
@ -133,23 +132,7 @@ public:
Index getNumFreeLights() const { return _lights.getNumFreeIndices(); }
Index getNumAllocatedLights() const { return _lights.getNumAllocatedIndices(); }
LightPointer getLight(Index lightId) const {
return _lights.get(lightId);
}
Index getShadowId(Index lightId) const;
ShadowPointer getShadow(Index lightId) const {
return _shadows.get(getShadowId(lightId));
}
using LightAndShadow = std::pair<LightPointer, ShadowPointer>;
LightAndShadow getLightAndShadow(Index lightId) const {
auto light = getLight(lightId);
auto shadow = getShadow(lightId);
assert(shadow == nullptr || shadow->getLight() == light);
return LightAndShadow(light, shadow);
}
LightPointer getLight(Index lightId) const { return _lights.get(lightId); }
LightStage();
@ -182,6 +165,24 @@ public:
};
using FramePointer = std::shared_ptr<Frame>;
// Per-frame list of the shadows to process this frame.
class ShadowFrame {
public:
    ShadowFrame() {}

    // NOTE(review): clear() is a no-op and does not empty _objects —
    // presumably a fresh ShadowFrame is built each frame; confirm callers.
    void clear() {}

    using Object = ShadowPointer;
    using Objects = std::vector<Object>;

    // Append a shadow to this frame's list.
    void pushShadow(const ShadowPointer& shadow) {
        _objects.emplace_back(shadow);
    }

    // Shadows queued for the frame, in push order.
    Objects _objects;
};
using ShadowFramePointer = std::shared_ptr<ShadowFrame>;
Frame _currentFrame;
Index getAmbientOffLight() { return _ambientOffLightId; }
@ -191,8 +192,6 @@ public:
LightPointer getCurrentKeyLight(const LightStage::Frame& frame) const;
LightPointer getCurrentAmbientLight(const LightStage::Frame& frame) const;
ShadowPointer getCurrentKeyShadow(const LightStage::Frame& frame) const;
LightAndShadow getCurrentKeyLightAndShadow(const LightStage::Frame& frame) const;
protected:
@ -204,7 +203,6 @@ protected:
gpu::BufferPointer _lightArrayBuffer;
Lights _lights;
Shadows _shadows;
Descs _descs;
LightMap _lightMap;

View file

@ -187,6 +187,24 @@ bool LightingModel::isBlendshapeEnabled() const {
return (bool)_parametersBuffer.get<Parameters>().enableBlendshape;
}
// Toggle the ambient-occlusion term in the lighting model. The GPU
// parameters buffer is only written on an actual state change.
void LightingModel::setAmbientOcclusion(bool enable) {
    if (isAmbientOcclusionEnabled() != enable) {
        _parametersBuffer.edit<Parameters>().enableAmbientOcclusion = enable ? 1.0f : 0.0f;
    }
}

// The flag lives in the parameters buffer as a float; non-zero means enabled.
bool LightingModel::isAmbientOcclusionEnabled() const {
    return _parametersBuffer.get<Parameters>().enableAmbientOcclusion != 0.0f;
}
// Toggle shadowing in the lighting model. The GPU parameters buffer is only
// written on an actual state change.
void LightingModel::setShadow(bool enable) {
    if (isShadowEnabled() != enable) {
        _parametersBuffer.edit<Parameters>().enableShadow = enable ? 1.0f : 0.0f;
    }
}

// The flag lives in the parameters buffer as a float; non-zero means enabled.
bool LightingModel::isShadowEnabled() const {
    return _parametersBuffer.get<Parameters>().enableShadow != 0.0f;
}
MakeLightingModel::MakeLightingModel() {
_lightingModel = std::make_shared<LightingModel>();
}
@ -218,6 +236,9 @@ void MakeLightingModel::configure(const Config& config) {
_lightingModel->setSkinning(config.enableSkinning);
_lightingModel->setBlendshape(config.enableBlendshape);
_lightingModel->setAmbientOcclusion(config.enableAmbientOcclusion);
_lightingModel->setShadow(config.enableShadow);
}
void MakeLightingModel::run(const render::RenderContextPointer& renderContext, LightingModelPointer& lightingModel) {

View file

@ -76,6 +76,12 @@ public:
void setBlendshape(bool enable);
bool isBlendshapeEnabled() const;
void setAmbientOcclusion(bool enable);
bool isAmbientOcclusionEnabled() const;
void setShadow(bool enable);
bool isShadowEnabled() const;
UniformBufferView getParametersBuffer() const { return _parametersBuffer; }
protected:
@ -112,6 +118,11 @@ protected:
float enableSkinning{ 1.0f };
float enableBlendshape{ 1.0f };
float enableAmbientOcclusion{ 0.0f };
float enableShadow{ 1.0f };
float spare1{ 1.0f };
float spare2{ 1.0f };
Parameters() {}
};
UniformBufferView _parametersBuffer;
@ -152,6 +163,10 @@ class MakeLightingModelConfig : public render::Job::Config {
Q_PROPERTY(bool enableSkinning MEMBER enableSkinning NOTIFY dirty)
Q_PROPERTY(bool enableBlendshape MEMBER enableBlendshape NOTIFY dirty)
Q_PROPERTY(bool enableAmbientOcclusion READ isAmbientOcclusionEnabled WRITE setAmbientOcclusion NOTIFY dirty)
Q_PROPERTY(bool enableShadow READ isShadowEnabled WRITE setShadow NOTIFY dirty)
public:
MakeLightingModelConfig() : render::Job::Config() {} // Make Lighting Model is always on
@ -181,6 +196,17 @@ public:
bool enableSkinning{ true };
bool enableBlendshape{ true };
bool enableAmbientOcclusion{ true };
bool enableShadow{ true };
// Q_PROPERTY accessors: each setter updates the flag and emits dirty()
// (the properties' NOTIFY signal) so listeners see the change.
void setAmbientOcclusion(bool enable) { enableAmbientOcclusion = enable; emit dirty();}
bool isAmbientOcclusionEnabled() const { return enableAmbientOcclusion; }

void setShadow(bool enable) {
    enableShadow = enable; emit dirty();
}
bool isShadowEnabled() const { return enableShadow; }
signals:
void dirty();
};

View file

@ -203,7 +203,7 @@ void ExtractFrustums::run(const render::RenderContextPointer& renderContext, con
RenderArgs* args = renderContext->args;
const auto& lightFrame = inputs;
const auto& shadowFrame = inputs;
// Return view frustum
auto& viewFrustum = output[VIEW_FRUSTUM].edit<ViewFrustumPointer>();
@ -214,38 +214,18 @@ void ExtractFrustums::run(const render::RenderContextPointer& renderContext, con
}
// Return shadow frustum
auto lightStage = args->_scene->getStage<LightStage>(LightStage::getName());
LightStage::ShadowPointer globalShadow;
if (shadowFrame && !shadowFrame->_objects.empty() && shadowFrame->_objects[0]) {
globalShadow = shadowFrame->_objects[0];
}
for (auto i = 0; i < SHADOW_CASCADE_FRUSTUM_COUNT; i++) {
auto& shadowFrustum = output[SHADOW_CASCADE0_FRUSTUM+i].edit<ViewFrustumPointer>();
if (lightStage) {
auto globalShadow = lightStage->getCurrentKeyShadow(*lightFrame);
if (globalShadow && i<(int)globalShadow->getCascadeCount()) {
auto& cascade = globalShadow->getCascade(i);
shadowFrustum = cascade.getFrustum();
} else {
shadowFrustum.reset();
}
if (globalShadow && i<(int)globalShadow->getCascadeCount()) {
auto& cascade = globalShadow->getCascade(i);
shadowFrustum = cascade.getFrustum();
} else {
shadowFrustum.reset();
}
}
}
// Snapshot the current frame of every lighting-related stage so the rest of
// the frame graph works from stable copies.
void FetchCurrentFrames::run(const render::RenderContextPointer& renderContext, Outputs& outputs) {
    const auto& scene = renderContext->_scene;

    auto lights = scene->getStage<LightStage>();
    assert(lights);
    outputs.edit0() = std::make_shared<LightStage::Frame>(lights->_currentFrame);

    auto backgrounds = scene->getStage<BackgroundStage>();
    assert(backgrounds);
    outputs.edit1() = std::make_shared<BackgroundStage::Frame>(backgrounds->_currentFrame);

    auto hazes = scene->getStage<HazeStage>();
    assert(hazes);
    outputs.edit2() = std::make_shared<HazeStage::Frame>(hazes->_currentFrame);

    auto blooms = scene->getStage<BloomStage>();
    assert(blooms);
    outputs.edit3() = std::make_shared<BloomStage::Frame>(blooms->_currentFrame);
}

View file

@ -10,13 +10,8 @@
#define hifi_RenderCommonTask_h
#include <gpu/Pipeline.h>
#include <render/RenderFetchCullSortTask.h>
#include "LightingModel.h"
#include "LightStage.h"
#include "BackgroundStage.h"
#include "HazeStage.h"
#include "BloomStage.h"
#include "LightingModel.h"
class BeginGPURangeTimer {
public:
@ -111,22 +106,11 @@ public:
FRUSTUM_COUNT
};
using Inputs = LightStage::FramePointer;
using Inputs = LightStage::ShadowFramePointer;
using Outputs = render::VaryingArray<ViewFrustumPointer, FRUSTUM_COUNT>;
using JobModel = render::Job::ModelIO<ExtractFrustums, Inputs, Outputs>;
void run(const render::RenderContextPointer& renderContext, const Inputs& inputs, Outputs& output);
};
class FetchCurrentFrames {
public:
using Outputs = render::VaryingSet4<LightStage::FramePointer, BackgroundStage::FramePointer, HazeStage::FramePointer, BloomStage::FramePointer>;
using JobModel = render::Job::ModelO<FetchCurrentFrames, Outputs>;
FetchCurrentFrames() {}
void run(const render::RenderContextPointer& renderContext, Outputs& outputs);
};
#endif // hifi_RenderDeferredTask_h

View file

@ -72,6 +72,23 @@ namespace gr {
}
// Companion task consuming the main deferred task's intermediates (fetched
// items, shadow task output, lighting stages, light clusters, deferred
// buffers, frame transform, jitter, lighting model); its jobs are added in
// build(). Presumably debug/visualization only — confirm usage.
class RenderDeferredTaskDebug {
public:
    // Auxiliary render targets and resources forwarded for inspection.
    using ExtraBuffers = render::VaryingSet6<LinearDepthFramebufferPointer, SurfaceGeometryFramebufferPointer, AmbientOcclusionFramebufferPointer, gpu::BufferView, SubsurfaceScatteringResourcePointer, VelocityFramebufferPointer>;
    using Input = render::VaryingSet9<RenderFetchCullSortTask::Output, RenderShadowTask::Output,
        AssembleLightingStageTask::Output, LightClusteringPass::Output,
        PrepareDeferred::Outputs, ExtraBuffers, GenerateDeferredFrameTransform::Output,
        JitterSample::Output, LightingModel>;
    using JobModel = render::Task::ModelI<RenderDeferredTaskDebug, Input>;

    RenderDeferredTaskDebug();

    void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs);

private:
};
// Default construction; all jobs are wired up later in build().
RenderDeferredTask::RenderDeferredTask()
{
}
@ -86,37 +103,45 @@ void RenderDeferredTask::configure(const Config& config) {
upsamplePrimaryBufferConfig->setProperty("factor", 1.0f / config.resolutionScale);
}
// Chain three SelectItems jobs so metas, then opaques, then transparents are
// folded into one selection named selectionName; returns the final combined
// selection varying.
const render::Varying RenderDeferredTask::addSelectItemJobs(JobModel& task, const char* selectionName,
                                                            const render::Varying& metas,
                                                            const render::Varying& opaques,
                                                            const render::Varying& transparents) {
    // Seed the chain with the metas bucket alone.
    const auto metaIn = SelectItems::Inputs(metas, Varying(), std::string()).asVarying();
    const auto metasSelected = task.addJob<SelectItems>("MetaSelection", metaIn, selectionName);
    // Fold the opaques into the running selection.
    const auto opaqueIn = SelectItems::Inputs(opaques, metasSelected, std::string()).asVarying();
    const auto metasAndOpaquesSelected = task.addJob<SelectItems>("OpaqueSelection", opaqueIn, selectionName);
    // And finally the transparents.
    const auto transparentIn = SelectItems::Inputs(transparents, metasAndOpaquesSelected, std::string()).asVarying();
    return task.addJob<SelectItems>("TransparentSelection", transparentIn, selectionName);
}
void RenderDeferredTask::build(JobModel& task, const render::Varying& input, render::Varying& output, bool renderShadows) {
const auto& inputs = input.get<Input>();
const auto& items = inputs.get0();
void RenderDeferredTask::build(JobModel& task, const render::Varying& input, render::Varying& output) {
auto fadeEffect = DependencyManager::get<FadeEffect>();
// Prepare the ShapePipelines
ShapePlumberPointer shapePlumber = std::make_shared<ShapePlumber>();
initDeferredPipelines(*shapePlumber, fadeEffect->getBatchSetter(), fadeEffect->getItemUniformSetter());
// Extract opaques / transparents / lights / metas / overlays / background
const auto& opaques = items.get0()[RenderFetchCullSortTask::OPAQUE_SHAPE];
const auto& transparents = items.get0()[RenderFetchCullSortTask::TRANSPARENT_SHAPE];
const auto& lights = items.get0()[RenderFetchCullSortTask::LIGHT];
const auto& metas = items.get0()[RenderFetchCullSortTask::META];
const auto& overlayOpaques = items.get0()[RenderFetchCullSortTask::OVERLAY_OPAQUE_SHAPE];
const auto& overlayTransparents = items.get0()[RenderFetchCullSortTask::OVERLAY_TRANSPARENT_SHAPE];
//const auto& background = items.get0()[RenderFetchCullSortTask::BACKGROUND];
const auto& spatialSelection = items[1];
const auto& inputs = input.get<Input>();
// Separate the fetched items
const auto& fetchedItems = inputs.get0();
const auto& items = fetchedItems.get0();
// Extract opaques / transparents / lights / metas / overlays / background
const auto& opaques = items[RenderFetchCullSortTask::OPAQUE_SHAPE];
const auto& transparents = items[RenderFetchCullSortTask::TRANSPARENT_SHAPE];
const auto& overlaysInFrontOpaque = items[RenderFetchCullSortTask::LAYER_FRONT_OPAQUE_SHAPE];
const auto& overlaysInFrontTransparent = items[RenderFetchCullSortTask::LAYER_FRONT_TRANSPARENT_SHAPE];
const auto& overlaysHUDOpaque = items[RenderFetchCullSortTask::LAYER_HUD_OPAQUE_SHAPE];
const auto& overlaysHUDTransparent = items[RenderFetchCullSortTask::LAYER_HUD_TRANSPARENT_SHAPE];
// Lighting model comes next, the big configuration of the view
const auto& lightingModel = inputs[1];
// Extract the Lighting Stages Current frame ( and zones)
const auto& lightingStageInputs = inputs.get2();
// Fetch the current frame stacks from all the stages
const auto currentStageFrames = lightingStageInputs.get0();
const auto lightFrame = currentStageFrames[0];
const auto backgroundFrame = currentStageFrames[1];
const auto& hazeFrame = currentStageFrames[2];
const auto& bloomFrame = currentStageFrames[3];
// Shadow Task Outputs
const auto& shadowTaskOutputs = inputs.get3();
// Shadow Stage Frame
const auto shadowFrame = shadowTaskOutputs[1];
fadeEffect->build(task, opaques);
@ -127,7 +152,6 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
// Prepare deferred, generate the shared Deferred Frame Transform. Only valid with the scaled frame buffer
const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform", jitter);
const auto lightingModel = task.addJob<MakeLightingModel>("LightingModel");
const auto opaqueRangeTimer = task.addJob<BeginGPURangeTimer>("BeginOpaqueRangeTimer", "DrawOpaques");
@ -164,38 +188,25 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
const auto scatteringResource = task.addJob<SubsurfaceScattering>("Scattering");
// AO job
const auto ambientOcclusionInputs = AmbientOcclusionEffect::Inputs(deferredFrameTransform, deferredFramebuffer, linearDepthTarget).asVarying();
const auto ambientOcclusionInputs = AmbientOcclusionEffect::Input(lightingModel, deferredFrameTransform, deferredFramebuffer, linearDepthTarget).asVarying();
const auto ambientOcclusionOutputs = task.addJob<AmbientOcclusionEffect>("AmbientOcclusion", ambientOcclusionInputs);
const auto ambientOcclusionFramebuffer = ambientOcclusionOutputs.getN<AmbientOcclusionEffect::Outputs>(0);
const auto ambientOcclusionUniforms = ambientOcclusionOutputs.getN<AmbientOcclusionEffect::Outputs>(1);
const auto ambientOcclusionFramebuffer = ambientOcclusionOutputs.getN<AmbientOcclusionEffect::Output>(0);
const auto ambientOcclusionUniforms = ambientOcclusionOutputs.getN<AmbientOcclusionEffect::Output>(1);
// Velocity
const auto velocityBufferInputs = VelocityBufferPass::Inputs(deferredFrameTransform, deferredFramebuffer).asVarying();
const auto velocityBufferOutputs = task.addJob<VelocityBufferPass>("VelocityBuffer", velocityBufferInputs);
const auto velocityBuffer = velocityBufferOutputs.getN<VelocityBufferPass::Outputs>(0);
// Clear Light, Haze, Bloom, and Skybox Stages and render zones from the general metas bucket
const auto zones = task.addJob<ZoneRendererTask>("ZoneRenderer", metas);
// Draw Lights just adds the lights to the current list of lights to deal with. Not really a GPU job for now.
task.addJob<DrawLight>("DrawLight", lights);
// Fetch the current frame stacks from all the stages
const auto currentFrames = task.addJob<FetchCurrentFrames>("FetchCurrentFrames");
const auto lightFrame = currentFrames.getN<FetchCurrentFrames::Outputs>(0);
const auto backgroundFrame = currentFrames.getN<FetchCurrentFrames::Outputs>(1);
const auto hazeFrame = currentFrames.getN<FetchCurrentFrames::Outputs>(2);
const auto bloomFrame = currentFrames.getN<FetchCurrentFrames::Outputs>(3);
// Light Clustering
// Create the cluster grid of lights, cpu job for now
const auto lightClusteringPassInputs = LightClusteringPass::Inputs(deferredFrameTransform, lightingModel, lightFrame, linearDepthTarget).asVarying();
const auto lightClusteringPassInputs = LightClusteringPass::Input(deferredFrameTransform, lightingModel, lightFrame, linearDepthTarget).asVarying();
const auto lightClusters = task.addJob<LightClusteringPass>("LightClustering", lightClusteringPassInputs);
// DeferredBuffer is complete, now let's shade it into the LightingBuffer
const auto deferredLightingInputs = RenderDeferred::Inputs(deferredFrameTransform, deferredFramebuffer, lightingModel,
surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, scatteringResource, lightClusters, lightFrame, hazeFrame).asVarying();
task.addJob<RenderDeferred>("RenderDeferred", deferredLightingInputs, renderShadows);
const auto extraDeferredBuffer = RenderDeferred::ExtraDeferredBuffer(surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, scatteringResource).asVarying();
const auto deferredLightingInputs = RenderDeferred::Inputs(deferredFrameTransform, deferredFramebuffer, extraDeferredBuffer, lightingModel, lightClusters, lightFrame, shadowFrame, hazeFrame).asVarying();
task.addJob<RenderDeferred>("RenderDeferred", deferredLightingInputs);
// Similar to light stage, background stage has been filled by several potential render items and resolved for the frame in this job
const auto backgroundInputs = DrawBackgroundStage::Inputs(lightingModel, backgroundFrame).asVarying();
@ -205,43 +216,22 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
task.addJob<DrawHaze>("DrawHazeDeferred", drawHazeInputs);
// Render transparent objects forward in LightingBuffer
const auto transparentsInputs = DrawDeferred::Inputs(transparents, hazeFrame, lightFrame, lightingModel, lightClusters, jitter).asVarying();
const auto transparentsInputs = DrawDeferred::Inputs(transparents, hazeFrame, lightFrame, lightingModel, lightClusters, shadowFrame, jitter).asVarying();
task.addJob<DrawDeferred>("DrawTransparentDeferred", transparentsInputs, shapePlumber);
// Light Cluster Grid Debugging job
{
const auto debugLightClustersInputs = DebugLightClusters::Inputs(deferredFrameTransform, deferredFramebuffer, lightingModel, linearDepthTarget, lightClusters).asVarying();
task.addJob<DebugLightClusters>("DebugLightClusters", debugLightClustersInputs);
}
const auto outlineRangeTimer = task.addJob<BeginGPURangeTimer>("BeginHighlightRangeTimer", "Highlight");
// Select items that need to be outlined
const auto selectionBaseName = "contextOverlayHighlightList";
const auto selectedItems = addSelectItemJobs(task, selectionBaseName, metas, opaques, transparents);
const auto outlineInputs = DrawHighlightTask::Inputs(items.get0(), deferredFramebuffer, lightingFramebuffer, deferredFrameTransform, jitter).asVarying();
const auto outlineInputs = DrawHighlightTask::Inputs(items, deferredFramebuffer, lightingFramebuffer, deferredFrameTransform, jitter).asVarying();
task.addJob<DrawHighlightTask>("DrawHighlight", outlineInputs);
task.addJob<EndGPURangeTimer>("HighlightRangeTimer", outlineRangeTimer);
const auto overlaysInFrontRangeTimer = task.addJob<BeginGPURangeTimer>("BeginOverlaysInFrontRangeTimer", "BeginOverlaysInFrontRangeTimer");
// Layered Overlays
const auto filteredOverlaysOpaque = task.addJob<FilterLayeredItems>("FilterOverlaysLayeredOpaque", overlayOpaques, render::hifi::LAYER_3D_FRONT);
const auto filteredOverlaysTransparent = task.addJob<FilterLayeredItems>("FilterOverlaysLayeredTransparent", overlayTransparents, render::hifi::LAYER_3D_FRONT);
const auto overlaysInFrontOpaque = filteredOverlaysOpaque.getN<FilterLayeredItems::Outputs>(0);
const auto overlaysInFrontTransparent = filteredOverlaysTransparent.getN<FilterLayeredItems::Outputs>(0);
// We don't want the overlay to clear the deferred frame buffer depth because we would like to keep it for debugging visualisation
// task.addJob<SetSeparateDeferredDepthBuffer>("SeparateDepthForOverlay", deferredFramebuffer);
// Layered Over (in front)
const auto overlayInFrontOpaquesInputs = DrawOverlay3D::Inputs(overlaysInFrontOpaque, lightingModel, jitter).asVarying();
const auto overlayInFrontTransparentsInputs = DrawOverlay3D::Inputs(overlaysInFrontTransparent, lightingModel, jitter).asVarying();
task.addJob<DrawOverlay3D>("DrawOverlayInFrontOpaque", overlayInFrontOpaquesInputs, true);
task.addJob<DrawOverlay3D>("DrawOverlayInFrontTransparent", overlayInFrontTransparentsInputs, false);
task.addJob<EndGPURangeTimer>("OverlaysInFrontRangeTimer", overlaysInFrontRangeTimer);
const auto toneAndPostRangeTimer = task.addJob<BeginGPURangeTimer>("BeginToneAndPostRangeTimer", "PostToneOverlaysAntialiasing");
// AA job before bloom to limit flickering
@ -256,14 +246,115 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
const auto toneMappingInputs = ToneMappingDeferred::Inputs(lightingFramebuffer, scaledPrimaryFramebuffer).asVarying();
task.addJob<ToneMappingDeferred>("ToneMapping", toneMappingInputs);
// Debugging task is happening in the "over" layer after tone mapping and just before HUD
{ // Debug the bounds of the rendered items, still look at the zbuffer
const auto extraDebugBuffers = RenderDeferredTaskDebug::ExtraBuffers(linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, ambientOcclusionFramebuffer, scatteringResource, velocityBuffer);
const auto debugInputs = RenderDeferredTaskDebug::Input(fetchedItems, shadowTaskOutputs, lightingStageInputs, lightClusters, prepareDeferredOutputs, extraDebugBuffers,
deferredFrameTransform, jitter, lightingModel).asVarying();
task.addJob<RenderDeferredTaskDebug>("DebugRenderDeferredTask", debugInputs);
}
// Upscale to final resolution
const auto primaryFramebuffer = task.addJob<render::Upsample>("PrimaryBufferUpscale", scaledPrimaryFramebuffer);
// Composite the HUD and HUD overlays
task.addJob<CompositeHUD>("HUD");
const auto nullJitter = Varying(glm::vec2(0.0f, 0.0f));
const auto overlayHUDOpaquesInputs = DrawOverlay3D::Inputs(overlaysHUDOpaque, lightingModel, nullJitter).asVarying();
const auto overlayHUDTransparentsInputs = DrawOverlay3D::Inputs(overlaysHUDTransparent, lightingModel, nullJitter).asVarying();
task.addJob<DrawOverlay3D>("DrawOverlayHUDOpaque", overlayHUDOpaquesInputs, true);
task.addJob<DrawOverlay3D>("DrawOverlayHUDTransparent", overlayHUDTransparentsInputs, false);
task.addJob<EndGPURangeTimer>("ToneAndPostRangeTimer", toneAndPostRangeTimer);
// Blit!
task.addJob<Blit>("Blit", primaryFramebuffer);
}
RenderDeferredTaskDebug::RenderDeferredTaskDebug() {
}
void RenderDeferredTaskDebug::build(JobModel& task, const render::Varying& input, render::Varying& outputs) {
const auto& inputs = input.get<Input>();
// RenderFetchCullSortTask out
const auto& fetchCullSortTaskOut = inputs.get0();
const auto& items = fetchCullSortTaskOut.get0();
// Extract opaques / transparents / lights / metas / overlays InFront and HUD / background
const auto& opaques = items[RenderFetchCullSortTask::OPAQUE_SHAPE];
const auto& transparents = items[RenderFetchCullSortTask::TRANSPARENT_SHAPE];
const auto& lights = items[RenderFetchCullSortTask::LIGHT];
const auto& metas = items[RenderFetchCullSortTask::META];
const auto& overlaysInFrontOpaque = items[RenderFetchCullSortTask::LAYER_FRONT_OPAQUE_SHAPE];
const auto& overlaysInFrontTransparent = items[RenderFetchCullSortTask::LAYER_FRONT_TRANSPARENT_SHAPE];
const auto& overlaysHUDOpaque = items[RenderFetchCullSortTask::LAYER_HUD_OPAQUE_SHAPE];
const auto& overlaysHUDTransparent = items[RenderFetchCullSortTask::LAYER_HUD_TRANSPARENT_SHAPE];
const auto& spatialSelection = fetchCullSortTaskOut[1];
// RenderShadowTask out
const auto& shadowOut = inputs.get1();
const auto& renderShadowTaskOut = shadowOut[0];
const auto& shadowFrame = shadowOut[1];
// Extract the Lighting Stages Current frame ( and zones)
const auto lightingStageInputs = inputs.get2();
// Fetch the current frame stacks from all the stages
const auto stageCurrentFrames = lightingStageInputs.get0();
const auto lightFrame = stageCurrentFrames[0];
const auto backgroundFrame = stageCurrentFrames[1];
const auto hazeFrame = stageCurrentFrames[2];
const auto bloomFrame = stageCurrentFrames[3];
// Zones
const auto& zones = lightingStageInputs[1];
// Light Cluster
const auto& lightClusters = inputs[3];
// PrepareDeferred out
const auto& prepareDeferredOutputs = inputs.get4();
const auto& deferredFramebuffer = prepareDeferredOutputs[0];
// extraDeferredBuffer
const auto& extraDeferredBuffer = inputs.get5();
const auto& linearDepthTarget = extraDeferredBuffer[0];
const auto& surfaceGeometryFramebuffer = extraDeferredBuffer[1];
const auto& ambientOcclusionFramebuffer = extraDeferredBuffer[2];
const auto& ambientOcclusionUniforms = extraDeferredBuffer[3];
const auto& scatteringResource = extraDeferredBuffer[4];
const auto& velocityBuffer = extraDeferredBuffer[5];
// GenerateDeferredFrameTransform out
const auto& deferredFrameTransform = inputs[6];
// Jitter out
const auto& jitter = inputs[7];
// Lighting Model out
const auto& lightingModel = inputs[8];
// Light Cluster Grid Debugging job
{
const auto debugLightClustersInputs = DebugLightClusters::Inputs(deferredFrameTransform, lightingModel, linearDepthTarget, lightClusters).asVarying();
task.addJob<DebugLightClusters>("DebugLightClusters", debugLightClustersInputs);
}
{ // Debug the bounds of the rendered items, still look at the zbuffer
task.addJob<DrawBounds>("DrawMetaBounds", metas);
task.addJob<DrawBounds>("DrawOpaqueBounds", opaques);
task.addJob<DrawBounds>("DrawTransparentBounds", transparents);
task.addJob<DrawBounds>("DrawLightBounds", lights);
task.addJob<DrawBounds>("DrawZones", zones);
const auto frustums = task.addJob<ExtractFrustums>("ExtractFrustums", lightFrame);
const auto frustums = task.addJob<ExtractFrustums>("ExtractFrustums", shadowFrame);
const auto viewFrustum = frustums.getN<ExtractFrustums::Outputs>(ExtractFrustums::VIEW_FRUSTUM);
task.addJob<DrawFrustum>("DrawViewFrustum", viewFrustum, glm::vec3(0.0f, 1.0f, 0.0f));
for (auto i = 0; i < ExtractFrustums::SHADOW_CASCADE_FRUSTUM_COUNT; i++) {
@ -272,13 +363,26 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
char jobName[64];
sprintf(jobName, "DrawShadowFrustum%d", i);
task.addJob<DrawFrustum>(jobName, shadowFrustum, glm::vec3(0.0f, tint, 1.0f));
if (!inputs[1].isNull()) {
const auto& shadowCascadeSceneBBoxes = inputs.get1();
if (!renderShadowTaskOut.isNull()) {
const auto& shadowCascadeSceneBBoxes = renderShadowTaskOut;
const auto shadowBBox = shadowCascadeSceneBBoxes[ExtractFrustums::SHADOW_CASCADE0_FRUSTUM + i];
sprintf(jobName, "DrawShadowBBox%d", i);
task.addJob<DrawAABox>(jobName, shadowBBox, glm::vec3(1.0f, tint, 0.0f));
}
}
}
{ // Debug Selection...
// TODO: It's busted
// Select items that need to be outlined and show them
const auto selectionBaseName = "contextOverlayHighlightList";
const auto selectMetaInput = SelectItems::Inputs(metas, Varying(), std::string()).asVarying();
const auto selectedMetas = task.addJob<SelectItems>("MetaSelection", selectMetaInput, selectionBaseName);
const auto selectMetaAndOpaqueInput = SelectItems::Inputs(opaques, selectedMetas, std::string()).asVarying();
const auto selectedMetasAndOpaques = task.addJob<SelectItems>("OpaqueSelection", selectMetaAndOpaqueInput, selectionBaseName);
const auto selectItemInput = SelectItems::Inputs(transparents, selectedMetasAndOpaques, std::string()).asVarying();
const auto selectedItems = task.addJob<SelectItems>("TransparentSelection", selectItemInput, selectionBaseName);
// Render.getConfig("RenderMainView.DrawSelectionBounds").enabled = true
task.addJob<DrawBounds>("DrawSelectionBounds", selectedItems);
@ -289,10 +393,16 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
task.addJob<DrawBounds>("DrawOverlayInFrontTransparentBounds", overlaysInFrontTransparent);
}
{ // Debug the bounds of the rendered Overlay items that are marked drawHUDLayer, still look at the zbuffer
task.addJob<DrawBounds>("DrawOverlayHUDOpaqueBounds", overlaysHUDOpaque);
task.addJob<DrawBounds>("DrawOverlayHUDTransparentBounds", overlaysHUDTransparent);
}
// Debugging stages
{
// Debugging Deferred buffer job
const auto debugFramebuffers = render::Varying(DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, velocityBuffer, deferredFrameTransform, lightFrame));
const auto debugFramebuffers = DebugDeferredBuffer::Inputs(deferredFramebuffer, linearDepthTarget, surfaceGeometryFramebuffer, ambientOcclusionFramebuffer, velocityBuffer, deferredFrameTransform, shadowFrame).asVarying();
task.addJob<DebugDeferredBuffer>("DebugDeferredBuffer", debugFramebuffers);
const auto debugSubsurfaceScatteringInputs = DebugSubsurfaceScattering::Inputs(deferredFrameTransform, deferredFramebuffer, lightingModel,
@ -310,7 +420,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
// Status icon rendering job
{
// Grab a texture map representing the different status icons and assign that to the drawStatsuJob
// Grab a texture map representing the different status icons and assign that to the drawStatusJob
auto iconMapPath = PathUtils::resourcesPath() + "icons/statusIconAtlas.svg";
auto statusIconMap = DependencyManager::get<TextureCache>()->getImageTexture(iconMapPath, image::TextureUsage::STRICT_TEXTURE);
const auto drawStatusInputs = DrawStatus::Input(opaques, jitter).asVarying();
@ -319,34 +429,13 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
const auto debugZoneInputs = DebugZoneLighting::Inputs(deferredFrameTransform, lightFrame, backgroundFrame).asVarying();
task.addJob<DebugZoneLighting>("DrawZoneStack", debugZoneInputs);
}
// Upscale to final resolution
const auto primaryFramebuffer = task.addJob<render::Upsample>("PrimaryBufferUpscale", scaledPrimaryFramebuffer);
// Composite the HUD and HUD overlays
task.addJob<CompositeHUD>("HUD");
const auto overlaysHUDOpaque = filteredOverlaysOpaque.getN<FilterLayeredItems::Outputs>(1);
const auto overlaysHUDTransparent = filteredOverlaysTransparent.getN<FilterLayeredItems::Outputs>(1);
const auto nullJitter = Varying(glm::vec2(0.0f, 0.0f));
const auto overlayHUDOpaquesInputs = DrawOverlay3D::Inputs(overlaysHUDOpaque, lightingModel, nullJitter).asVarying();
const auto overlayHUDTransparentsInputs = DrawOverlay3D::Inputs(overlaysHUDTransparent, lightingModel, nullJitter).asVarying();
task.addJob<DrawOverlay3D>("DrawOverlayHUDOpaque", overlayHUDOpaquesInputs, true);
task.addJob<DrawOverlay3D>("DrawOverlayHUDTransparent", overlayHUDTransparentsInputs, false);
{ // Debug the bounds of the rendered Overlay items that are marked drawHUDLayer, still look at the zbuffer
task.addJob<DrawBounds>("DrawOverlayHUDOpaqueBounds", overlaysHUDOpaque);
task.addJob<DrawBounds>("DrawOverlayHUDTransparentBounds", overlaysHUDTransparent);
}
task.addJob<EndGPURangeTimer>("ToneAndPostRangeTimer", toneAndPostRangeTimer);
// Blit!
task.addJob<Blit>("Blit", primaryFramebuffer);
}
void DrawDeferred::run(const RenderContextPointer& renderContext, const Inputs& inputs) {
assert(renderContext->args);
assert(renderContext->args->hasViewFrustum());
@ -358,7 +447,8 @@ void DrawDeferred::run(const RenderContextPointer& renderContext, const Inputs&
const auto& lightFrame = inputs.get2();
const auto& lightingModel = inputs.get3();
const auto& lightClusters = inputs.get4();
const auto jitter = inputs.get5();
// Not needed yet: const auto& shadowFrame = inputs.get5();
const auto jitter = inputs.get6();
auto deferredLightingEffect = DependencyManager::get<DeferredLightingEffect>();
RenderArgs* args = renderContext->args;

View file

@ -14,10 +14,10 @@
#include <gpu/Pipeline.h>
#include <render/RenderFetchCullSortTask.h>
#include "AssembleLightingStageTask.h"
#include "LightingModel.h"
#include "LightClusters.h"
#include "RenderShadowTask.h"
#include "HazeStage.h"
class DrawDeferredConfig : public render::Job::Config {
Q_OBJECT
@ -43,7 +43,7 @@ protected:
class DrawDeferred {
public:
using Inputs = render::VaryingSet6<render::ItemBounds, HazeStage::FramePointer, LightStage::FramePointer, LightingModelPointer, LightClustersPointer, glm::vec2>;
using Inputs = render::VaryingSet7<render::ItemBounds, HazeStage::FramePointer, LightStage::FramePointer, LightingModelPointer, LightClustersPointer, LightStage::ShadowFramePointer, glm::vec2>;
using Config = DrawDeferredConfig;
using JobModel = render::Job::ModelI<DrawDeferred, Inputs, Config>;
@ -137,21 +137,16 @@ signals:
class RenderDeferredTask {
public:
using Input = render::VaryingSet2<RenderFetchCullSortTask::Output, RenderShadowTask::Output>;
using Input = render::VaryingSet4<RenderFetchCullSortTask::Output, LightingModelPointer, AssembleLightingStageTask::Output, RenderShadowTask::Output>;
using Config = RenderDeferredTaskConfig;
using JobModel = render::Task::ModelI<RenderDeferredTask, Input, Config>;
RenderDeferredTask();
void configure(const Config& config);
void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, bool renderShadows);
void build(JobModel& task, const render::Varying& input, render::Varying& output);
private:
static const render::Varying addSelectItemJobs(JobModel& task,
const char* selectionName,
const render::Varying& metas,
const render::Varying& opaques,
const render::Varying& transparents);
};
#endif // hifi_RenderDeferredTask_h

View file

@ -48,39 +48,46 @@ using namespace render;
extern void initForwardPipelines(ShapePlumber& plumber);
void RenderForwardTask::build(JobModel& task, const render::Varying& input, render::Varying& output) {
auto items = input.get<Input>();
auto fadeEffect = DependencyManager::get<FadeEffect>();
// Prepare the ShapePipelines
auto fadeEffect = DependencyManager::get<FadeEffect>();
ShapePlumberPointer shapePlumber = std::make_shared<ShapePlumber>();
initForwardPipelines(*shapePlumber);
// Extract opaques / transparents / lights / metas / overlays / background
const auto& opaques = items.get0()[RenderFetchCullSortTask::OPAQUE_SHAPE];
const auto& transparents = items.get0()[RenderFetchCullSortTask::TRANSPARENT_SHAPE];
//const auto& lights = items.get0()[RenderFetchCullSortTask::LIGHT];
const auto& metas = items.get0()[RenderFetchCullSortTask::META];
const auto& overlayOpaques = items.get0()[RenderFetchCullSortTask::OVERLAY_OPAQUE_SHAPE];
const auto& overlayTransparents = items.get0()[RenderFetchCullSortTask::OVERLAY_TRANSPARENT_SHAPE];
// Unpack inputs
const auto& inputs = input.get<Input>();
// Separate the fetched items
const auto& fetchedItems = inputs.get0();
//const auto& background = items.get0()[RenderFetchCullSortTask::BACKGROUND];
//const auto& spatialSelection = items[1];
const auto& items = fetchedItems.get0();
// Extract opaques / transparents / lights / metas / overlays / background
const auto& opaques = items[RenderFetchCullSortTask::OPAQUE_SHAPE];
const auto& transparents = items[RenderFetchCullSortTask::TRANSPARENT_SHAPE];
const auto& metas = items[RenderFetchCullSortTask::META];
const auto& overlaysInFrontOpaque = items[RenderFetchCullSortTask::LAYER_FRONT_OPAQUE_SHAPE];
const auto& overlaysInFrontTransparent = items[RenderFetchCullSortTask::LAYER_FRONT_TRANSPARENT_SHAPE];
// TODO: Re-enable the rendering of the HUD overlays
// const auto& overlaysHUDOpaque = items[RenderFetchCullSortTask::LAYER_HUD_OPAQUE_SHAPE];
// const auto& overlaysHUDTransparent = items[RenderFetchCullSortTask::LAYER_HUD_TRANSPARENT_SHAPE];
// Lighting model comes next, the big configuration of the view
const auto& lightingModel = inputs[1];
// Extract the Lighting Stages Current frame ( and zones)
const auto& lightingStageInputs = inputs.get2();
// Fetch the current frame stacks from all the stages
const auto currentStageFrames = lightingStageInputs.get0();
const auto lightFrame = currentStageFrames[0];
const auto backgroundFrame = currentStageFrames[1];
const auto& zones = lightingStageInputs[1];
// First job, alter faded
fadeEffect->build(task, opaques);
// Prepare objects shared by several jobs
const auto deferredFrameTransform = task.addJob<GenerateDeferredFrameTransform>("DeferredFrameTransform");
const auto lightingModel = task.addJob<MakeLightingModel>("LightingModel");
// Filter zones from the general metas bucket
const auto zones = task.addJob<ZoneRendererTask>("ZoneRenderer", metas);
// Fetch the current frame stacks from all the stages
const auto currentFrames = task.addJob<FetchCurrentFrames>("FetchCurrentFrames");
const auto lightFrame = currentFrames.getN<FetchCurrentFrames::Outputs>(0);
const auto backgroundFrame = currentFrames.getN<FetchCurrentFrames::Outputs>(1);
//const auto hazeFrame = currentFrames.getN<FetchCurrentFrames::Outputs>(2);
//const auto bloomFrame = currentFrames.getN<FetchCurrentFrames::Outputs>(3);
// GPU jobs: Start preparing the main framebuffer
const auto framebuffer = task.addJob<PrepareFramebuffer>("PrepareFramebuffer");
@ -91,12 +98,9 @@ void RenderForwardTask::build(JobModel& task, const render::Varying& input, rend
task.addJob<PrepareStencil>("PrepareStencil", framebuffer);
// Layered Overlays
const auto filteredOverlaysOpaque = task.addJob<FilterLayeredItems>("FilterOverlaysLayeredOpaque", overlayOpaques, render::hifi::LAYER_3D_FRONT);
const auto filteredOverlaysTransparent = task.addJob<FilterLayeredItems>("FilterOverlaysLayeredTransparent", overlayTransparents, render::hifi::LAYER_3D_FRONT);
const auto overlaysInFrontOpaque = filteredOverlaysOpaque.getN<FilterLayeredItems::Outputs>(0);
const auto overlaysInFrontTransparent = filteredOverlaysTransparent.getN<FilterLayeredItems::Outputs>(0);
const auto nullJitter = Varying(glm::vec2(0.0f, 0.0f));
// Layered Over (in front)
const auto overlayInFrontOpaquesInputs = DrawOverlay3D::Inputs(overlaysInFrontOpaque, lightingModel, nullJitter).asVarying();
const auto overlayInFrontTransparentsInputs = DrawOverlay3D::Inputs(overlaysInFrontTransparent, lightingModel, nullJitter).asVarying();
task.addJob<DrawOverlay3D>("DrawOverlayInFrontOpaque", overlayInFrontOpaquesInputs, true);

View file

@ -14,17 +14,17 @@
#include <gpu/Pipeline.h>
#include <render/RenderFetchCullSortTask.h>
#include "AssembleLightingStageTask.h"
#include "LightingModel.h"
#include "LightStage.h"
class RenderForwardTask {
public:
using Input = RenderFetchCullSortTask::Output;
using Input = render::VaryingSet3<RenderFetchCullSortTask::Output, LightingModelPointer, AssembleLightingStageTask::Output>;
using JobModel = render::Task::ModelI<RenderForwardTask, Input>;
RenderForwardTask() {}
void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs);
void build(JobModel& task, const render::Varying& input, render::Varying& output);
};
class PrepareFramebuffer {

View file

@ -25,6 +25,7 @@
#include "RenderUtilsLogging.h"
#include "RenderCommonTask.h"
#include "AssembleLightingStageTask.h"
#include "FadeEffect.h"
@ -32,13 +33,15 @@
// but are readjusted afterwards
#define SHADOW_FRUSTUM_NEAR 1.0f
#define SHADOW_FRUSTUM_FAR 500.0f
static const unsigned int SHADOW_CASCADE_COUNT{ 4 };
static const float SHADOW_MAX_DISTANCE{ 40.0f };
using namespace render;
extern void initZPassPipelines(ShapePlumber& plumber, gpu::StatePointer state, const render::ShapePipeline::BatchSetter& batchSetter, const render::ShapePipeline::ItemSetter& itemSetter);
void RenderShadowTask::configure(const Config& configuration) {
DependencyManager::get<DeferredLightingEffect>()->setShadowMapEnabled(configuration.isEnabled());
//DependencyManager::get<DeferredLightingEffect>()->setShadowMapEnabled(configuration.isEnabled());
// This is a task, so must still propagate configure() to its Jobs
// Task::configure(configuration);
}
@ -57,11 +60,13 @@ void RenderShadowTask::build(JobModel& task, const render::Varying& input, rende
// FIXME: calling this here before the zones/lights are drawn during the deferred/forward passes means we're actually using the frames from the previous draw
// Fetch the current frame stacks from all the stages
const auto currentFrames = task.addJob<FetchCurrentFrames>("FetchCurrentFrames");
const auto lightFrame = currentFrames.getN<FetchCurrentFrames::Outputs>(0);
// Starting with the Light Frame generated in previous tasks
const auto& lightFrame = input.getN<Input>(0);
const auto setupOutput = task.addJob<RenderShadowSetup>("ShadowSetup", lightFrame);
const auto queryResolution = setupOutput.getN<RenderShadowSetup::Outputs>(1);
const auto setupOutput = task.addJob<RenderShadowSetup>("ShadowSetup", input);
const auto queryResolution = setupOutput.getN<RenderShadowSetup::Output>(1);
const auto shadowFrame = setupOutput.getN<RenderShadowSetup::Output>(3);
// Fetch and cull the items from the scene
static const auto shadowCasterReceiverFilter = ItemFilter::Builder::visibleWorldItems().withTypeShape().withOpaque().withoutLayered().withTagBits(tagBits, tagMask);
@ -73,7 +78,7 @@ void RenderShadowTask::build(JobModel& task, const render::Varying& input, rende
// Cull objects that are not visible in camera view. Hopefully the cull functor only performs LOD culling, not
// frustum culling or this will make shadow casters out of the camera frustum disappear.
const auto cameraFrustum = setupOutput.getN<RenderShadowSetup::Outputs>(2);
const auto cameraFrustum = setupOutput.getN<RenderShadowSetup::Output>(2);
const auto applyFunctorInputs = ApplyCullFunctorOnItemBounds::Inputs(shadowItems, cameraFrustum).asVarying();
const auto culledShadowItems = task.addJob<ApplyCullFunctorOnItemBounds>("ShadowCullCamera", applyFunctorInputs, cameraCullFunctor);
@ -90,12 +95,12 @@ void RenderShadowTask::build(JobModel& task, const render::Varying& input, rende
#endif
};
Output cascadeSceneBBoxes;
render::VaryingArray<AABox,4> cascadeSceneBBoxes;
for (auto i = 0; i < SHADOW_CASCADE_MAX_COUNT; i++) {
char jobName[64];
sprintf(jobName, "ShadowCascadeSetup%d", i);
const auto cascadeSetupOutput = task.addJob<RenderShadowCascadeSetup>(jobName, lightFrame, i, tagBits, tagMask);
const auto cascadeSetupOutput = task.addJob<RenderShadowCascadeSetup>(jobName, shadowFrame, i, tagBits, tagMask);
const auto shadowFilter = cascadeSetupOutput.getN<RenderShadowCascadeSetup::Outputs>(0);
auto antiFrustum = render::Varying(ViewFrustumPointer());
cascadeFrustums[i] = cascadeSetupOutput.getN<RenderShadowCascadeSetup::Outputs>(1);
@ -110,17 +115,18 @@ void RenderShadowTask::build(JobModel& task, const render::Varying& input, rende
// GPU jobs: Render to shadow map
sprintf(jobName, "RenderShadowMap%d", i);
const auto shadowInputs = RenderShadowMap::Inputs(culledShadowItemsAndBounds.getN<CullShadowBounds::Outputs>(0),
culledShadowItemsAndBounds.getN<CullShadowBounds::Outputs>(1), lightFrame).asVarying();
culledShadowItemsAndBounds.getN<CullShadowBounds::Outputs>(1), shadowFrame).asVarying();
task.addJob<RenderShadowMap>(jobName, shadowInputs, shapePlumber, i);
sprintf(jobName, "ShadowCascadeTeardown%d", i);
task.addJob<RenderShadowCascadeTeardown>(jobName, shadowFilter);
cascadeSceneBBoxes[i] = culledShadowItemsAndBounds.getN<CullShadowBounds::Outputs>(1);
}
output = render::Varying(cascadeSceneBBoxes);
task.addJob<RenderShadowTeardown>("ShadowTeardown", setupOutput);
output = Output(cascadeSceneBBoxes, setupOutput.getN<RenderShadowSetup::Output>(3));
}
static void computeNearFar(const Triangle& triangle, const Plane shadowClipPlanes[4], float& near, float& far) {
@ -211,12 +217,12 @@ void RenderShadowMap::run(const render::RenderContextPointer& renderContext, con
const auto& inShapes = inputs.get0();
const auto& inShapeBounds = inputs.get1();
const auto& lightFrame = inputs.get2();
const auto& shadowFrame = inputs.get2();
auto lightStage = renderContext->_scene->getStage<LightStage>();
assert(lightStage);
auto shadow = lightStage->getCurrentKeyShadow(*lightFrame);
LightStage::ShadowPointer shadow;
if (shadowFrame && !shadowFrame->_objects.empty()) {
shadow = shadowFrame->_objects.front();
}
if (!shadow || _cascadeIndex >= shadow->getCascadeCount()) {
return;
}
@ -314,7 +320,7 @@ void RenderShadowMap::run(const render::RenderContextPointer& renderContext, con
RenderShadowSetup::RenderShadowSetup() :
_cameraFrustum{ std::make_shared<ViewFrustum>() },
_coarseShadowFrustum{ std::make_shared<ViewFrustum>() } {
_shadowFrameCache = std::make_shared<LightStage::ShadowFrame>();
}
void RenderShadowSetup::configure(const Config& configuration) {
@ -338,12 +344,19 @@ void RenderShadowSetup::setSlopeBias(int cascadeIndex, float value) {
_bias[cascadeIndex]._slope = value * value * value * 0.01f;
}
void RenderShadowSetup::run(const render::RenderContextPointer& renderContext, const Inputs& input, Outputs& output) {
void RenderShadowSetup::run(const render::RenderContextPointer& renderContext, const Input& input, Output& output) {
// Abort all jobs if not casting shadows
auto lightStage = renderContext->_scene->getStage<LightStage>();
auto lightFrame = *input;
assert(lightStage);
if (!lightStage->getCurrentKeyLight(lightFrame) || !lightStage->getCurrentKeyLight(lightFrame)->getCastShadows()) {
const auto lightFrame = *input.get0();
const auto lightingModel = input.get1();
// Clear previous shadow frame always
_shadowFrameCache->_objects.clear();
output.edit3() = _shadowFrameCache;
if (!lightingModel->isShadowEnabled() || !lightStage->getCurrentKeyLight(lightFrame) || !lightStage->getCurrentKeyLight(lightFrame)->getCastShadows()) {
renderContext->taskFlow.abortTask();
return;
}
@ -357,22 +370,29 @@ void RenderShadowSetup::run(const render::RenderContextPointer& renderContext, c
*_cameraFrustum = args->getViewFrustum();
output.edit2() = _cameraFrustum;
const auto globalShadow = lightStage->getCurrentKeyShadow(lightFrame);
if (globalShadow) {
globalShadow->setKeylightFrustum(args->getViewFrustum(), SHADOW_FRUSTUM_NEAR, SHADOW_FRUSTUM_FAR);
if (!_globalShadowObject) {
_globalShadowObject = std::make_shared<LightStage::Shadow>(graphics::LightPointer(), SHADOW_MAX_DISTANCE, SHADOW_CASCADE_COUNT);
}
const auto theGlobalLight = lightStage->getCurrentKeyLight(lightFrame);
if (theGlobalLight && theGlobalLight->getCastShadows()) {
_globalShadowObject->setLight(theGlobalLight);
_globalShadowObject->setKeylightFrustum(args->getViewFrustum(), SHADOW_FRUSTUM_NEAR, SHADOW_FRUSTUM_FAR);
auto& firstCascade = globalShadow->getCascade(0);
auto& firstCascade = _globalShadowObject->getCascade(0);
auto& firstCascadeFrustum = firstCascade.getFrustum();
unsigned int cascadeIndex;
// Adjust each cascade frustum
for (cascadeIndex = 0; cascadeIndex < globalShadow->getCascadeCount(); ++cascadeIndex) {
for (cascadeIndex = 0; cascadeIndex < _globalShadowObject->getCascadeCount(); ++cascadeIndex) {
auto& bias = _bias[cascadeIndex];
globalShadow->setKeylightCascadeFrustum(cascadeIndex, args->getViewFrustum(),
_globalShadowObject->setKeylightCascadeFrustum(cascadeIndex, args->getViewFrustum(),
SHADOW_FRUSTUM_NEAR, SHADOW_FRUSTUM_FAR,
bias._constant, bias._slope);
}
_shadowFrameCache->pushShadow(_globalShadowObject);
// Now adjust coarse frustum bounds
auto frustumPosition = firstCascadeFrustum->getPosition();
auto farTopLeft = firstCascadeFrustum->getFarTopLeft() - frustumPosition;
@ -385,8 +405,8 @@ void RenderShadowSetup::run(const render::RenderContextPointer& renderContext, c
auto near = firstCascadeFrustum->getNearClip();
auto far = firstCascadeFrustum->getFarClip();
for (cascadeIndex = 1; cascadeIndex < globalShadow->getCascadeCount(); ++cascadeIndex) {
auto& cascadeFrustum = globalShadow->getCascade(cascadeIndex).getFrustum();
for (cascadeIndex = 1; cascadeIndex < _globalShadowObject->getCascadeCount(); ++cascadeIndex) {
auto& cascadeFrustum = _globalShadowObject->getCascade(cascadeIndex).getFrustum();
farTopLeft = cascadeFrustum->getFarTopLeft() - frustumPosition;
farBottomRight = cascadeFrustum->getFarBottomRight() - frustumPosition;
@ -425,36 +445,42 @@ void RenderShadowSetup::run(const render::RenderContextPointer& renderContext, c
}
void RenderShadowCascadeSetup::run(const render::RenderContextPointer& renderContext, const Inputs& input, Outputs& output) {
auto lightStage = renderContext->_scene->getStage<LightStage>();
const auto& lightFrame = *input;
assert(lightStage);
const auto shadowFrame = input;
// Cache old render args
RenderArgs* args = renderContext->args;
RenderShadowTask::CullFunctor cullFunctor;
if (shadowFrame && !shadowFrame->_objects.empty() && shadowFrame->_objects[0]) {
const auto globalShadow = shadowFrame->_objects[0];
const auto globalShadow = lightStage->getCurrentKeyShadow(lightFrame);
if (globalShadow && _cascadeIndex < globalShadow->getCascadeCount()) {
// Second item filter is to filter items to keep in shadow frustum computation (here we need to keep shadow receivers)
output.edit0() = ItemFilter::Builder::visibleWorldItems().withTypeShape().withOpaque().withoutLayered().withTagBits(_tagBits, _tagMask);
if (globalShadow && _cascadeIndex < globalShadow->getCascadeCount()) {
// Second item filter is to filter items to keep in shadow frustum computation (here we need to keep shadow receivers)
output.edit0() = ItemFilter::Builder::visibleWorldItems().withTypeShape().withOpaque().withoutLayered().withTagBits(_tagBits, _tagMask);
// Set the keylight render args
auto& cascade = globalShadow->getCascade(_cascadeIndex);
auto& cascadeFrustum = cascade.getFrustum();
args->pushViewFrustum(*cascadeFrustum);
auto texelSize = glm::min(cascadeFrustum->getHeight(), cascadeFrustum->getWidth()) / cascade.framebuffer->getSize().x;
// Set the cull threshold to 24 shadow texels. This is totally arbitrary
const auto minTexelCount = 24.0f;
// TODO : maybe adapt that with LOD management system?
texelSize *= minTexelCount;
cullFunctor._minSquareSize = texelSize * texelSize;
// Set the keylight render args
auto& cascade = globalShadow->getCascade(_cascadeIndex);
auto& cascadeFrustum = cascade.getFrustum();
args->pushViewFrustum(*cascadeFrustum);
auto texelSize = glm::min(cascadeFrustum->getHeight(), cascadeFrustum->getWidth()) / cascade.framebuffer->getSize().x;
// Set the cull threshold to 24 shadow texels. This is totally arbitrary
const auto minTexelCount = 24.0f;
// TODO : maybe adapt that with LOD management system?
texelSize *= minTexelCount;
cullFunctor._minSquareSize = texelSize * texelSize;
output.edit1() = cascadeFrustum;
} else {
output.edit1() = cascadeFrustum;
} else {
output.edit0() = ItemFilter::Builder::nothing();
output.edit1() = ViewFrustumPointer();
}
}
else {
output.edit0() = ItemFilter::Builder::nothing();
output.edit1() = ViewFrustumPointer();
}
output.edit2() = cullFunctor;
}

View file

@ -19,13 +19,14 @@
#include "Shadows_shared.slh"
#include "LightingModel.h"
#include "LightStage.h"
class ViewFrustum;
class RenderShadowMap {
public:
using Inputs = render::VaryingSet3<render::ShapeBounds, AABox, LightStage::FramePointer>;
using Inputs = render::VaryingSet3<render::ShapeBounds, AABox, LightStage::ShadowFramePointer>;
using JobModel = render::Job::ModelI<RenderShadowMap, Inputs>;
RenderShadowMap(render::ShapePlumberPointer shapePlumber, unsigned int cascadeIndex) : _shapePlumber{ shapePlumber }, _cascadeIndex{ cascadeIndex } {}
@ -36,10 +37,12 @@ protected:
unsigned int _cascadeIndex;
};
class RenderShadowTaskConfig : public render::Task::Config::Persistent {
//class RenderShadowTaskConfig : public render::Task::Config::Persistent {
class RenderShadowTaskConfig : public render::Task::Config {
Q_OBJECT
public:
RenderShadowTaskConfig() : render::Task::Config::Persistent(QStringList() << "Render" << "Engine" << "Shadows", true) {}
// RenderShadowTaskConfig() : render::Task::Config::Persistent(QStringList() << "Render" << "Engine" << "Shadows", true) {}
RenderShadowTaskConfig() {}
signals:
void dirty();
@ -49,9 +52,10 @@ class RenderShadowTask {
public:
// There is one AABox per shadow cascade
using Output = render::VaryingArray<AABox, SHADOW_CASCADE_MAX_COUNT>;
using Input = render::VaryingSet2<LightStage::FramePointer, LightingModelPointer>;
using Output = render::VaryingSet2<render::VaryingArray<AABox, SHADOW_CASCADE_MAX_COUNT>, LightStage::ShadowFramePointer>;
using Config = RenderShadowTaskConfig;
using JobModel = render::Task::ModelO<RenderShadowTask, Output, Config>;
using JobModel = render::Task::ModelIO<RenderShadowTask, Input, Output, Config>;
RenderShadowTask() {}
void build(JobModel& task, const render::Varying& inputs, render::Varying& outputs, render::CullFunctor cameraCullFunctor, uint8_t tagBits = 0x00, uint8_t tagMask = 0x00);
@ -99,14 +103,14 @@ signals:
class RenderShadowSetup {
public:
using Inputs = LightStage::FramePointer;
using Outputs = render::VaryingSet3<RenderArgs::RenderMode, glm::ivec2, ViewFrustumPointer>;
using Input = RenderShadowTask::Input;
using Output = render::VaryingSet4<RenderArgs::RenderMode, glm::ivec2, ViewFrustumPointer, LightStage::ShadowFramePointer>;
using Config = RenderShadowSetupConfig;
using JobModel = render::Job::ModelIO<RenderShadowSetup, Inputs, Outputs, Config>;
using JobModel = render::Job::ModelIO<RenderShadowSetup, Input, Output, Config>;
RenderShadowSetup();
void configure(const Config& configuration);
void run(const render::RenderContextPointer& renderContext, const Inputs& input, Outputs& output);
void run(const render::RenderContextPointer& renderContext, const Input& input, Output& output);
private:
@ -117,13 +121,16 @@ private:
float _slope;
} _bias[SHADOW_CASCADE_MAX_COUNT];
LightStage::ShadowFrame::Object _globalShadowObject;
LightStage::ShadowFramePointer _shadowFrameCache;
void setConstantBias(int cascadeIndex, float value);
void setSlopeBias(int cascadeIndex, float value);
};
class RenderShadowCascadeSetup {
public:
using Inputs = LightStage::FramePointer;
using Inputs = LightStage::ShadowFramePointer;
using Outputs = render::VaryingSet3<render::ItemFilter, ViewFrustumPointer, RenderShadowTask::CullFunctor>;
using JobModel = render::Job::ModelIO<RenderShadowCascadeSetup, Inputs, Outputs>;
@ -147,7 +154,7 @@ public:
class RenderShadowTeardown {
public:
using Input = RenderShadowSetup::Outputs;
using Input = RenderShadowSetup::Output;
using JobModel = render::Job::ModelI<RenderShadowTeardown, Input>;
void run(const render::RenderContextPointer& renderContext, const Input& input);
};

View file

@ -10,24 +10,32 @@
//
#include "RenderViewTask.h"
#include "AssembleLightingStageTask.h"
#include "RenderShadowTask.h"
#include "RenderDeferredTask.h"
#include "RenderForwardTask.h"
void RenderViewTask::build(JobModel& task, const render::Varying& input, render::Varying& output, render::CullFunctor cullFunctor, bool isDeferred, uint8_t tagBits, uint8_t tagMask) {
// auto items = input.get<Input>();
const auto items = task.addJob<RenderFetchCullSortTask>("FetchCullSort", cullFunctor, tagBits, tagMask);
assert(items.canCast<RenderFetchCullSortTask::Output>());
// Issue the lighting model, aka the big global settings for the view
const auto lightingModel = task.addJob<MakeLightingModel>("LightingModel");
// Assemble the lighting stages current frames
const auto lightingStageFramesAndZones = task.addJob<AssembleLightingStageTask>("AssembleStages", items);
if (isDeferred) {
// Warning : the cull functor passed to the shadow pass should only be testing for LOD culling. If frustum culling
// is performed, then casters not in the view frustum will be removed, which is not what we wish.
const auto cascadeSceneBBoxes = task.addJob<RenderShadowTask>("RenderShadowTask", cullFunctor, tagBits, tagMask);
const auto renderInput = RenderDeferredTask::Input(items, cascadeSceneBBoxes).asVarying();
task.addJob<RenderDeferredTask>("RenderDeferredTask", renderInput, true);
const auto shadowTaskIn = RenderShadowTask::Input(lightingStageFramesAndZones.get<AssembleLightingStageTask::Output>().get0()[0], lightingModel).asVarying();
const auto shadowTaskOut = task.addJob<RenderShadowTask>("RenderShadowTask", shadowTaskIn, cullFunctor, tagBits, tagMask);
const auto renderInput = RenderDeferredTask::Input(items, lightingModel, lightingStageFramesAndZones, shadowTaskOut).asVarying();
task.addJob<RenderDeferredTask>("RenderDeferredTask", renderInput);
} else {
task.addJob<RenderForwardTask>("Forward", items);
const auto renderInput = RenderForwardTask::Input(items, lightingModel, lightingStageFramesAndZones).asVarying();
task.addJob<RenderForwardTask>("Forward", renderInput);
}
}

View file

@ -51,7 +51,7 @@ void ZoneRendererTask::build(JobModel& task, const Varying& input, Varying& outp
output = zoneItems;
}
void SetupZones::run(const RenderContextPointer& context, const Inputs& inputs) {
void SetupZones::run(const RenderContextPointer& context, const Input& input) {
// Grab light, background, haze, and bloom stages and clear them
auto lightStage = context->_scene->getStage<LightStage>();
assert(lightStage);
@ -70,7 +70,7 @@ void SetupZones::run(const RenderContextPointer& context, const Inputs& inputs)
bloomStage->_currentFrame.clear();
// call render over the zones to grab their components in the correct order first...
render::renderItems(context, inputs);
render::renderItems(context, input);
// Finally add the default lights and background:
lightStage->_currentFrame.pushSunLight(lightStage->getDefaultLight());

View file

@ -21,12 +21,12 @@
class SetupZones {
public:
using Inputs = render::ItemBounds;
using JobModel = render::Job::ModelI<SetupZones, Inputs>;
using Input = render::ItemBounds;
using JobModel = render::Job::ModelI<SetupZones, Input>;
SetupZones() {}
void run(const render::RenderContextPointer& context, const Inputs& inputs);
void run(const render::RenderContextPointer& context, const Input& input);
};
class ZoneRendererConfig : public render::Task::Config {
@ -51,13 +51,14 @@ public:
static const render::Selection::Name ZONES_SELECTION;
using Inputs = render::ItemBounds;
using Input = render::ItemBounds;
using Output = render::ItemBounds;
using Config = ZoneRendererConfig;
using JobModel = render::Task::ModelI<ZoneRendererTask, Inputs, Config>;
using JobModel = render::Task::ModelIO<ZoneRendererTask, Input, Output, Config>;
ZoneRendererTask() {}
void build(JobModel& task, const render::Varying& inputs, render::Varying& output);
void build(JobModel& task, const render::Varying& input, render::Varying& output);
void configure(const Config& config) { _maxDrawn = config.maxDrawn; }

View file

@ -114,7 +114,8 @@ namespace render {
class SelectItems {
public:
using Inputs = VaryingSet3<ItemBounds, ItemBounds, std::string>;
using JobModel = Job::ModelIO<SelectItems, Inputs, ItemBounds>;
using Outputs = ItemBounds;
using JobModel = Job::ModelIO<SelectItems, Inputs, Outputs>;
std::string _name;
SelectItems() {}

View file

@ -70,5 +70,13 @@ void RenderFetchCullSortTask::build(JobModel& task, const Varying& input, Varyin
const auto overlayTransparents = task.addJob<DepthSortItems>("DepthSortOverlayTransparent", filteredNonspatialBuckets[TRANSPARENT_SHAPE_BUCKET], DepthSortItems(false));
const auto background = filteredNonspatialBuckets[BACKGROUND_BUCKET];
output = Output(BucketList{ opaques, transparents, lights, metas, overlayOpaques, overlayTransparents, background }, spatialSelection);
// split up the overlays into 3D front, hud
const auto filteredOverlaysOpaque = task.addJob<FilterLayeredItems>("FilterOverlaysLayeredOpaque", overlayOpaques, ItemKey::Layer::LAYER_1);
const auto filteredOverlaysTransparent = task.addJob<FilterLayeredItems>("FilterOverlaysLayeredTransparent", overlayTransparents, ItemKey::Layer::LAYER_1);
output = Output(BucketList{ opaques, transparents, lights, metas, overlayOpaques, overlayTransparents,
filteredOverlaysOpaque.getN<FilterLayeredItems::Outputs>(0), filteredOverlaysTransparent.getN<FilterLayeredItems::Outputs>(0),
filteredOverlaysOpaque.getN<FilterLayeredItems::Outputs>(1), filteredOverlaysTransparent.getN<FilterLayeredItems::Outputs>(1),
background }, spatialSelection);
}

View file

@ -25,6 +25,11 @@ public:
META,
OVERLAY_OPAQUE_SHAPE,
OVERLAY_TRANSPARENT_SHAPE,
LAYER_FRONT_OPAQUE_SHAPE,
LAYER_FRONT_TRANSPARENT_SHAPE,
LAYER_HUD_OPAQUE_SHAPE,
LAYER_HUD_TRANSPARENT_SHAPE,
BACKGROUND,
NUM_BUCKETS

View file

@ -29,5 +29,3 @@ void TaskFlow::abortTask() {
// Reports whether an abort has been requested for the currently running task,
// i.e. whether the _doAbortTask flag has been raised (see abortTask()).
bool TaskFlow::doAbortTask() const {
    return _doAbortTask;
}

View file

@ -146,7 +146,7 @@ public:
Concept(name, config),
_data(Data(std::forward<A>(args)...)),
_input(input),
_output(Output()) {
_output(Output(), name + ".o") {
applyConfiguration();
}
@ -419,6 +419,7 @@ protected:
template < typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6 > using VaryingSet7 = task::VaryingSet7<T0, T1, T2, T3, T4, T5, T6>; \
template < typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7 > using VaryingSet8 = task::VaryingSet8<T0, T1, T2, T3, T4, T5, T6, T7>; \
template < typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8 > using VaryingSet9 = task::VaryingSet9<T0, T1, T2, T3, T4, T5, T6, T7, T8>; \
template < typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9 > using VaryingSet10 = task::VaryingSet10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>; \
template < class T, int NUM > using VaryingArray = task::VaryingArray<T, NUM>;

View file

@ -12,10 +12,13 @@
#ifndef hifi_task_Varying_h
#define hifi_task_Varying_h
#include <type_traits>
#include <tuple>
#include <array>
namespace task {
class Varying;
// A varying piece of data, to be used as Job/Task I/O
class Varying {
@ -23,15 +26,16 @@ public:
Varying() {}
Varying(const Varying& var) : _concept(var._concept) {}
Varying& operator=(const Varying& var) {
_concept = var._concept;
_concept = var._concept;
return (*this);
}
template <class T> Varying(const T& data) : _concept(std::make_shared<Model<T>>(data)) {}
template <class T> Varying(const T& data, const std::string& name = "noname") : _concept(std::make_shared<Model<T>>(data, name)) {}
template <class T> bool canCast() const { return !!std::dynamic_pointer_cast<Model<T>>(_concept); }
template <class T> const T& get() const { return std::static_pointer_cast<const Model<T>>(_concept)->_data; }
template <class T> T& edit() { return std::static_pointer_cast<Model<T>>(_concept)->_data; }
const std::string name() const { return _concept->name(); }
// access potential sub varyings contained in this one.
Varying operator[] (uint8_t index) const { return (*_concept)[index]; }
@ -45,23 +49,30 @@ public:
protected:
class Concept {
public:
Concept(const std::string& name) : _name(name) {}
virtual ~Concept() = default;
virtual Varying operator[] (uint8_t index) const = 0;
virtual uint8_t length() const = 0;
const std::string name() { return _name; }
const std::string _name;
};
template <class T> class Model : public Concept {
public:
using Data = T;
Model(const Data& data) : _data(data) {}
Model(const Data& data, const std::string& name) : Concept(name), _data(data) {}
virtual ~Model() = default;
virtual Varying operator[] (uint8_t index) const override {
Varying var;
return var;
return Varying();
}
virtual uint8_t length() const override {
return 0;
}
virtual uint8_t length() const override { return 0; }
Data _data;
};
@ -69,11 +80,10 @@ protected:
std::shared_ptr<Concept> _concept;
};
using VaryingPairBase = std::pair<Varying, Varying>;
template < typename T0, typename T1 >
class VaryingSet2 : public VaryingPairBase {
class VaryingSet2 : public std::pair<Varying, Varying> {
public:
using Parent = VaryingPairBase;
using Parent = std::pair<Varying, Varying>;
typedef void is_proxy_tag;
VaryingSet2() : Parent(Varying(T0()), Varying(T1())) {}
@ -98,7 +108,6 @@ public:
Varying asVarying() const { return Varying((*this)); }
};
template <class T0, class T1, class T2>
class VaryingSet3 : public std::tuple<Varying, Varying,Varying>{
public:
@ -168,7 +177,6 @@ public:
Varying asVarying() const { return Varying((*this)); }
};
template <class T0, class T1, class T2, class T3, class T4>
class VaryingSet5 : public std::tuple<Varying, Varying, Varying, Varying, Varying>{
public:
@ -289,6 +297,26 @@ public:
const T6& get6() const { return std::get<6>((*this)).template get<T6>(); }
T6& edit6() { return std::get<6>((*this)).template edit<T6>(); }
virtual Varying operator[] (uint8_t index) const {
switch (index) {
default:
return std::get<0>((*this));
case 1:
return std::get<1>((*this));
case 2:
return std::get<2>((*this));
case 3:
return std::get<3>((*this));
case 4:
return std::get<4>((*this));
case 5:
return std::get<5>((*this));
case 6:
return std::get<6>((*this));
};
}
virtual uint8_t length() const { return 7; }
Varying asVarying() const { return Varying((*this)); }
};
@ -325,6 +353,28 @@ public:
const T7& get7() const { return std::get<7>((*this)).template get<T7>(); }
T7& edit7() { return std::get<7>((*this)).template edit<T7>(); }
virtual Varying operator[] (uint8_t index) const {
switch (index) {
default:
return std::get<0>((*this));
case 1:
return std::get<1>((*this));
case 2:
return std::get<2>((*this));
case 3:
return std::get<3>((*this));
case 4:
return std::get<4>((*this));
case 5:
return std::get<5>((*this));
case 6:
return std::get<6>((*this));
case 7:
return std::get<7>((*this));
};
}
virtual uint8_t length() const { return 8; }
Varying asVarying() const { return Varying((*this)); }
};
@ -363,6 +413,98 @@ public:
const T8& get8() const { return std::get<8>((*this)).template get<T8>(); }
T8& edit8() { return std::get<8>((*this)).template edit<T8>(); }
virtual Varying operator[] (uint8_t index) const {
switch (index) {
default:
return std::get<0>((*this));
case 1:
return std::get<1>((*this));
case 2:
return std::get<2>((*this));
case 3:
return std::get<3>((*this));
case 4:
return std::get<4>((*this));
case 5:
return std::get<5>((*this));
case 6:
return std::get<6>((*this));
case 7:
return std::get<7>((*this));
case 8:
return std::get<8>((*this));
};
}
virtual uint8_t length() const { return 9; }
Varying asVarying() const { return Varying((*this)); }
};
template <class T0, class T1, class T2, class T3, class T4, class T5, class T6, class T7, class T8, class T9>
class VaryingSet10 : public std::tuple<Varying, Varying, Varying, Varying, Varying, Varying, Varying, Varying, Varying, Varying> {
public:
using Parent = std::tuple<Varying, Varying, Varying, Varying, Varying, Varying, Varying, Varying, Varying, Varying>;
VaryingSet10() : Parent(Varying(T0()), Varying(T1()), Varying(T2()), Varying(T3()), Varying(T4()), Varying(T5()), Varying(T6()), Varying(T7()), Varying(T8()), Varying(T9())) {}
VaryingSet10(const VaryingSet10& src) : Parent(std::get<0>(src), std::get<1>(src), std::get<2>(src), std::get<3>(src), std::get<4>(src), std::get<5>(src), std::get<6>(src), std::get<7>(src), std::get<8>(src), std::get<9>(src)) {}
VaryingSet10(const Varying& first, const Varying& second, const Varying& third, const Varying& fourth, const Varying& fifth, const Varying& sixth, const Varying& seventh, const Varying& eighth, const Varying& nine, const Varying& ten) : Parent(first, second, third, fourth, fifth, sixth, seventh, eighth, nine, ten) {}
const T0& get0() const { return std::get<0>((*this)).template get<T0>(); }
T0& edit0() { return std::get<0>((*this)).template edit<T0>(); }
const T1& get1() const { return std::get<1>((*this)).template get<T1>(); }
T1& edit1() { return std::get<1>((*this)).template edit<T1>(); }
const T2& get2() const { return std::get<2>((*this)).template get<T2>(); }
T2& edit2() { return std::get<2>((*this)).template edit<T2>(); }
const T3& get3() const { return std::get<3>((*this)).template get<T3>(); }
T3& edit3() { return std::get<3>((*this)).template edit<T3>(); }
const T4& get4() const { return std::get<4>((*this)).template get<T4>(); }
T4& edit4() { return std::get<4>((*this)).template edit<T4>(); }
const T5& get5() const { return std::get<5>((*this)).template get<T5>(); }
T5& edit5() { return std::get<5>((*this)).template edit<T5>(); }
const T6& get6() const { return std::get<6>((*this)).template get<T6>(); }
T6& edit6() { return std::get<6>((*this)).template edit<T6>(); }
const T7& get7() const { return std::get<7>((*this)).template get<T7>(); }
T7& edit7() { return std::get<7>((*this)).template edit<T7>(); }
const T8& get8() const { return std::get<8>((*this)).template get<T8>(); }
T8& edit8() { return std::get<8>((*this)).template edit<T8>(); }
const T9& get9() const { return std::get<9>((*this)).template get<T9>(); }
T9& edit9() { return std::get<9>((*this)).template edit<T9>(); }
virtual Varying operator[] (uint8_t index) const {
switch (index) {
default:
return std::get<0>((*this));
case 1:
return std::get<1>((*this));
case 2:
return std::get<2>((*this));
case 3:
return std::get<3>((*this));
case 4:
return std::get<4>((*this));
case 5:
return std::get<5>((*this));
case 6:
return std::get<6>((*this));
case 7:
return std::get<7>((*this));
case 8:
return std::get<8>((*this));
case 9:
return std::get<9>((*this));
};
}
virtual uint8_t length() const { return 10; }
Varying asVarying() const { return Varying((*this)); }
};
@ -381,6 +523,7 @@ public:
std::copy(list.begin(), list.end(), std::array<Varying, NUM>::begin());
}
};
}
#endif // hifi_task_Varying_h

View file

@ -45,6 +45,24 @@ function job_propKeys(job) {
return propKeys;
}
// Access job inputs/outputs.
// Returns the names of the job's properties that are called "input" or "output"
// and are not functions.
function job_inoutKeys(job) {
    return Object.keys(job).filter(function (key) {
        var value = job[key];
        return (typeof value !== "function") && (key === "input" || key === "output");
    });
}
// Use this function to create a functor that will fill the specifed array with one entry name per task and job and it s rank
function job_list_functor(jobList, maxDepth) {
if (maxDepth === undefined) maxDepth = 100
@ -55,7 +73,7 @@ function job_list_functor(jobList, maxDepth) {
}
// Use this function to create a functor that will print the content of the Job visited calling the specified 'printout' function
function job_print_functor(printout, showProps, maxDepth) {
function job_print_functor(printout, showProps, showInOuts, maxDepth) {
if (maxDepth === undefined) maxDepth = 100
return function (job, depth, index) {
var tab = " "
@ -69,6 +87,14 @@ function job_print_functor(printout, showProps, maxDepth) {
printout(depthTab + tab + tab + typeof prop + " " + keys[p] + " " + prop);
}
}
if (showInOuts) {
printout("jsdkfkjdskflj")
var inouts = job_inoutKeys(job);
for (var p=0; p < inouts.length;p++) {
var prop = job[inouts[p]]
printout(depthTab + tab + tab + typeof prop + " " + inouts[p] + " " + prop);
}
}
return depth < maxDepth;
}
}

View file

@ -32,7 +32,7 @@ Rectangle {
Component.onCompleted: {
var message = ""
var functor = Jet.job_print_functor(function (line) { message += line + "\n"; }, false);
var functor = Jet.job_print_functor(function (line) { message += line + "\n"; }, false, true);
Jet.task_traverseTree(rootConfig, functor);
textArea.append(message);
}

View file

@ -47,8 +47,8 @@ Rectangle {
"Emissive:LightingModel:enableEmissive",
"Lightmap:LightingModel:enableLightmap",
"Background:LightingModel:enableBackground",
"Haze:LightingModel:enableHaze",
"ssao:AmbientOcclusion:enabled",
"Haze:LightingModel:enableHaze",
"ssao:LightingModel:enableAmbientOcclusion",
"Textures:LightingModel:enableMaterialTexturing"
]
HifiControls.CheckBox {
@ -93,7 +93,7 @@ Rectangle {
"Spot:LightingModel:enableSpotLight",
"Light Contour:LightingModel:showLightContour",
"Zone Stack:DrawZoneStack:enabled",
"Shadow:RenderShadowTask:enabled"
"Shadow:LightingModel:enableShadow"
]
HifiControls.CheckBox {
boxSize: 20

View file

@ -0,0 +1,13 @@
// Open a floating overlay window hosting the render-engine task list UI.
function openEngineTaskView() {
    // Resolve the QML UI relative to this script.
    var qmlSource = Script.resolvePath('engineList.qml');
    var engineWindow = new OverlayWindow({
        title: 'Render Engine',
        source: qmlSource,
        width: 300,
        height: 400
    });
    engineWindow.setPosition(200, 50);
    //engineWindow.closed.connect(function() { Script.stop(); });
}
openEngineTaskView();

View file

@ -0,0 +1,30 @@
//
// engineList.qml
//
// Created by Sam Gateau on 12/3/2018
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html
//
import QtQuick 2.7
import QtQuick.Controls 1.4
import QtQuick.Layouts 1.3
import stylesUit 1.0
import controlsUit 1.0 as HifiControls
import "../lib/jet/qml" as Jet
Item {
    HifiConstants { id: hifi;}
    id: render;
    anchors.fill: parent

    // NOTE(review): not referenced in this file — presumably exposed for
    // debug/console access to the main view's task configuration; confirm.
    property var mainViewTask: Render.getConfig("RenderMainView")

    // Tree view of every task/job in the render engine, rooted at the global Render config.
    Jet.TaskList {
        rootConfig: Render
        anchors.fill: render
    }
}

View file

@ -0,0 +1,129 @@
"use strict";
//
//  Avatars.js
//  tablet-engine app
//
//  Tablet/desktop app that hosts the avatars.qml stats UI, either as a tablet
//  screen or as a floating always-on-top window.
//
//  Copyright 2018 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
(function() {
    var TABLET_BUTTON_NAME = "Avatars";
    var QMLAPP_URL = Script.resolvePath("./avatars.qml");
    var ICON_URL = Script.resolvePath("../../../system/assets/images/lod-i.svg");
    var ACTIVE_ICON_URL = Script.resolvePath("../../../system/assets/images/lod-a.svg");

    var onTablet = false; // set this to true to use the tablet, false use a floating window

    // True while our QML is the active tablet screen (tablet mode only).
    var onAppScreen = false;

    var tablet = Tablet.getTablet("com.highfidelity.interface.tablet.system");
    var button = tablet.addButton({
        text: TABLET_BUTTON_NAME,
        icon: ICON_URL,
        activeIcon: ACTIVE_ICON_URL
    });

    // Guards against double connect/disconnect of the tablet's QML event bridge.
    var hasEventBridge = false;

    // Floating-window mode state.
    var onScreen = false;
    var window;

    // Toggle the app: on the tablet, switch between the home screen and this
    // app's QML; in window mode, open or close the floating window.
    function onClicked() {
        if (onTablet) {
            if (onAppScreen) {
                tablet.gotoHomeScreen();
            } else {
                tablet.loadQMLSource(QMLAPP_URL);
            }
        } else {
            if (onScreen) {
                killWindow()
            } else {
                createWindow()
            }
        }
    }

    // Create the floating always-on-top window hosting the QML UI and mark the
    // tablet button active.
    function createWindow() {
        var qml = Script.resolvePath(QMLAPP_URL);
        window = Desktop.createWindow(Script.resolvePath(QMLAPP_URL), {
            title: TABLET_BUTTON_NAME,
            flags: Desktop.ALWAYS_ON_TOP,
            presentationMode: Desktop.PresentationMode.NATIVE,
            size: {x: 400, y: 600}
        });
        window.closed.connect(killWindow);
        window.fromQml.connect(fromQml);
        onScreen = true
        button.editProperties({isActive: true});
    }

    // Tear down the floating window (if any) and mark the tablet button inactive.
    function killWindow() {
        if (window !== undefined) {
            window.closed.disconnect(killWindow);
            window.fromQml.disconnect(fromQml);
            window.close()
            window = undefined
        }
        onScreen = false
        button.editProperties({isActive: false})
    }

    // Connect (on === true) or disconnect the tablet's QML event bridge,
    // using hasEventBridge so each transition happens at most once.
    function wireEventBridge(on) {
        if (!tablet) {
            print("Warning in wireEventBridge(): 'tablet' undefined!");
            return;
        }
        if (on) {
            if (!hasEventBridge) {
                tablet.fromQml.connect(fromQml);
                hasEventBridge = true;
            }
        } else {
            if (hasEventBridge) {
                tablet.fromQml.disconnect(fromQml);
                hasEventBridge = false;
            }
        }
    }

    // Track whether our QML became the active tablet screen and (dis)connect
    // the event bridge accordingly.
    function onScreenChanged(type, url) {
        if (onTablet) {
            onAppScreen = (url === QMLAPP_URL);
            button.editProperties({isActive: onAppScreen});
            wireEventBridge(onAppScreen);
        }
    }

    button.clicked.connect(onClicked);
    tablet.screenChanged.connect(onScreenChanged);

    // On script shutdown: close the window, leave the app screen, and remove
    // the tablet button and all signal connections.
    Script.scriptEnding.connect(function () {
        killWindow()
        if (onAppScreen) {
            tablet.gotoHomeScreen();
        }
        button.clicked.disconnect(onClicked);
        tablet.screenChanged.disconnect(onScreenChanged);
        tablet.removeButton(button);
    });

    // Handler for messages coming from the QML UI; currently a no-op.
    function fromQml(message) {
    }

    // Send a message to the QML UI on whichever surface (tablet or window) hosts it.
    function sendToQml(message) {
        if (onTablet) {
            tablet.sendToQml(message);
        } else {
            if (window) {
                window.sendToQml(message);
            }
        }
    }
}());

View file

@ -0,0 +1,78 @@
//
// avatars.qml
// scripts/developer/utilities/workload
//
// Created by Sam Gateau on 2018.11.28
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html
//
import QtQuick 2.5
import QtQuick.Controls 1.4
import stylesUit 1.0
import controlsUit 1.0 as HifiControls
import "../lib/plotperf"
import "../render/configSlider"
Item {
    id: root
    anchors.fill:parent

    Component.onCompleted: {
    }

    Component.onDestruction: {
    }

    // Reserved header area (currently empty); the stats column anchors below it.
    Column {
        id: topHeader
        spacing: 8
        anchors.right: parent.right
        anchors.left: parent.left
    }

    Column {
        id: stats
        spacing: 4
        anchors.right: parent.right
        anchors.left: parent.left
        anchors.top: topHeader.bottom
        anchors.bottom: parent.bottom

        // Split the column's height evenly between the plot children, excluding the
        // two Separator children and accounting for inter-item spacing.
        function evalEvenHeight() {
            // Why do we have to do that manually ? cannot seem to find a qml / anchor / layout mode that does that ?
            var numPlots = (children.length + - 2)
            return (height - topLine.height - bottomLine.height - spacing * (numPlots - 1)) / (numPlots)
        }

        Separator {
            id: topLine
        }

        // Live plot of per-frame avatar update counters read off the Stats object.
        PlotPerf {
            title: "Avatars"
            height: parent.evalEvenHeight()
            object: Stats
            valueScale: 1
            valueUnit: "num"
            plots: [
                {
                    prop: "updatedAvatarCount",
                    label: "updatedAvatarCount",
                    color: "#FFFF00"
                },
                {
                    prop: "notUpdatedAvatarCount",
                    label: "notUpdatedAvatarCount",
                    color: "#00FF00"
                }
            ]
        }

        Separator {
            id: bottomLine
        }
    }
}