Merge branch 'master' of https://github.com/highfidelity/hifi into punk

This commit is contained in:
samcake 2018-05-21 15:52:51 -07:00
commit f536f7b6c9
26 changed files with 466 additions and 315 deletions

View file

@@ -25,6 +25,7 @@ public class UserStoryDomainProvider implements DomainProvider {
private static final String INCLUDE_ACTIONS_FOR_PLACES = "concurrency";
private static final String INCLUDE_ACTIONS_FOR_FULL_SEARCH = "concurrency,announcements,snapshot";
private static final String TAGS_TO_SEARCH = "mobile";
private static final int MAX_PAGES_TO_GET = 10;
private String mProtocol;
@@ -78,6 +79,7 @@ public class UserStoryDomainProvider implements DomainProvider {
"open",
true,
mProtocol,
TAGS_TO_SEARCH,
pageNumber);
Log.d("API-USER-STORY-DOMAINS", "Protocol [" + mProtocol + "] include_actions [" + INCLUDE_ACTIONS_FOR_PLACES + "]");
userStories.enqueue(new retrofit2.Callback<UserStories>() {
@@ -152,45 +154,13 @@ public class UserStoryDomainProvider implements DomainProvider {
}
}
public void retrieveNot(DomainCallback domainCallback) {
// TODO Call multiple pages
Call<UserStories> userStories = mUserStoryDomainProviderService.getUserStories(
INCLUDE_ACTIONS_FOR_PLACES,
"open",
true,
mProtocol,
1);
Log.d("API-USER-STORY-DOMAINS", "Protocol [" + mProtocol + "] include_actions [" + INCLUDE_ACTIONS_FOR_PLACES + "]");
userStories.enqueue(new retrofit2.Callback<UserStories>() {
@Override
public void onResponse(Call<UserStories> call, Response<UserStories> response) {
UserStories userStories = response.body();
if (userStories == null) {
domainCallback.retrieveOk(new ArrayList<>(0));
}
List<DomainAdapter.Domain> domains = new ArrayList<>(userStories.total_entries);
userStories.user_stories.forEach(userStory -> {
domains.add(userStory.toDomain());
});
domainCallback.retrieveOk(domains);
}
@Override
public void onFailure(Call<UserStories> call, Throwable t) {
domainCallback.retrieveError(new Exception(t), t.getMessage());
}
});
}
public interface UserStoryDomainProviderService {
@GET("api/v1/user_stories")
Call<UserStories> getUserStories(@Query("include_actions") String includeActions,
@Query("restriction") String restriction,
@Query("require_online") boolean requireOnline,
@Query("protocol") String protocol,
@Query("tags") String tagsCommaSeparated,
@Query("page") int pageNumber);
}

View file

@@ -33,6 +33,10 @@
#include "ModelBakingLoggingCategory.h"
#include "TextureBaker.h"
#ifdef HIFI_DUMP_FBX
#include "FBXToJSON.h"
#endif
void FBXBaker::bake() {
qDebug() << "FBXBaker" << _modelURL << "bake starting";
@@ -187,6 +191,21 @@ void FBXBaker::importScene() {
qCDebug(model_baking) << "Parsing" << _modelURL;
_rootNode = reader._rootNode = reader.parseFBX(&fbxFile);
#ifdef HIFI_DUMP_FBX
{
FBXToJSON fbxToJSON;
fbxToJSON << _rootNode;
QFileInfo modelFile(_originalModelFilePath);
QString outFilename(_bakedOutputDir + "/" + modelFile.completeBaseName() + "_FBX.json");
QFile jsonFile(outFilename);
if (jsonFile.open(QIODevice::WriteOnly)) {
jsonFile.write(fbxToJSON.str().c_str(), fbxToJSON.str().length());
jsonFile.close();
}
}
#endif
_geometry = reader.extractFBXGeometry({}, _modelURL.toString());
_textureContentMap = reader._textureContent;
}
@@ -231,39 +250,40 @@ void FBXBaker::rewriteAndBakeSceneModels() {
} else if (hasWarnings()) {
continue;
}
}
objectChild.children.push_back(dracoMeshNode);
} else {
objectChild.children.push_back(dracoMeshNode);
static const std::vector<QString> nodeNamesToDelete {
// Node data that is packed into the draco mesh
"Vertices",
"PolygonVertexIndex",
"LayerElementNormal",
"LayerElementColor",
"LayerElementUV",
"LayerElementMaterial",
"LayerElementTexture",
static const std::vector<QString> nodeNamesToDelete {
// Node data that is packed into the draco mesh
"Vertices",
"PolygonVertexIndex",
"LayerElementNormal",
"LayerElementColor",
"LayerElementUV",
"LayerElementMaterial",
"LayerElementTexture",
// Node data that we don't support
"Edges",
"LayerElementTangent",
"LayerElementBinormal",
"LayerElementSmoothing"
};
auto& children = objectChild.children;
auto it = children.begin();
while (it != children.end()) {
auto begin = nodeNamesToDelete.begin();
auto end = nodeNamesToDelete.end();
if (find(begin, end, it->name) != end) {
it = children.erase(it);
} else {
++it;
// Node data that we don't support
"Edges",
"LayerElementTangent",
"LayerElementBinormal",
"LayerElementSmoothing"
};
auto& children = objectChild.children;
auto it = children.begin();
while (it != children.end()) {
auto begin = nodeNamesToDelete.begin();
auto end = nodeNamesToDelete.end();
if (find(begin, end, it->name) != end) {
it = children.erase(it);
} else {
++it;
}
}
}
}
}
} // Geometry Object
} // foreach root child
}
}
}
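The deletion loop in this hunk relies on vector::erase returning the iterator to the next element, which keeps the traversal valid while items are removed. A minimal, self-contained sketch of the same idiom (hypothetical names, not code from this commit):

#include <algorithm>
#include <string>
#include <vector>

// Remove every child whose name is on a deny-list, using the
// iterator-returning erase to stay valid across removals.
void pruneChildren(std::vector<std::string>& children,
                   const std::vector<std::string>& namesToDelete) {
    auto it = children.begin();
    while (it != children.end()) {
        if (std::find(namesToDelete.begin(), namesToDelete.end(), *it) != namesToDelete.end()) {
            it = children.erase(it); // erase returns the next valid iterator
        } else {
            ++it;
        }
    }
}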

View file

@@ -52,7 +52,6 @@ private:
void embedTextureMetaData();
void rewriteAndBakeSceneModels();
void rewriteAndBakeSceneTextures();
void exportScene();
FBXGeometry* _geometry;
QHash<QString, int> _textureNameMatchCount;

View file

@@ -24,6 +24,10 @@
#include <draco/mesh/triangle_soup_mesh_builder.h>
#include <draco/compression/encode.h>
#ifdef HIFI_DUMP_FBX
#include "FBXToJSON.h"
#endif
#ifdef _WIN32
#pragma warning( pop )
#endif
@@ -605,5 +609,19 @@ void ModelBaker::exportScene() {
_outputFiles.push_back(_bakedModelFilePath);
#ifdef HIFI_DUMP_FBX
{
FBXToJSON fbxToJSON;
fbxToJSON << _rootNode;
QFileInfo modelFile(_bakedModelFilePath);
QString outFilename(modelFile.dir().absolutePath() + "/" + modelFile.completeBaseName() + "_FBX.json");
QFile jsonFile(outFilename);
if (jsonFile.open(QIODevice::WriteOnly)) {
jsonFile.write(fbxToJSON.str().c_str(), fbxToJSON.str().length());
jsonFile.close();
}
}
#endif
qCDebug(model_baking) << "Exported" << _modelURL << "with re-written paths to" << _bakedModelFilePath;
}

View file

@@ -28,8 +28,10 @@
#include <graphics/Geometry.h>
#include <graphics/Material.h>
static const QByteArray FBX_BINARY_PROLOG = "Kaydara FBX Binary ";
// See comment in FBXReader::parseFBX().
static const int FBX_HEADER_BYTES_BEFORE_VERSION = 23;
static const QByteArray FBX_BINARY_PROLOG("Kaydara FBX Binary ");
static const QByteArray FBX_BINARY_PROLOG2("\0\x1a\0", 3);
static const quint32 FBX_VERSION_2015 = 7400;
static const quint32 FBX_VERSION_2016 = 7500;

View file

@@ -1603,7 +1603,7 @@ FBXGeometry* FBXReader::extractFBXGeometry(const QVariantHash& mapping, const QS
// NOTE: shapeVertices are in joint-frame
std::vector<ShapeVertices> shapeVertices;
shapeVertices.resize(geometry.joints.size());
shapeVertices.resize(std::max(1, geometry.joints.size()) );
// find our special joints
geometry.leftEyeJointIndex = modelIDs.indexOf(jointEyeLeftID);

View file

@@ -214,10 +214,7 @@ FBXNode parseBinaryFBXNode(QDataStream& in, int& position, bool has64BitPosition
while (endOffset > position) {
FBXNode child = parseBinaryFBXNode(in, position, has64BitPositions);
if (child.name.isNull()) {
return node;
} else {
if (!child.name.isNull()) {
node.children.append(child);
}
}
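Before this change a null-named child record terminated the child loop outright; now such records are merely skipped, so parsing continues until endOffset. A condensed before/after of the two control flows (reassembled from the hunk above):

// Old behavior: a NULL record ended the node's child list immediately.
if (child.name.isNull()) {
    return node;
}
node.children.append(child);

// New behavior: skip empty records and keep reading until endOffset.
if (!child.name.isNull()) {
    node.children.append(child);
}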

View file

@@ -0,0 +1,114 @@
//
// FBXToJSON.cpp
// libraries/fbx/src
//
// Created by Simon Walton on 5/4/2018.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "FBXToJSON.h"
#include "FBX.h"
using std::string;
template<typename T>
inline FBXToJSON& FBXToJSON::operator<<(const QVector<T>& arrayProp) {
*this << "[";
char comma = ' ';
for (auto& prop : arrayProp) {
*(std::ostringstream*)this << comma << prop;
comma = ',';
}
*this << "] ";
if (arrayProp.size() > 4) {
*this << "// " << arrayProp.size() << " items";
}
*this << '\n';
return *this;
}
FBXToJSON& FBXToJSON::operator<<(const FBXNode& fbxNode) {
string nodeName(fbxNode.name);
if (nodeName.empty()) {
nodeName = "node";
} else {
nodeName = stringEscape(nodeName);
}
*this << string(_indentLevel * 4, ' ') << '"' << nodeName << "\": {\n";
++_indentLevel;
int p = 0;
const char* eol = "";
for (auto& prop : fbxNode.properties) {
*this << eol << string(_indentLevel * 4, ' ') << "\"p" << p++ << "\": ";
switch (prop.userType()) {
case QMetaType::Short:
case QMetaType::Bool:
case QMetaType::Int:
case QMetaType::LongLong:
case QMetaType::Double:
case QMetaType::Float:
*this << prop.toString().toStdString();
break;
case QMetaType::QString:
case QMetaType::QByteArray:
*this << '"' << stringEscape(prop.toByteArray().toStdString()) << '"';
break;
default:
if (prop.canConvert<QVector<float>>()) {
*this << prop.value<QVector<float>>();
} else if (prop.canConvert<QVector<double>>()) {
*this << prop.value<QVector<double>>();
} else if (prop.canConvert<QVector<bool>>()) {
*this << prop.value<QVector<bool>>();
} else if (prop.canConvert<QVector<qint32>>()) {
*this << prop.value<QVector<qint32>>();
} else if (prop.canConvert<QVector<qint64>>()) {
*this << prop.value<QVector<qint64>>();
} else {
*this << "<unimplemented value>";
}
break;
}
eol = ",\n";
}
for (auto& child : fbxNode.children) {
*this << eol;
*this << child;
eol = ",\n";
}
*this << "\n" << string(_indentLevel * 4, ' ') << "}";
--_indentLevel;
return *this;
}
string FBXToJSON::stringEscape(const string& in) {
string out;
out.reserve(in.length());
for (unsigned char inChar: in) {
if (inChar == '"') {
out.append(R"(\")");
}
else if (inChar == '\\') {
out.append(R"(\\)");
}
else if (inChar < 0x20 || inChar == 0x7f) {
char h[5];
sprintf(h, "\\x%02x", inChar);
out.append(h);
}
else out.append(1, inChar);
}
return out;
}
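Putting the new class to work is a two-step affair, mirroring the HIFI_DUMP_FBX blocks added to FBXBaker and ModelBaker above (rootNode and the output path here are placeholders):

// Sketch of the dump path used by the bakers above.
FBXToJSON fbxToJSON;
fbxToJSON << rootNode;            // any parsed FBXNode tree
QFile jsonFile("model_FBX.json"); // hypothetical output path
if (jsonFile.open(QIODevice::WriteOnly)) {
    jsonFile.write(fbxToJSON.str().c_str(), fbxToJSON.str().length());
    jsonFile.close();
}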

View file

@@ -0,0 +1,32 @@
//
// FBXToJSON.h
// libraries/fbx/src
//
// Created by Simon Walton on 5/4/2018.
// Copyright 2018 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_FBXToJSON_h
#define hifi_FBXToJSON_h
#include <sstream>
#include <string>
// Forward declarations.
class FBXNode;
template<typename T> class QVector;
class FBXToJSON : public std::ostringstream {
public:
FBXToJSON& operator<<(const FBXNode& fbxNode);
private:
template<typename T> FBXToJSON& operator<<(const QVector<T>& arrayProp);
static std::string stringEscape(const std::string& in);
int _indentLevel { 0 };
};
#endif // hifi_FBXToJSON_h

View file

@@ -62,8 +62,7 @@ QByteArray FBXWriter::encodeFBX(const FBXNode& root) {
out.setVersion(QDataStream::Qt_4_5);
out.writeRawData(FBX_BINARY_PROLOG, FBX_BINARY_PROLOG.size());
auto bytes = QByteArray(FBX_HEADER_BYTES_BEFORE_VERSION - FBX_BINARY_PROLOG.size(), '\0');
out.writeRawData(bytes, bytes.size());
out.writeRawData(FBX_BINARY_PROLOG2, FBX_BINARY_PROLOG2.size());
#ifdef USE_FBX_2016_FORMAT
out << FBX_VERSION_2016;
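For reference, the preamble these two writes produce appears to be the standard 27-byte binary FBX header. The layout below is inferred from the constants in FBX.h above; note the magic string conventionally carries two trailing spaces, which are easy to lose in page rendering:

// Assumed layout of the binary FBX preamble written above (offsets in bytes):
//   0..19  "Kaydara FBX Binary  "   magic, two trailing spaces (FBX_BINARY_PROLOG)
//  20..22  00 1A 00                 separator bytes (FBX_BINARY_PROLOG2)
//  23..26  little-endian quint32    FBX_VERSION_2015 (7400) or FBX_VERSION_2016 (7500)
static_assert(20 + 3 == 23, "prolog bytes must end where the version begins");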

View file

@@ -159,9 +159,13 @@ GLExternalTexture::~GLExternalTexture() {
}
GLVariableAllocationSupport::GLVariableAllocationSupport() {
Backend::textureResourceCount.increment();
}
GLVariableAllocationSupport::~GLVariableAllocationSupport() {
Backend::textureResourceCount.decrement();
Backend::textureResourceGPUMemSize.update(_size, 0);
Backend::textureResourcePopulatedGPUMemSize.update(_populatedSize, 0);
}
void GLVariableAllocationSupport::incrementPopulatedSize(Size delta) const {
@@ -235,7 +239,6 @@ TransferJob::TransferJob(const Texture& texture,
_transferLambda = [=](const TexturePointer& texture) {
if (_mipData) {
auto gltexture = Backend::getGPUObject<GLTexture>(*texture);
;
gltexture->copyMipFaceLinesFromTexture(targetMip, face, transferDimensions, lineOffset, internalFormat, format,
type, _mipData->size(), _mipData->readData());
_mipData.reset();
@@ -246,8 +249,8 @@ TransferJob::TransferJob(const Texture& texture,
};
}
TransferJob::TransferJob(const std::function<void()>& transferLambda) :
_bufferingRequired(false), _transferLambda([=](const TexturePointer&) { transferLambda(); }) {}
TransferJob::TransferJob(uint16_t sourceMip, const std::function<void()>& transferLambda) :
_sourceMip(sourceMip), _bufferingRequired(false), _transferLambda([=](const TexturePointer&) { transferLambda(); }) {}
TransferJob::~TransferJob() {
Backend::texturePendingGPUTransferMemSize.update(_transferSize, 0);

View file

@@ -70,14 +70,16 @@ private:
Texture::PixelsPointer _mipData;
size_t _transferOffset{ 0 };
size_t _transferSize{ 0 };
uint16_t _sourceMip{ 0 };
bool _bufferingRequired{ true };
Lambda _transferLambda{ [](const TexturePointer&) {} };
Lambda _bufferingLambda{ [](const TexturePointer&) {} };
public:
TransferJob(const TransferJob& other) = delete;
TransferJob(const std::function<void()>& transferLambda);
TransferJob(uint16_t sourceMip, const std::function<void()>& transferLambda);
TransferJob(const Texture& texture, uint16_t sourceMip, uint16_t targetMip, uint8_t face, uint32_t lines = 0, uint32_t lineOffset = 0);
~TransferJob();
const uint16_t& sourceMip() const { return _sourceMip; }
const size_t& size() const { return _transferSize; }
bool bufferingRequired() const { return _bufferingRequired; }
void buffer(const TexturePointer& texture) { _bufferingLambda(texture); }
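The sourceMip-tagged constructor added here is what lets the transfer engine discard stale jobs later (see the sourceMip() < populatedMip() guard in the transfer-queue drain loop below). A sketch of how a backend queues the post-transfer bookkeeping with it, following the populateTransferQueue() changes further down (the mip semantics are assumed: lower index means finer detail):

// Sketch, following the GL41/GL45/GLES populateTransferQueue() changes below:
// tag the bookkeeping job with the mip it populates, so a later demotion can
// render it moot and the drain loop can skip it.
pendingTransfers.emplace(new TransferJob(sourceMip, [=] {
    _populatedMip = sourceMip; // record the newly finest populated mip (assumed semantics)
    incrementPopulatedSize(_gpuObject.evalMipSize(sourceMip));
    sanityCheck();
}));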
@@ -96,6 +98,7 @@ public:
virtual void populateTransferQueue(TransferQueue& pendingTransfers) = 0;
void sanityCheck() const;
uint16 populatedMip() const { return _populatedMip; }
bool canPromote() const { return _allocatedMip > _minAllocatedMip; }
bool canDemote() const { return _allocatedMip < _maxAllocatedMip; }
bool hasPendingTransfers() const { return _populatedMip > _allocatedMip; }
@@ -109,7 +112,6 @@ public:
static const size_t MAX_BUFFER_SIZE;
protected:
// The amount of memory currently allocated
Size _size { 0 };

View file

@@ -43,7 +43,6 @@ using QueuePair = std::pair<TextureWeakPointer, float>;
// Uses a weak pointer to the texture to avoid keeping it in scope if the client stops using it
using WorkQueue = std::priority_queue<QueuePair, std::vector<QueuePair>, LessPairSecond<QueuePair>>;
using ImmediateQueuePair = std::pair<TexturePointer, float>;
// Contains a priority sorted list of textures on which work is to be done in the current frame
using ImmediateWorkQueue = std::priority_queue<ImmediateQueuePair, std::vector<ImmediateQueuePair>, LessPairSecond<ImmediateQueuePair>>;
@@ -53,9 +52,10 @@ using TransferMap = std::map<TextureWeakPointer, TransferQueue, std::owner_less<
class GLTextureTransferEngineDefault : public GLTextureTransferEngine {
using Parent = GLTextureTransferEngine;
public:
// Called once per frame by the GLBackend to manage texture memory
// Will deallocate textures if oversubscribed,
// Will deallocate textures if oversubscribed,
void manageMemory() override;
void shutdown() override;
@@ -92,11 +92,11 @@ protected:
private:
std::atomic<bool> _shutdown{ false };
// Contains a priority sorted list of weak texture pointers that have been determined to be eligible for additional allocation
// While the memory state is 'undersubscribed', items will be removed from this list and processed, allocating additional memory
// While the memory state is 'undersubscribed', items will be removed from this list and processed, allocating additional memory
// per frame
WorkQueue _promoteQueue;
// This queue contains jobs that will buffer data from the texture backing store (ideally a memory mapped KTX file)
// to a CPU memory buffer. This queue is populated on the main GPU thread, and drained on a dedicated thread.
// to a CPU memory buffer. This queue is populated on the main GPU thread, and drained on a dedicated thread.
// When an item on the _activeBufferQueue is completed it is put into the _activeTransferQueue
ActiveTransferQueue _activeBufferQueue;
// This queue contains jobs that will upload data from a CPU buffer into a GPU. This queue is populated on the background
@@ -129,16 +129,19 @@ void GLBackend::killTextureManagementStage() {
}
std::vector<TexturePointer> GLTextureTransferEngine::getAllTextures() {
std::remove_if(_registeredTextures.begin(), _registeredTextures.end(), [&](const std::weak_ptr<Texture>& weak) -> bool {
return weak.expired();
});
std::vector<TexturePointer> result;
result.reserve(_registeredTextures.size());
std::remove_if(_registeredTextures.begin(), _registeredTextures.end(), [&](const std::weak_ptr<Texture>& weak)->bool {
for (const auto& weak : _registeredTextures) {
auto strong = weak.lock();
bool strongResult = strong.operator bool();
if (strongResult) {
result.push_back(strong);
if (!strong) {
continue;
}
return strongResult;
});
result.push_back(strong);
}
return result;
}
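One caveat in the rewritten getAllTextures(): std::remove_if on its own only shuffles the surviving elements forward and returns the new logical end; without a paired erase() the expired entries remain in _registeredTextures. The conventional pairing would look like this (a sketch, not what this hunk ships):

// Erase-remove idiom: remove_if compacts the live elements and returns the
// new logical end; erase() then actually shrinks the container.
_registeredTextures.erase(
    std::remove_if(_registeredTextures.begin(), _registeredTextures.end(),
                   [](const std::weak_ptr<Texture>& weak) { return weak.expired(); }),
    _registeredTextures.end());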
@@ -158,13 +161,12 @@ void GLTextureTransferEngineDefault::shutdown() {
#endif
}
void GLTextureTransferEngineDefault::manageMemory() {
PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
// reset the count used to limit the number of textures created per frame
resetFrameTextureCreated();
// Determine the current memory management state. It will be either idle (no work to do),
// undersubscribed (need to do more allocation) or transfer (need to upload content from the
// undersubscribed (need to do more allocation) or transfer (need to upload content from the
// backing store to the GPU
updateMemoryPressure();
if (MemoryPressureState::Undersubscribed == _memoryPressureState) {
@@ -176,7 +178,7 @@ void GLTextureTransferEngineDefault::manageMemory() {
}
}
// Each frame we will check if our memory pressure state has changed.
// Each frame we will check if our memory pressure state has changed.
void GLTextureTransferEngineDefault::updateMemoryPressure() {
PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
@@ -225,7 +227,6 @@ void GLTextureTransferEngineDefault::updateMemoryPressure() {
return;
}
auto newState = MemoryPressureState::Idle;
if (pressure < UNDERSUBSCRIBED_PRESSURE_VALUE && (unallocated != 0 && canPromote)) {
newState = MemoryPressureState::Undersubscribed;
@@ -270,11 +271,10 @@ void GLTextureTransferEngineDefault::processTransferQueues() {
}
#endif
// From the pendingTransferMap, queue jobs into the _activeBufferQueue
// Doing so will lock the weak texture pointer so that it can't be destroyed
// Doing so will lock the weak texture pointer so that it can't be destroyed
// while the background thread is working.
//
//
// This will queue jobs until _queuedBufferSize can't be increased without exceeding
// GLVariableAllocationTexture::MAX_BUFFER_SIZE or there is no more work to be done
populateActiveBufferQueue();
@@ -294,15 +294,19 @@ void GLTextureTransferEngineDefault::processTransferQueues() {
while (!activeTransferQueue.empty()) {
const auto& activeTransferJob = activeTransferQueue.front();
const auto& texturePointer = activeTransferJob.first;
GLTexture* gltexture = Backend::getGPUObject<GLTexture>(*texturePointer);
GLVariableAllocationSupport* vargltexture = dynamic_cast<GLVariableAllocationSupport*>(gltexture);
const auto& transferJob = activeTransferJob.second;
transferJob->transfer(texturePointer);
if (transferJob->sourceMip() < vargltexture->populatedMip()) {
transferJob->transfer(texturePointer);
}
// The pop_front MUST be the last call since all of these variables in scope are
// references that will be invalid after the pop
activeTransferQueue.pop_front();
}
}
// If we have no more work in any of the structures, reset the memory state to idle to
// If we have no more work in any of the structures, reset the memory state to idle to
// force reconstruction of the _pendingTransfersMap if necessary
{
Lock lock(_bufferMutex);
@@ -322,7 +326,7 @@ void GLTextureTransferEngineDefault::populateActiveBufferQueue() {
ActiveTransferQueue newBufferJobs;
size_t newTransferSize{ 0 };
for (auto itr = _pendingTransfersMap.begin(); itr != _pendingTransfersMap.end(); ) {
for (auto itr = _pendingTransfersMap.begin(); itr != _pendingTransfersMap.end();) {
const auto& weakTexture = itr->first;
const auto texture = weakTexture.lock();
@@ -384,7 +388,7 @@ bool GLTextureTransferEngineDefault::processActiveBufferQueue() {
for (const auto& activeJob : activeBufferQueue) {
const auto& texture = activeJob.first;
const auto& transferJob = activeJob.second;
// Some jobs don't require buffering, but they pass through this queue to ensure that we don't re-order
// Some jobs don't require buffering, but they pass through this queue to ensure that we don't re-order
// the buffering & transfer operations. All jobs in the queue must be processed in order.
if (!transferJob->bufferingRequired()) {
continue;
@@ -488,14 +492,14 @@ void GLTextureTransferEngineDefault::processDemotes(size_t reliefRequired, const
// FIXME hack for stats display
QString getTextureMemoryPressureModeString() {
switch (_memoryPressureState) {
case MemoryPressureState::Undersubscribed:
return "Undersubscribed";
case MemoryPressureState::Undersubscribed:
return "Undersubscribed";
case MemoryPressureState::Transfer:
return "Transfer";
case MemoryPressureState::Transfer:
return "Transfer";
case MemoryPressureState::Idle:
return "Idle";
case MemoryPressureState::Idle:
return "Idle";
}
Q_UNREACHABLE();
return "Unknown";

View file

@@ -279,8 +279,6 @@ using GL41VariableAllocationTexture = GL41Backend::GL41VariableAllocationTexture
GL41VariableAllocationTexture::GL41VariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) :
GL41Texture(backend, texture)
{
Backend::textureResourceCount.increment();
auto mipLevels = texture.getNumMips();
_allocatedMip = mipLevels;
_maxAllocatedMip = _populatedMip = mipLevels;
@@ -303,9 +301,6 @@ GL41VariableAllocationTexture::GL41VariableAllocationTexture(const std::weak_ptr
}
GL41VariableAllocationTexture::~GL41VariableAllocationTexture() {
Backend::textureResourceCount.decrement();
Backend::textureResourceGPUMemSize.update(_size, 0);
Backend::textureResourcePopulatedGPUMemSize.update(_populatedSize, 0);
}
void GL41VariableAllocationTexture::allocateStorage(uint16 allocatedMip) {
@@ -605,7 +600,7 @@ void GL41VariableAllocationTexture::populateTransferQueue(TransferQueue& pending
}
// queue up the sampler and populated mip change for after the transfer has completed
pendingTransfers.emplace(new TransferJob([=] {
pendingTransfers.emplace(new TransferJob(sourceMip, [=] {
_populatedMip = sourceMip;
incrementPopulatedSize(_gpuObject.evalMipSize(sourceMip));
sanityCheck();

View file

@@ -31,13 +31,9 @@ using GL45Texture = GL45Backend::GL45Texture;
using GL45VariableAllocationTexture = GL45Backend::GL45VariableAllocationTexture;
GL45VariableAllocationTexture::GL45VariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45Texture(backend, texture) {
Backend::textureResourceCount.increment();
}
GL45VariableAllocationTexture::~GL45VariableAllocationTexture() {
Backend::textureResourceCount.decrement();
Backend::textureResourceGPUMemSize.update(_size, 0);
Backend::textureResourcePopulatedGPUMemSize.update(_populatedSize, 0);
}
#if GPU_BINDLESS_TEXTURES
@@ -286,7 +282,7 @@ void GL45ResourceTexture::populateTransferQueue(TransferQueue& pendingTransfers)
}
// queue up the sampler and populated mip change for after the transfer has completed
pendingTransfers.emplace(new TransferJob([=] {
pendingTransfers.emplace(new TransferJob(sourceMip, [=] {
_populatedMip = sourceMip;
incrementPopulatedSize(_gpuObject.evalMipSize(sourceMip));
sanityCheck();

View file

@@ -341,8 +341,6 @@ using GLESVariableAllocationTexture = GLESBackend::GLESVariableAllocationTexture
GLESVariableAllocationTexture::GLESVariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) :
GLESTexture(backend, texture)
{
Backend::textureResourceCount.increment();
auto mipLevels = texture.getNumMips();
_allocatedMip = mipLevels;
_maxAllocatedMip = _populatedMip = mipLevels;
@@ -366,9 +364,6 @@ GLESVariableAllocationTexture::GLESVariableAllocationTexture(const std::weak_ptr
}
GLESVariableAllocationTexture::~GLESVariableAllocationTexture() {
Backend::textureResourceCount.decrement();
Backend::textureResourceGPUMemSize.update(_size, 0);
Backend::textureResourcePopulatedGPUMemSize.update(_populatedSize, 0);
}
void GLESVariableAllocationTexture::allocateStorage(uint16 allocatedMip) {
@@ -669,7 +664,7 @@ void GLESVariableAllocationTexture::populateTransferQueue(TransferJob::Queue& qu
}
// queue up the sampler and populated mip change for after the transfer has completed
queue.emplace(new TransferJob([=] {
queue.emplace(new TransferJob(sourceMip, [=] {
_populatedMip = sourceMip;
incrementPopulatedSize(_gpuObject.evalMipSize(sourceMip));
sanityCheck();

View file

@@ -19,8 +19,7 @@
#if defined(NSIGHT_FOUND)
#include "nvToolsExt.h"
ProfileRangeBatch::ProfileRangeBatch(gpu::Batch& batch, const char *name) : _batch(batch) {
ProfileRangeBatch::ProfileRangeBatch(gpu::Batch& batch, const char* name) : _batch(batch) {
_batch.pushProfileRange(name);
}
@@ -38,19 +37,15 @@ static const int MAX_NUM_UNIFORM_BUFFERS = 14;
static const int MAX_NUM_RESOURCE_BUFFERS = 16;
static const int MAX_NUM_RESOURCE_TEXTURES = 16;
size_t Batch::_commandsMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_commandOffsetsMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_paramsMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_dataMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_objectsMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_drawCallInfosMax { BATCH_PREALLOCATE_MIN };
size_t Batch::_commandsMax{ BATCH_PREALLOCATE_MIN };
size_t Batch::_commandOffsetsMax{ BATCH_PREALLOCATE_MIN };
size_t Batch::_paramsMax{ BATCH_PREALLOCATE_MIN };
size_t Batch::_dataMax{ BATCH_PREALLOCATE_MIN };
size_t Batch::_objectsMax{ BATCH_PREALLOCATE_MIN };
size_t Batch::_drawCallInfosMax{ BATCH_PREALLOCATE_MIN };
Batch::Batch(const char* name) {
#ifdef DEBUG
if (name) {
_name = name;
}
#endif
_name = name;
_commands.reserve(_commandsMax);
_commandOffsets.reserve(_commandOffsetsMax);
_params.reserve(_paramsMax);
@@ -59,38 +54,6 @@ Batch::Batch(const char* name) {
_drawCallInfos.reserve(_drawCallInfosMax);
}
Batch::Batch(const Batch& batch_) {
Batch& batch = *const_cast<Batch*>(&batch_);
#ifdef DEBUG
_name = batch_._name;
#endif
_commands.swap(batch._commands);
_commandOffsets.swap(batch._commandOffsets);
_params.swap(batch._params);
_data.swap(batch._data);
_invalidModel = batch._invalidModel;
_currentModel = batch._currentModel;
_objects.swap(batch._objects);
_currentNamedCall = batch._currentNamedCall;
_buffers._items.swap(batch._buffers._items);
_textures._items.swap(batch._textures._items);
_textureTables._items.swap(batch._textureTables._items);
_streamFormats._items.swap(batch._streamFormats._items);
_transforms._items.swap(batch._transforms._items);
_pipelines._items.swap(batch._pipelines._items);
_framebuffers._items.swap(batch._framebuffers._items);
_swapChains._items.swap(batch._swapChains._items);
_drawCallInfos.swap(batch._drawCallInfos);
_queries._items.swap(batch._queries._items);
_lambdas._items.swap(batch._lambdas._items);
_profileRanges._items.swap(batch._profileRanges._items);
_names._items.swap(batch._names._items);
_namedData.swap(batch._namedData);
_enableStereo = batch._enableStereo;
_enableSkybox = batch._enableSkybox;
}
Batch::~Batch() {
_commandsMax = std::max(_commands.size(), _commandsMax);
_commandOffsetsMax = std::max(_commandOffsets.size(), _commandOffsetsMax);
@@ -100,6 +63,10 @@ Batch::~Batch() {
_drawCallInfosMax = std::max(_drawCallInfos.size(), _drawCallInfosMax);
}
void Batch::setName(const char* name) {
_name = name;
}
void Batch::clear() {
_commandsMax = std::max(_commands.size(), _commandsMax);
_commandOffsetsMax = std::max(_commandOffsets.size(), _commandOffsetsMax);
@@ -110,18 +77,30 @@ void Batch::clear() {
_commands.clear();
_commandOffsets.clear();
_params.clear();
_data.clear();
_drawCallInfos.clear();
_buffers.clear();
_framebuffers.clear();
_lambdas.clear();
_names.clear();
_namedData.clear();
_objects.clear();
_params.clear();
_pipelines.clear();
_profileRanges.clear();
_queries.clear();
_swapChains.clear();
_streamFormats.clear();
_textures.clear();
_textureTables.clear();
_streamFormats.clear();
_transforms.clear();
_pipelines.clear();
_framebuffers.clear();
_swapChains.clear();
_objects.clear();
_drawCallInfos.clear();
_name = nullptr;
_invalidModel = true;
_currentModel = Transform();
_projectionJitter = glm::vec2(0.0f);
_enableStereo = true;
_enableSkybox = false;
}
size_t Batch::cacheData(size_t size, const void* data) {
@@ -177,7 +156,6 @@ void Batch::drawIndexedInstanced(uint32 numInstances, Primitive primitiveType, u
captureDrawCallInfo();
}
void Batch::multiDrawIndirect(uint32 numCommands, Primitive primitiveType) {
ADD_COMMAND(multiDrawIndirect);
_params.emplace_back(numCommands);
@@ -244,7 +222,6 @@ void Batch::setIndirectBuffer(const BufferPointer& buffer, Offset offset, Offset
_params.emplace_back(stride);
}
void Batch::setModelTransform(const Transform& model) {
ADD_COMMAND(setModelTransform);
@@ -266,19 +243,19 @@ void Batch::setProjectionTransform(const Mat4& proj) {
}
void Batch::setProjectionJitter(float jx, float jy) {
_projectionJitter.x = jx;
_projectionJitter.y = jy;
pushProjectionJitter(jx, jy);
_projectionJitter.x = jx;
_projectionJitter.y = jy;
pushProjectionJitter(jx, jy);
}
void Batch::pushProjectionJitter(float jx, float jy) {
ADD_COMMAND(setProjectionJitter);
_params.emplace_back(jx);
_params.emplace_back(jy);
void Batch::pushProjectionJitter(float jx, float jy) {
ADD_COMMAND(setProjectionJitter);
_params.emplace_back(jx);
_params.emplace_back(jy);
}
void Batch::popProjectionJitter() {
pushProjectionJitter(_projectionJitter.x, _projectionJitter.y);
void Batch::popProjectionJitter() {
pushProjectionJitter(_projectionJitter.x, _projectionJitter.y);
}
void Batch::setViewportTransform(const Vec4i& viewport) {
@@ -374,7 +351,6 @@ void Batch::setFramebuffer(const FramebufferPointer& framebuffer) {
ADD_COMMAND(setFramebuffer);
_params.emplace_back(_framebuffers.cache(framebuffer));
}
void Batch::setFramebufferSwapChain(const FramebufferSwapChainPointer& framebuffer, unsigned int swapChainIndex) {
@@ -487,7 +463,7 @@ void Batch::runLambda(std::function<void()> f) {
void Batch::startNamedCall(const std::string& name) {
ADD_COMMAND(startNamedCall);
_params.emplace_back(_names.cache(name));
_currentNamedCall = name;
}
@@ -556,7 +532,7 @@ void Batch::captureDrawCallInfoImpl() {
TransformObject object;
_currentModel.getMatrix(object._model);
// FIXME - we don't want to be using glm::inverse() here but it fixes the flickering issue we are
// FIXME - we don't want to be using glm::inverse() here but it fixes the flickering issue we are
// seeing with planky blocks in toybox. Our implementation of getInverseMatrix() is buggy in cases
// of non-uniform scale. We need to fix that. In the meantime, glm::inverse() works.
//_model.getInverseMatrix(_object._modelInverse);
@@ -582,9 +558,9 @@ void Batch::captureDrawCallInfo() {
}
void Batch::captureNamedDrawCallInfo(std::string name) {
std::swap(_currentNamedCall, name); // Set and save _currentNamedCall
std::swap(_currentNamedCall, name); // Set and save _currentNamedCall
captureDrawCallInfoImpl();
std::swap(_currentNamedCall, name); // Restore _currentNamedCall
std::swap(_currentNamedCall, name); // Restore _currentNamedCall
}
// Debugging

View file

@@ -92,9 +92,12 @@ public:
void captureNamedDrawCallInfo(std::string name);
Batch(const char* name = nullptr);
Batch(const Batch& batch);
// Disallow copy construction and assignement of batches
Batch(const Batch& batch) = delete;
Batch& operator=(const Batch& batch) = delete;
~Batch();
void setName(const char* name);
void clear();
// Batches may need to override the context level stereo settings
@@ -506,10 +509,7 @@ public:
bool _enableSkybox { false };
protected:
#ifdef DEBUG
std::string _name;
#endif
const char* _name;
friend class Context;
friend class Frame;

View file

@@ -47,6 +47,10 @@ Context::Context(const Context& context) {
}
Context::~Context() {
for (auto batch : _batchPool) {
delete batch;
}
_batchPool.clear();
}
const std::string& Context::getBackendVersion() const {
@@ -65,7 +69,7 @@ void Context::beginFrame(const glm::mat4& renderView, const glm::mat4& renderPos
}
}
void Context::appendFrameBatch(Batch& batch) {
void Context::appendFrameBatch(const BatchPointer& batch) {
if (!_frameActive) {
qWarning() << "Batch executed outside of frame boundaries";
return;
@@ -115,7 +119,7 @@ void Context::executeFrame(const FramePointer& frame) const {
// Execute the frame rendering commands
for (auto& batch : frame->batches) {
_backend->render(batch);
_backend->render(*batch);
}
Batch endBatch("Context::executeFrame::end");
@@ -323,6 +327,7 @@ Size Context::getTextureExternalGPUMemSize() {
uint32_t Context::getTexturePendingGPUTransferCount() {
return Backend::texturePendingGPUTransferCount.getValue();
}
Size Context::getTexturePendingGPUTransferMemSize() {
return Backend::texturePendingGPUTransferMemSize.getValue();
}
@@ -334,3 +339,34 @@ Size Context::getTextureResourcePopulatedGPUMemSize() {
Size Context::getTextureResourceIdealGPUMemSize() {
return Backend::textureResourceIdealGPUMemSize.getValue();
}
BatchPointer Context::acquireBatch(const char* name) {
Batch* rawBatch = nullptr;
{
Lock lock(_batchPoolMutex);
if (!_batchPool.empty()) {
rawBatch = _batchPool.front();
_batchPool.pop_front();
}
}
if (!rawBatch) {
rawBatch = new Batch();
}
rawBatch->setName(name);
return BatchPointer(rawBatch, [this](Batch* batch) { releaseBatch(batch); });
}
void Context::releaseBatch(Batch* batch) {
batch->clear();
Lock lock(_batchPoolMutex);
_batchPool.push_back(batch);
}
void gpu::doInBatch(const char* name,
const std::shared_ptr<gpu::Context>& context,
const std::function<void(Batch& batch)>& f) {
auto batch = context->acquireBatch(name);
f(*batch);
context->appendFrameBatch(batch);
}
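Call sites that previously stack-allocated a gpu::Batch now funnel through this helper; a minimal caller-side sketch (gpuContext, viewport, and the pass name are placeholders):

// Minimal usage of the pooled-batch API added above. The batch comes from
// acquireBatch(), is appended to the current frame, and returns to the pool
// when its shared_ptr releases it.
gpu::doInBatch("myPass", gpuContext, [&](gpu::Batch& batch) {
    batch.enableStereo(false);
    batch.setViewportTransform(viewport);
    // ... record further commands here ...
});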

View file

@@ -43,17 +43,16 @@ public:
int _DSNumTriangles = 0;
int _PSNumSetPipelines = 0;
ContextStats() {}
ContextStats(const ContextStats& stats) = default;
void evalDelta(const ContextStats& begin, const ContextStats& end);
void evalDelta(const ContextStats& begin, const ContextStats& end);
};
class Backend {
public:
virtual~ Backend() {};
virtual ~Backend(){};
virtual const std::string& getVersion() const = 0;
@@ -78,12 +77,11 @@ public:
TransformCamera getEyeCamera(int eye, const StereoState& stereo, const Transform& xformView, Vec2 normalizedJitter) const;
};
template<typename T, typename U>
template <typename T, typename U>
static void setGPUObject(const U& object, T* gpuObject) {
object.gpuObject.setGPUObject(gpuObject);
}
template<typename T, typename U>
template <typename T, typename U>
static T* getGPUObject(const U& object) {
return reinterpret_cast<T*>(object.gpuObject.getGPUObject());
}
@@ -95,26 +93,25 @@ public:
// These should only be accessed by Backend implementation to report the buffer and texture allocations,
// they are NOT public objects
static ContextMetricSize freeGPUMemSize;
static ContextMetricSize freeGPUMemSize;
static ContextMetricCount bufferCount;
static ContextMetricSize bufferGPUMemSize;
static ContextMetricSize bufferGPUMemSize;
static ContextMetricCount textureResidentCount;
static ContextMetricCount textureFramebufferCount;
static ContextMetricCount textureResourceCount;
static ContextMetricCount textureExternalCount;
static ContextMetricSize textureResidentGPUMemSize;
static ContextMetricSize textureFramebufferGPUMemSize;
static ContextMetricSize textureResourceGPUMemSize;
static ContextMetricSize textureExternalGPUMemSize;
static ContextMetricSize textureResidentGPUMemSize;
static ContextMetricSize textureFramebufferGPUMemSize;
static ContextMetricSize textureResourceGPUMemSize;
static ContextMetricSize textureExternalGPUMemSize;
static ContextMetricCount texturePendingGPUTransferCount;
static ContextMetricSize texturePendingGPUTransferMemSize;
static ContextMetricSize textureResourcePopulatedGPUMemSize;
static ContextMetricSize textureResourceIdealGPUMemSize;
static ContextMetricSize texturePendingGPUTransferMemSize;
static ContextMetricSize textureResourcePopulatedGPUMemSize;
static ContextMetricSize textureResourceIdealGPUMemSize;
protected:
virtual bool isStereo() {
@@ -144,7 +141,6 @@ public:
typedef BackendPointer (*CreateBackend)();
typedef bool (*MakeProgram)(Shader& shader, const Shader::BindingSet& bindings, const Shader::CompilationHandler& handler);
// This one call must happen before any context is created or used (Shader::MakeProgram) in order to setup the Backend and any singleton data needed
template <class T>
static void init() {
@@ -161,40 +157,43 @@ public:
const std::string& getBackendVersion() const;
void beginFrame(const glm::mat4& renderView = glm::mat4(), const glm::mat4& renderPose = glm::mat4());
void appendFrameBatch(Batch& batch);
void appendFrameBatch(const BatchPointer& batch);
FramePointer endFrame();
BatchPointer acquireBatch(const char* name = nullptr);
void releaseBatch(Batch* batch);
// MUST only be called on the rendering thread
//
//
// Handle any pending operations to clean up (recycle / deallocate) resources no longer in use
void recycle() const;
// MUST only be called on the rendering thread
//
//
// Execute a batch immediately, rather than as part of a frame
void executeBatch(Batch& batch) const;
// MUST only be called on the rendering thread
//
//
// Executes a frame, applying any updates contained in the frame batches to the rendering
// thread shadow copies. Either executeFrame or consumeFrameUpdates MUST be called on every frame
// generated, IN THE ORDER they were generated.
void executeFrame(const FramePointer& frame) const;
// MUST only be called on the rendering thread.
// MUST only be called on the rendering thread.
//
// Consuming a frame applies any updates queued from the recording thread and applies them to the
// shadow copy used by the rendering thread.
// Consuming a frame applies any updates queued from the recording thread and applies them to the
// shadow copy used by the rendering thread.
//
// EVERY frame generated MUST be consumed, regardless of whether the frame is actually executed,
// or the buffer shadow copies can become unsynced from the recording thread copies.
//
//
// Consuming a frame is idempotent, as the frame encapsulates the updates and clears them out as
// it applies them, so calling it more than once on a given frame will have no effect after the
// it applies them, so calling it more than once on a given frame will have no effect after the
// first time
//
//
// This is automatically called by executeFrame, so you only need to call it if you
// This is automatically called by executeFrame, so you only need to call it if you
// have frames you aren't going to otherwise execute, for instance when a display plugin is
// being disabled, or in the null display plugin where no rendering actually occurs
void consumeFrameUpdates(const FramePointer& frame) const;
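As the comment above spells out, every generated frame must be either executed or explicitly consumed, in generation order. A sketch of the consumer side under those rules (the surrounding control flow is hypothetical):

// Hypothetical consumer honoring the contract documented above: each frame is
// executed (which also applies its updates) or explicitly consumed, in order.
void presentOrDrop(const gpu::ContextPointer& context, const gpu::FramePointer& frame, bool render) {
    if (render) {
        context->executeFrame(frame);        // consumes the frame's updates internally
    } else {
        context->consumeFrameUpdates(frame); // keep shadow copies in sync without rendering
    }
}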
@@ -212,7 +211,7 @@ public:
// It's here for convenience to easily capture a snapshot
void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage);
// Reporting stats of the context
// Reporting stats of the context
void resetStats() const;
void getStats(ContextStats& stats) const;
@ -237,7 +236,7 @@ public:
static Size getTextureGPUMemSize();
static Size getTextureResidentGPUMemSize();
static Size getTextureFramebufferGPUMemSize();
static Size getTextureResourceGPUMemSize();
static Size getTextureResourceGPUMemSize();
static Size getTextureExternalGPUMemSize();
static uint32_t getTexturePendingGPUTransferCount();
@@ -250,10 +249,12 @@ protected:
Context(const Context& context);
std::shared_ptr<Backend> _backend;
bool _frameActive { false };
std::mutex _batchPoolMutex;
std::list<Batch*> _batchPool;
bool _frameActive{ false };
FramePointer _currentFrame;
RangeTimerPointer _frameRangeTimer;
StereoState _stereo;
StereoState _stereo;
// Sampled at the end of every frame, the stats of all the counters
mutable ContextStats _frameStats;
@@ -261,7 +262,7 @@ protected:
// This function can only be called by "static Shader::makeProgram()"
// makeProgramShader(...) make a program shader ready to be used in a Batch.
// It compiles the sub shaders, links them, and defines the Slots and their bindings.
// If the shader passed is not a program, nothing happens.
// If the shader passed is not a program, nothing happens.
static bool makeProgram(Shader& shader, const Shader::BindingSet& bindings, const Shader::CompilationHandler& handler);
static CreateBackend _createBackendCallback;
@@ -273,14 +274,8 @@ protected:
};
typedef std::shared_ptr<Context> ContextPointer;
template<typename F>
void doInBatch(const char* name, std::shared_ptr<gpu::Context> context, F f) {
gpu::Batch batch(name);
f(batch);
context->appendFrameBatch(batch);
}
};
void doInBatch(const char* name, const std::shared_ptr<gpu::Context>& context, const std::function<void(Batch& batch)>& f);
}; // namespace gpu
#endif

View file

@@ -21,6 +21,7 @@ namespace gpu {
using Lock = std::unique_lock<Mutex>;
class Batch;
using BatchPointer = std::shared_ptr<Batch>;
class Backend;
using BackendPointer = std::shared_ptr<Backend>;
class Context;

View file

@@ -28,8 +28,8 @@ Frame::~Frame() {
}
void Frame::finish() {
for (Batch& batch : batches) {
batch.finishFrame(bufferUpdates);
for (const auto& batch : batches) {
batch->finishFrame(bufferUpdates);
}
}

View file

@@ -22,7 +22,7 @@ namespace gpu {
public:
Frame();
virtual ~Frame();
using Batches = std::vector<Batch>;
using Batches = std::vector<BatchPointer>;
using FramebufferRecycler = std::function<void(const FramebufferPointer&)>;
using OverlayRecycler = std::function<void(const TexturePointer&)>;

View file

@@ -767,10 +767,10 @@ bool AddressManager::handleUsername(const QString& lookupString) {
}
bool AddressManager::setHost(const QString& host, LookupTrigger trigger, quint16 port) {
if (host != _domainURL.host() || port != _domainURL.port()) {
bool hostHasChanged = QString::compare(host, _domainURL.host(), Qt::CaseInsensitive);
if (hostHasChanged || port != _domainURL.port()) {
addCurrentAddressToHistory(trigger);
bool emitHostChanged = host != _domainURL.host();
_domainURL = QUrl();
_domainURL.setScheme(URL_SCHEME_HIFI);
_domainURL.setHost(host);
@@ -781,7 +781,7 @@ bool AddressManager::setHost(const QString& host, LookupTrigger trigger, quint16
// any host change should clear the shareable place name
_shareablePlaceName.clear();
if (emitHostChanged) {
if (hostHasChanged) {
emit hostChanged(host);
}
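The switch to QString::compare with Qt::CaseInsensitive makes the host check tolerant of case-only differences in domain names; since compare() returns an int that is zero on a match, the nonzero result doubles as the "changed" flag. A tiny illustration (hypothetical hosts):

// QString::compare returns 0 when the strings match (here, ignoring case),
// so a nonzero result means the host genuinely changed.
bool changed = QString::compare(QStringLiteral("Playa.highfidelity.com"),
                                QStringLiteral("playa.highfidelity.com"),
                                Qt::CaseInsensitive) != 0; // false: same host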

View file

@@ -738,21 +738,20 @@ void RenderDeferred::run(const RenderContextPointer& renderContext, const Inputs
}
auto previousBatch = args->_batch;
gpu::Batch batch;
args->_batch = &batch;
_gpuTimer->begin(batch);
gpu::doInBatch(nullptr, args->_context, [&](gpu::Batch& batch) {
args->_batch = &batch;
_gpuTimer->begin(batch);
setupJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, haze, surfaceGeometryFramebuffer, ssaoFramebuffer, subsurfaceScatteringResource);
setupJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, haze, surfaceGeometryFramebuffer, ssaoFramebuffer, subsurfaceScatteringResource);
lightsJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, surfaceGeometryFramebuffer, lightClusters);
lightsJob.run(renderContext, deferredTransform, deferredFramebuffer, lightingModel, surfaceGeometryFramebuffer, lightClusters);
cleanupJob.run(renderContext);
cleanupJob.run(renderContext);
_gpuTimer->end(batch);
args->_context->appendFrameBatch(batch);
_gpuTimer->end(batch);
});
args->_batch = previousBatch;
auto config = std::static_pointer_cast<Config>(renderContext->jobConfig);
config->setGPUBatchRunTime(_gpuTimer->getGPUAverage(), _gpuTimer->getBatchAverage());
}

View file

@@ -707,85 +707,83 @@ void DebugLightClusters::run(const render::RenderContextPointer& renderContext,
auto args = renderContext->args;
gpu::Batch batch;
gpu::doInBatch(nullptr, args->_context, [&](gpu::Batch& batch) {
batch.enableStereo(false);
batch.enableStereo(false);
// Assign the camera transform
batch.setViewportTransform(args->_viewport);
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat, true);
// Assign the camera transform
batch.setViewportTransform(args->_viewport);
glm::mat4 projMat;
Transform viewMat;
args->getViewFrustum().evalProjectionMatrix(projMat);
args->getViewFrustum().evalViewTransform(viewMat);
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat, true);
// Then the actual ClusterGrid attributes
batch.setModelTransform(Transform());
// Then the actual ClusterGrid attributes
batch.setModelTransform(Transform());
// Bind the Light Cluster data structure
batch.setUniformBuffer(LIGHT_GPU_SLOT, lightClusters->_lightStage->getLightArrayBuffer());
batch.setUniformBuffer(LIGHT_CLUSTER_GRID_FRUSTUM_GRID_SLOT, lightClusters->_frustumGridBuffer);
batch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT, lightClusters->_clusterGridBuffer);
batch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT, lightClusters->_clusterContentBuffer);
// Bind the Light Cluster data structure
batch.setUniformBuffer(LIGHT_GPU_SLOT, lightClusters->_lightStage->getLightArrayBuffer());
batch.setUniformBuffer(LIGHT_CLUSTER_GRID_FRUSTUM_GRID_SLOT, lightClusters->_frustumGridBuffer);
batch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT, lightClusters->_clusterGridBuffer);
batch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT, lightClusters->_clusterContentBuffer);
if (doDrawClusterFromDepth) {
batch.setPipeline(getDrawClusterFromDepthPipeline());
batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, deferredTransform->getFrameTransformBuffer());
if (doDrawClusterFromDepth) {
batch.setPipeline(getDrawClusterFromDepthPipeline());
batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, deferredTransform->getFrameTransformBuffer());
if (linearDepthTarget) {
batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, linearDepthTarget->getLinearDepthTexture());
if (linearDepthTarget) {
batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, linearDepthTarget->getLinearDepthTexture());
}
batch.draw(gpu::TRIANGLE_STRIP, 4, 0);
batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, nullptr);
batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, nullptr);
}
batch.draw(gpu::TRIANGLE_STRIP, 4, 0);
if (doDrawContent) {
// bind the one gpu::Pipeline we need
batch.setPipeline(getDrawClusterContentPipeline());
batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, deferredTransform->getFrameTransformBuffer());
if (linearDepthTarget) {
batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, linearDepthTarget->getLinearDepthTexture());
}
batch.draw(gpu::TRIANGLE_STRIP, 4, 0);
batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, nullptr);
batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, nullptr);
}
batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, nullptr);
batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, nullptr);
}
});
if (doDrawContent) {
// bind the one gpu::Pipeline we need
batch.setPipeline(getDrawClusterContentPipeline());
batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, deferredTransform->getFrameTransformBuffer());
if (linearDepthTarget) {
batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, linearDepthTarget->getLinearDepthTexture());
gpu::doInBatch(nullptr, args->_context, [&](gpu::Batch& batch) {
auto& drawGridAndCleanBatch = batch;
if (doDrawGrid) {
// bind the one gpu::Pipeline we need
drawGridAndCleanBatch.setPipeline(getDrawClusterGridPipeline());
auto dims = lightClusters->_frustumGridBuffer->dims;
glm::ivec3 summedDims(dims.x * dims.y * dims.z, dims.x * dims.y, dims.x);
drawGridAndCleanBatch.drawInstanced(summedDims.x, gpu::LINES, 24, 0);
}
batch.draw(gpu::TRIANGLE_STRIP, 4, 0);
batch.setResourceTexture(DEFERRED_BUFFER_LINEAR_DEPTH_UNIT, nullptr);
batch.setUniformBuffer(DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT, nullptr);
}
drawGridAndCleanBatch.setUniformBuffer(LIGHT_GPU_SLOT, nullptr);
drawGridAndCleanBatch.setUniformBuffer(LIGHT_CLUSTER_GRID_FRUSTUM_GRID_SLOT, nullptr);
drawGridAndCleanBatch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT, nullptr);
drawGridAndCleanBatch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT, nullptr);
gpu::Batch drawGridAndCleanBatch;
if (doDrawGrid) {
// bind the one gpu::Pipeline we need
drawGridAndCleanBatch.setPipeline(getDrawClusterGridPipeline());
auto dims = lightClusters->_frustumGridBuffer->dims;
glm::ivec3 summedDims(dims.x*dims.y * dims.z, dims.x*dims.y, dims.x);
drawGridAndCleanBatch.drawInstanced(summedDims.x, gpu::LINES, 24, 0);
}
drawGridAndCleanBatch.setUniformBuffer(LIGHT_GPU_SLOT, nullptr);
drawGridAndCleanBatch.setUniformBuffer(LIGHT_CLUSTER_GRID_FRUSTUM_GRID_SLOT, nullptr);
drawGridAndCleanBatch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT, nullptr);
drawGridAndCleanBatch.setUniformBuffer(LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT, nullptr);
drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, nullptr);
drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, nullptr);
drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, nullptr);
drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, nullptr);
args->_context->appendFrameBatch(batch);
args->_context->appendFrameBatch(drawGridAndCleanBatch);
drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, nullptr);
drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, nullptr);
drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, nullptr);
drawGridAndCleanBatch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, nullptr);
});
}