Merge pull request #9958 from highfidelity/smarter_textures
New texture streaming system
Commit 73227f6598
86 changed files with 4684 additions and 1837 deletions
@@ -24,7 +24,7 @@
#include <QtCore/QString>

#include <SharedUtil.h>
-#include <ServerPathUtils.h>
+#include <PathUtils.h>

#include "NetworkLogging.h"
#include "NodeType.h"
@@ -162,7 +162,7 @@ void AssetServer::completeSetup() {
    if (assetsPath.isRelative()) {
        // if the domain settings passed us a relative path, make an absolute path that is relative to the
        // default data directory
-        absoluteFilePath = ServerPathUtils::getDataFilePath("assets/" + assetsPathString);
+        absoluteFilePath = PathUtils::getAppDataFilePath("assets/" + assetsPathString);
    }

    _resourcesDirectory = QDir(absoluteFilePath);
@@ -29,7 +29,7 @@
#include "OctreeQueryNode.h"
#include "OctreeServerConsts.h"
#include <QtCore/QStandardPaths>
-#include <ServerPathUtils.h>
+#include <PathUtils.h>
#include <QtCore/QDir>

int OctreeServer::_clientCount = 0;
@@ -279,8 +279,7 @@ OctreeServer::~OctreeServer() {

void OctreeServer::initHTTPManager(int port) {
    // setup the embedded web server

-    QString documentRoot = QString("%1/web").arg(ServerPathUtils::getDataDirectory());
+    QString documentRoot = QString("%1/web").arg(PathUtils::getAppDataPath());

    // setup an httpManager with us as the request handler and the parent
    _httpManager = new HTTPManager(QHostAddress::AnyIPv4, port, documentRoot, this, this);
@@ -1179,7 +1178,7 @@ void OctreeServer::domainSettingsRequestComplete() {
    if (persistPath.isRelative()) {
        // if the domain settings passed us a relative path, make an absolute path that is relative to the
        // default data directory
-        persistAbsoluteFilePath = QDir(ServerPathUtils::getDataFilePath("entities/")).absoluteFilePath(_persistFilePath);
+        persistAbsoluteFilePath = QDir(PathUtils::getAppDataFilePath("entities/")).absoluteFilePath(_persistFilePath);
    }

    static const QString ENTITY_PERSIST_EXTENSION = ".json.gz";
@@ -1245,7 +1244,7 @@ void OctreeServer::domainSettingsRequestComplete() {
    QDir backupDirectory { _backupDirectoryPath };
    QString absoluteBackupDirectory;
    if (backupDirectory.isRelative()) {
-        absoluteBackupDirectory = QDir(ServerPathUtils::getDataFilePath("entities/")).absoluteFilePath(_backupDirectoryPath);
+        absoluteBackupDirectory = QDir(PathUtils::getAppDataFilePath("entities/")).absoluteFilePath(_backupDirectoryPath);
        absoluteBackupDirectory = QDir(absoluteBackupDirectory).absolutePath();
    } else {
        absoluteBackupDirectory = backupDirectory.absolutePath();
@@ -38,7 +38,7 @@
#include <ShutdownEventListener.h>
#include <UUID.h>
#include <LogHandler.h>
-#include <ServerPathUtils.h>
+#include <PathUtils.h>
#include <NumericalConstants.h>

#include "DomainServerNodeData.h"
@@ -1618,7 +1618,7 @@ QJsonObject DomainServer::jsonObjectForNode(const SharedNodePointer& node) {
QDir pathForAssignmentScriptsDirectory() {
    static const QString SCRIPTS_DIRECTORY_NAME = "/scripts/";

-    QDir directory(ServerPathUtils::getDataDirectory() + SCRIPTS_DIRECTORY_NAME);
+    QDir directory(PathUtils::getAppDataPath() + SCRIPTS_DIRECTORY_NAME);
    if (!directory.exists()) {
        directory.mkpath(".");
        qInfo() << "Created path to " << directory.path();
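All three server-side hunks above make the same substitution: the assignment-client, octree server and domain-server stop resolving their data directories through ServerPathUtils and use the shared PathUtils::getAppDataPath() / getAppDataFilePath() helpers instead. A minimal sketch of the resulting pattern, assuming only those two helpers; the wrapper function and variable names are illustrative, not part of the diff:

    #include <QtCore/QDir>
    #include <PathUtils.h>

    // Resolve a possibly-relative path from domain settings against the
    // per-application data directory, as the servers above now do.
    QString resolveDataPath(const QString& settingsPath) {
        QDir path(settingsPath);
        if (path.isRelative()) {
            // e.g. "assets/" becomes "<app data dir>/assets/"
            return PathUtils::getAppDataFilePath(settingsPath);
        }
        return path.absolutePath();
    }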
@@ -189,7 +189,7 @@ endif()

# link required hifi libraries
link_hifi_libraries(
-  shared octree gpu gl gpu-gl procedural model render
+  shared octree ktx gpu gl gpu-gl procedural model render
  recording fbx networking model-networking entities avatars
  audio audio-client animation script-engine physics
  render-utils entities-renderer ui auto-updater
@@ -266,7 +266,7 @@ Item {
    text: "GPU Textures: ";
}
StatText {
-    text: " Sparse Enabled: " + (0 == root.gpuSparseTextureEnabled ? "false" : "true");
+    text: " Pressure State: " + root.gpuTextureMemoryPressureState;
}
StatText {
    text: " Count: " + root.gpuTextures;
@@ -278,14 +278,10 @@ Item {
    text: " Decimated: " + root.decimatedTextureCount;
}
StatText {
-    text: " Sparse Count: " + root.gpuTexturesSparse;
-    visible: 0 != root.gpuSparseTextureEnabled;
+    text: " Pending Transfer: " + root.texturePendingTransfers + " MB";
}
StatText {
    text: " Virtual Memory: " + root.gpuTextureVirtualMemory + " MB";
}
StatText {
-    text: " Commited Memory: " + root.gpuTextureMemory + " MB";
+    text: " Resource Memory: " + root.gpuTextureMemory + " MB";
}
StatText {
    text: " Framebuffer Memory: " + root.gpuTextureFramebufferMemory + " MB";
@@ -1444,7 +1444,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
    scriptEngines->loadScript(testScript, false);
} else {
    // Get sandbox content set version, if available
-    auto acDirPath = PathUtils::getRootDataDirectory() + BuildInfo::MODIFIED_ORGANIZATION + "/assignment-client/";
+    auto acDirPath = PathUtils::getAppDataPath() + "../../" + BuildInfo::MODIFIED_ORGANIZATION + "/assignment-client/";
    auto contentVersionPath = acDirPath + "content-version.txt";
    qCDebug(interfaceapp) << "Checking " << contentVersionPath << " for content version";
    auto contentVersion = 0;
@@ -95,7 +95,7 @@ void ApplicationOverlay::renderQmlUi(RenderArgs* renderArgs) {
    PROFILE_RANGE(app, __FUNCTION__);

    if (!_uiTexture) {
-        _uiTexture = gpu::TexturePointer(gpu::Texture::createExternal2D(OffscreenQmlSurface::getDiscardLambda()));
+        _uiTexture = gpu::TexturePointer(gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda()));
        _uiTexture->setSource(__FUNCTION__);
    }
    // Once we move UI rendering and screen rendering to different
@@ -229,13 +229,13 @@ void ApplicationOverlay::buildFramebufferObject() {
    auto width = uiSize.x;
    auto height = uiSize.y;
    if (!_overlayFramebuffer->getDepthStencilBuffer()) {
-        auto overlayDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(DEPTH_FORMAT, width, height, DEFAULT_SAMPLER));
+        auto overlayDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(DEPTH_FORMAT, width, height, DEFAULT_SAMPLER));
        _overlayFramebuffer->setDepthStencilBuffer(overlayDepthTexture, DEPTH_FORMAT);
    }

    if (!_overlayFramebuffer->getRenderBuffer(0)) {
        const gpu::Sampler OVERLAY_SAMPLER(gpu::Sampler::FILTER_MIN_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP);
-        auto colorBuffer = gpu::TexturePointer(gpu::Texture::create2D(COLOR_FORMAT, width, height, OVERLAY_SAMPLER));
+        auto colorBuffer = gpu::TexturePointer(gpu::Texture::createRenderBuffer(COLOR_FORMAT, width, height, OVERLAY_SAMPLER));
        _overlayFramebuffer->setRenderBuffer(0, colorBuffer);
    }
}
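The overlay no longer builds everything with the generic gpu::Texture::create2D(): framebuffer attachments go through createRenderBuffer() and the QML UI surface through createExternal(), so the new streaming code can treat them differently from streamed resource textures. A condensed sketch of the two call shapes, using only signatures that appear in this diff (DEPTH_FORMAT, DEFAULT_SAMPLER, width and height are the surrounding code's values):

    // Framebuffer attachment: owned by the GPU library, never streamed.
    auto depthTexture = gpu::TexturePointer(
        gpu::Texture::createRenderBuffer(DEPTH_FORMAT, width, height, DEFAULT_SAMPLER));

    // Offscreen QML surface: the texture is produced externally and handed
    // back through the discard lambda once the GPU library is done with it.
    auto uiTexture = gpu::TexturePointer(
        gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda()));
    uiTexture->setSource("overlay UI");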
@@ -38,6 +38,8 @@ using namespace std;

static Stats* INSTANCE{ nullptr };

+QString getTextureMemoryPressureModeString();

Stats* Stats::getInstance() {
    if (!INSTANCE) {
        Stats::registerType();
@@ -340,10 +342,12 @@ void Stats::updateStats(bool force) {
    STAT_UPDATE(glContextSwapchainMemory, (int)BYTES_TO_MB(gl::Context::getSwapchainMemoryUsage()));

    STAT_UPDATE(qmlTextureMemory, (int)BYTES_TO_MB(OffscreenQmlSurface::getUsedTextureMemory()));
+    STAT_UPDATE(texturePendingTransfers, (int)BYTES_TO_MB(gpu::Texture::getTextureTransferPendingSize()));
    STAT_UPDATE(gpuTextureMemory, (int)BYTES_TO_MB(gpu::Texture::getTextureGPUMemoryUsage()));
    STAT_UPDATE(gpuTextureVirtualMemory, (int)BYTES_TO_MB(gpu::Texture::getTextureGPUVirtualMemoryUsage()));
    STAT_UPDATE(gpuTextureFramebufferMemory, (int)BYTES_TO_MB(gpu::Texture::getTextureGPUFramebufferMemoryUsage()));
    STAT_UPDATE(gpuTextureSparseMemory, (int)BYTES_TO_MB(gpu::Texture::getTextureGPUSparseMemoryUsage()));
+    STAT_UPDATE(gpuTextureMemoryPressureState, getTextureMemoryPressureModeString());
    STAT_UPDATE(gpuSparseTextureEnabled, gpuContext->getBackend()->isTextureManagementSparseEnabled() ? 1 : 0);
    STAT_UPDATE(gpuFreeMemory, (int)BYTES_TO_MB(gpu::Context::getFreeGPUMemory()));
    STAT_UPDATE(rectifiedTextureCount, (int)RECTIFIED_TEXTURE_COUNT.load());
@@ -117,11 +117,13 @@ class Stats : public QQuickItem {
    STATS_PROPERTY(int, gpuTexturesSparse, 0)
    STATS_PROPERTY(int, glContextSwapchainMemory, 0)
    STATS_PROPERTY(int, qmlTextureMemory, 0)
+    STATS_PROPERTY(int, texturePendingTransfers, 0)
    STATS_PROPERTY(int, gpuTextureMemory, 0)
    STATS_PROPERTY(int, gpuTextureVirtualMemory, 0)
    STATS_PROPERTY(int, gpuTextureFramebufferMemory, 0)
    STATS_PROPERTY(int, gpuTextureSparseMemory, 0)
    STATS_PROPERTY(int, gpuSparseTextureEnabled, 0)
+    STATS_PROPERTY(QString, gpuTextureMemoryPressureState, QString())
    STATS_PROPERTY(int, gpuFreeMemory, 0)
    STATS_PROPERTY(float, gpuFrameTime, 0)
    STATS_PROPERTY(float, batchFrameTime, 0)
@@ -232,6 +234,7 @@ signals:
    void timingStatsChanged();
    void glContextSwapchainMemoryChanged();
    void qmlTextureMemoryChanged();
+    void texturePendingTransfersChanged();
    void gpuBuffersChanged();
    void gpuBufferMemoryChanged();
    void gpuTexturesChanged();
@@ -240,6 +243,7 @@ signals:
    void gpuTextureVirtualMemoryChanged();
    void gpuTextureFramebufferMemoryChanged();
    void gpuTextureSparseMemoryChanged();
+    void gpuTextureMemoryPressureStateChanged();
    void gpuSparseTextureEnabledChanged();
    void gpuFreeMemoryChanged();
    void gpuFrameTimeChanged();
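Every STATS_PROPERTY added here is paired with a *Changed() signal further down and is fed from Stats::updateStats() through STAT_UPDATE (see the Stats.cpp hunks above). A condensed sketch of that pairing for the two new texture counters; everything outside the lines shown in the diff is assumed:

    class Stats : public QQuickItem {
        Q_OBJECT
        // values displayed by the QML stats overlay
        STATS_PROPERTY(int, texturePendingTransfers, 0)
        STATS_PROPERTY(QString, gpuTextureMemoryPressureState, QString())
    signals:
        // notify QML whenever the matching property is updated
        void texturePendingTransfersChanged();
        void gpuTextureMemoryPressureStateChanged();
    };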
@@ -260,7 +260,7 @@ void Web3DOverlay::render(RenderArgs* args) {

    if (!_texture) {
        auto webSurface = _webSurface;
-        _texture = gpu::TexturePointer(gpu::Texture::createExternal2D(OffscreenQmlSurface::getDiscardLambda()));
+        _texture = gpu::TexturePointer(gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda()));
        _texture->setSource(__FUNCTION__);
    }
    OffscreenQmlSurface::TextureAndFence newTextureAndFence;
@@ -355,14 +355,16 @@ void OpenGLDisplayPlugin::customizeContext() {
        if ((image.width() > 0) && (image.height() > 0)) {

            cursorData.texture.reset(
-                gpu::Texture::create2D(
+                gpu::Texture::createStrict(
                    gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA),
                    image.width(), image.height(),
                    gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
            cursorData.texture->setSource("cursor texture");
+            auto usage = gpu::Texture::Usage::Builder().withColor().withAlpha();
+            cursorData.texture->setUsage(usage.build());
-            cursorData.texture->assignStoredMip(0, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA), image.byteCount(), image.constBits());
+            cursorData.texture->setStoredMipFormat(gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
+            cursorData.texture->assignStoredMip(0, image.byteCount(), image.constBits());
            cursorData.texture->autoGenerateMips(-1);
        }
    }
}
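The cursor texture illustrates the new upload sequence for static images: allocate immutable ("strict") storage, declare the stored mip format once, hand over mip 0, and let the texture derive the remaining levels. A sketch assembled only from the calls visible in this hunk; image is assumed to be a QImage already converted to RGBA8888, and the local RGBA8 alias is just shorthand:

    const gpu::Element RGBA8(gpu::VEC4, gpu::NUINT8, gpu::RGBA);
    auto texture = gpu::TexturePointer(gpu::Texture::createStrict(
        RGBA8, image.width(), image.height(),
        gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
    texture->setSource("cursor texture");
    texture->setUsage(gpu::Texture::Usage::Builder().withColor().withAlpha().build());
    texture->setStoredMipFormat(RGBA8);
    texture->assignStoredMip(0, image.byteCount(), image.constBits());
    texture->autoGenerateMips(-1);   // generate the remaining mip levels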
@@ -296,33 +296,32 @@ void HmdDisplayPlugin::internalPresent() {
        image = image.convertToFormat(QImage::Format_RGBA8888);
        if (!_previewTexture) {
            _previewTexture.reset(
-                gpu::Texture::create2D(
+                gpu::Texture::createStrict(
                    gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA),
                    image.width(), image.height(),
                    gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
            _previewTexture->setSource("HMD Preview Texture");
+            _previewTexture->setUsage(gpu::Texture::Usage::Builder().withColor().build());
-            _previewTexture->assignStoredMip(0, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA), image.byteCount(), image.constBits());
+            _previewTexture->setStoredMipFormat(gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
+            _previewTexture->assignStoredMip(0, image.byteCount(), image.constBits());
            _previewTexture->autoGenerateMips(-1);
        }

-        if (getGLBackend()->isTextureReady(_previewTexture)) {
-            auto viewport = getViewportForSourceSize(uvec2(_previewTexture->getDimensions()));
+        auto viewport = getViewportForSourceSize(uvec2(_previewTexture->getDimensions()));

-            render([&](gpu::Batch& batch) {
-                batch.enableStereo(false);
-                batch.resetViewTransform();
-                batch.setFramebuffer(gpu::FramebufferPointer());
-                batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
-                batch.setStateScissorRect(viewport);
-                batch.setViewportTransform(viewport);
-                batch.setResourceTexture(0, _previewTexture);
-                batch.setPipeline(_presentPipeline);
-                batch.draw(gpu::TRIANGLE_STRIP, 4);
-            });
-            _clearPreviewFlag = false;
-            swapBuffers();
-        }
+        render([&](gpu::Batch& batch) {
+            batch.enableStereo(false);
+            batch.resetViewTransform();
+            batch.setFramebuffer(gpu::FramebufferPointer());
+            batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, vec4(0));
+            batch.setStateScissorRect(viewport);
+            batch.setViewportTransform(viewport);
+            batch.setResourceTexture(0, _previewTexture);
+            batch.setPipeline(_presentPipeline);
+            batch.draw(gpu::TRIANGLE_STRIP, 4);
+        });
+        _clearPreviewFlag = false;
+        swapBuffers();
    }
    postPreview();
@@ -216,7 +216,7 @@ void RenderableWebEntityItem::render(RenderArgs* args) {

    if (!_texture) {
        auto webSurface = _webSurface;
-        _texture = gpu::TexturePointer(gpu::Texture::createExternal2D(OffscreenQmlSurface::getDiscardLambda()));
+        _texture = gpu::TexturePointer(gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda()));
        _texture->setSource(__FUNCTION__);
    }
    OffscreenQmlSurface::TextureAndFence newTextureAndFence;
@@ -1468,6 +1468,9 @@ FBXGeometry* FBXReader::extractFBXGeometry(const QVariantHash& mapping, const QS
    // Create the Material Library
    consolidateFBXMaterials(mapping);

+    // We can't allow the scaling of a given image to different sizes, because the hash used for the KTX cache is based on the original image
+    // Allowing scaling of the same image to different sizes would cause different KTX files to target the same cache key
+#if 0
    // HACK: until we get proper LOD management we're going to cap model textures
    // according to how many unique textures the model uses:
    // 1 - 8 textures --> 2048
@@ -1481,6 +1484,7 @@ FBXGeometry* FBXReader::extractFBXGeometry(const QVariantHash& mapping, const QS
    int numTextures = uniqueTextures.size();
    const int MAX_NUM_TEXTURES_AT_MAX_RESOLUTION = 8;
    int maxWidth = sqrt(MAX_NUM_PIXELS_FOR_FBX_TEXTURE);

    if (numTextures > MAX_NUM_TEXTURES_AT_MAX_RESOLUTION) {
        int numTextureThreshold = MAX_NUM_TEXTURES_AT_MAX_RESOLUTION;
        const int MIN_MIP_TEXTURE_WIDTH = 64;
@@ -1494,7 +1498,7 @@ FBXGeometry* FBXReader::extractFBXGeometry(const QVariantHash& mapping, const QS
            material.setMaxNumPixelsPerTexture(maxWidth * maxWidth);
        }
    }

+#endif
    geometry.materials = _fbxMaterials;

    // see if any materials have texture children
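The newly added comment explains why the old resolution cap is now fenced off with #if 0: the KTX cache key is derived from the original image, so scaling the same source to model-dependent sizes would break the one-to-one mapping between a source image and its cached KTX file. A hypothetical illustration of that constraint; the hash function below is an assumption for illustration, not code from this PR:

    #include <QtCore/QCryptographicHash>

    // Hypothetical: if the cache key depends only on the source bytes, every
    // model referencing the same image must request the same, unscaled texture;
    // per-model rescaling would make one key ambiguous across stored sizes.
    QByteArray ktxCacheKey(const QByteArray& sourceImageBytes) {
        return QCryptographicHash::hash(sourceImageBytes, QCryptographicHash::Md5);
    }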
@@ -62,8 +62,6 @@ BackendPointer GLBackend::createBackend() {
    INSTANCE = result.get();
    void* voidInstance = &(*result);
    qApp->setProperty(hifi::properties::gl::BACKEND, QVariant::fromValue(voidInstance));

-    gl::GLTexture::initTextureTransferHelper();
    return result;
}

@@ -209,7 +207,7 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
        }
    }

-    { // Sync all the buffers
+    { // Sync all the transform states
        PROFILE_RANGE(render_gpu_gl_detail, "syncCPUTransform");
        _transform._cameras.clear();
        _transform._cameraOffsets.clear();
@@ -277,7 +275,7 @@ void GLBackend::renderPassDraw(const Batch& batch) {
            updateInput();
            updateTransform(batch);
            updatePipeline();

            CommandCall call = _commandCalls[(*command)];
            (this->*(call))(batch, *offset);
            break;
@@ -623,6 +621,7 @@ void GLBackend::queueLambda(const std::function<void()> lambda) const {
}

void GLBackend::recycle() const {
+    PROFILE_RANGE(render_gpu_gl, __FUNCTION__)
    {
        std::list<std::function<void()>> lamdbasTrash;
        {
@@ -745,10 +744,6 @@ void GLBackend::recycle() const {
        glDeleteQueries((GLsizei)ids.size(), ids.data());
    }
}

-#ifndef THREADED_TEXTURE_TRANSFER
-    gl::GLTexture::_textureTransferHelper->process();
-#endif
}

void GLBackend::setCameraCorrection(const Mat4& correction) {
@@ -187,10 +187,15 @@ public:
    virtual void do_setStateScissorRect(const Batch& batch, size_t paramOffset) final;

    virtual GLuint getFramebufferID(const FramebufferPointer& framebuffer) = 0;
-    virtual GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) = 0;
+    virtual GLuint getTextureID(const TexturePointer& texture) final;
    virtual GLuint getBufferID(const Buffer& buffer) = 0;
    virtual GLuint getQueryID(const QueryPointer& query) = 0;
-    virtual bool isTextureReady(const TexturePointer& texture);

+    virtual GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) = 0;
+    virtual GLBuffer* syncGPUObject(const Buffer& buffer) = 0;
+    virtual GLTexture* syncGPUObject(const TexturePointer& texture);
+    virtual GLQuery* syncGPUObject(const Query& query) = 0;
+    //virtual bool isTextureReady(const TexturePointer& texture);

    virtual void releaseBuffer(GLuint id, Size size) const;
    virtual void releaseExternalTexture(GLuint id, const Texture::ExternalRecycler& recycler) const;
@@ -206,10 +211,6 @@ public:
protected:

    void recycle() const override;
-    virtual GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) = 0;
-    virtual GLBuffer* syncGPUObject(const Buffer& buffer) = 0;
-    virtual GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) = 0;
-    virtual GLQuery* syncGPUObject(const Query& query) = 0;

    static const size_t INVALID_OFFSET = (size_t)-1;
    bool _inRenderTransferPass { false };
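The header change moves the transfer decision out of the caller: the old getTextureID(texture, needTransfer) and syncGPUObject(texture, sync) pure virtuals become a final getTextureID(texture) plus a base-class syncGPUObject(texturePointer) that derived backends extend. A condensed sketch of the new shape, taken from the GL backend texture hunk that follows:

    // Old: callers decided whether a transfer should happen.
    //     GLuint id = getTextureID(texture, /* needTransfer = */ false);
    // New: callers just ask; syncGPUObject() decides what work is needed.
    GLuint GLBackend::getTextureID(const TexturePointer& texture) {
        GLTexture* object = syncGPUObject(texture);
        return object ? object->_id : 0;
    }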
@@ -14,12 +14,56 @@
using namespace gpu;
using namespace gpu::gl;

-bool GLBackend::isTextureReady(const TexturePointer& texture) {
-    // DO not transfer the texture, this call is expected for rendering texture
-    GLTexture* object = syncGPUObject(texture, true);
-    return object && object->isReady();

+GLuint GLBackend::getTextureID(const TexturePointer& texture) {
+    GLTexture* object = syncGPUObject(texture);
+
+    if (!object) {
+        return 0;
+    }
+
+    return object->_id;
+}
+
+GLTexture* GLBackend::syncGPUObject(const TexturePointer& texturePointer) {
+    const Texture& texture = *texturePointer;
+    // Special case external textures
+    if (TextureUsageType::EXTERNAL == texture.getUsageType()) {
+        Texture::ExternalUpdates updates = texture.getUpdates();
+        if (!updates.empty()) {
+            Texture::ExternalRecycler recycler = texture.getExternalRecycler();
+            Q_ASSERT(recycler);
+            // Discard any superfluous updates
+            while (updates.size() > 1) {
+                const auto& update = updates.front();
+                // Superfluous updates will never have been read, but we want to ensure the previous
+                // writes to them are complete before they're written again, so return them with the
+                // same fences they arrived with. This can happen on any thread because no GL context
+                // work is involved
+                recycler(update.first, update.second);
+                updates.pop_front();
+            }
+
+            // The last texture remaining is the one we'll use to create the GLTexture
+            const auto& update = updates.front();
+            // Check for a fence, and if it exists, inject a wait into the command stream, then destroy the fence
+            if (update.second) {
+                GLsync fence = static_cast<GLsync>(update.second);
+                glWaitSync(fence, 0, GL_TIMEOUT_IGNORED);
+                glDeleteSync(fence);
+            }
+
+            // Create the new texture object (replaces any previous texture object)
+            new GLExternalTexture(shared_from_this(), texture, update.first);
+        }
+
+        // Return the texture object (if any) associated with the texture, without extensive logic
+        // (external textures are
+        return Backend::getGPUObject<GLTexture>(texture);
+    }
+
+    return nullptr;
+}

void GLBackend::do_generateTextureMips(const Batch& batch, size_t paramOffset) {
    TexturePointer resourceTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);
@@ -28,7 +72,7 @@ void GLBackend::do_generateTextureMips(const Batch& batch, size_t paramOffset) {
    }

    // DO not transfer the texture, this call is expected for rendering texture
-    GLTexture* object = syncGPUObject(resourceTexture, false);
+    GLTexture* object = syncGPUObject(resourceTexture);
    if (!object) {
        return;
    }
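GLBackend::syncGPUObject() above is the consumer side of the external-texture contract: the producer queues (texture id, fence) pairs, the backend keeps only the newest pair, waits on its fence, wraps the GL id in a GLExternalTexture, and returns superseded pairs through the recycler untouched. A sketch of what a producer-side recycler could look like under those assumptions; the lambda body is illustrative and not part of this PR:

    // Assumed producer-side recycler: invoked with a GL texture id and the
    // fence guarding its last use, mirroring recycler(update.first, update.second)
    // in the hunk above.
    gpu::Texture::ExternalRecycler recycler = [](uint32_t textureId, void* fence) {
        if (fence) {
            GLsync sync = static_cast<GLsync>(fence);
            glWaitSync(sync, 0, GL_TIMEOUT_IGNORED);   // honour the fence before reuse
            glDeleteSync(sync);
        }
        // textureId can now go back into the producer's pool (pool omitted here).
    };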
@@ -21,13 +21,12 @@ GLFramebuffer::~GLFramebuffer() {
    }
}

-bool GLFramebuffer::checkStatus(GLenum target) const {
-    bool result = false;
+bool GLFramebuffer::checkStatus() const {
    switch (_status) {
        case GL_FRAMEBUFFER_COMPLETE:
            // Success !
-            result = true;
-            break;
+            return true;

        case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
            qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.";
            break;
@@ -44,5 +43,5 @@ bool GLFramebuffer::checkStatus(GLenum target) const {
            qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_UNSUPPORTED.";
            break;
    }
-    return result;
+    return false;
}
@@ -64,7 +64,7 @@ public:
protected:
    GLenum _status { GL_FRAMEBUFFER_COMPLETE };
    virtual void update() = 0;
-    bool checkStatus(GLenum target) const;
+    bool checkStatus() const;

    GLFramebuffer(const std::weak_ptr<GLBackend>& backend, const Framebuffer& framebuffer, GLuint id) : GLObject(backend, framebuffer, id) {}
    ~GLFramebuffer();
@@ -17,6 +17,7 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
    switch (dstFormat.getDimension()) {
        case gpu::SCALAR: {
            switch (dstFormat.getSemantic()) {
                case gpu::RED:
                case gpu::RGB:
                case gpu::RGBA:
                case gpu::SRGB:
@@ -262,6 +263,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
    texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];

    switch (dstFormat.getSemantic()) {
        case gpu::RED:
        case gpu::RGB:
        case gpu::RGBA:
            texel.internalFormat = GL_R8;
@@ -272,8 +274,10 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
            break;

        case gpu::DEPTH:
            texel.format = GL_DEPTH_COMPONENT;
            texel.internalFormat = GL_DEPTH_COMPONENT32;
            break;

        case gpu::DEPTH_STENCIL:
            texel.type = GL_UNSIGNED_INT_24_8;
            texel.format = GL_DEPTH_STENCIL;
@@ -403,6 +407,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
                    texel.internalFormat = GL_COMPRESSED_RED_RGTC1;
                    break;
                }
                case gpu::RED:
                case gpu::RGB:
                case gpu::RGBA:
                case gpu::SRGB:
@ -10,15 +10,13 @@
|
|||
|
||||
#include <NumericalConstants.h>
|
||||
|
||||
#include "GLTextureTransfer.h"
|
||||
#include "GLBackend.h"
|
||||
|
||||
using namespace gpu;
|
||||
using namespace gpu::gl;
|
||||
|
||||
std::shared_ptr<GLTextureTransferHelper> GLTexture::_textureTransferHelper;
|
||||
|
||||
const GLenum GLTexture::CUBE_FACE_LAYOUT[6] = {
|
||||
const GLenum GLTexture::CUBE_FACE_LAYOUT[GLTexture::TEXTURE_CUBE_NUM_FACES] = {
|
||||
GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
|
||||
GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
|
||||
GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
|
||||
|
@ -67,6 +65,17 @@ GLenum GLTexture::getGLTextureType(const Texture& texture) {
|
|||
}
|
||||
|
||||
|
||||
uint8_t GLTexture::getFaceCount(GLenum target) {
|
||||
switch (target) {
|
||||
case GL_TEXTURE_2D:
|
||||
return TEXTURE_2D_NUM_FACES;
|
||||
case GL_TEXTURE_CUBE_MAP:
|
||||
return TEXTURE_CUBE_NUM_FACES;
|
||||
default:
|
||||
Q_UNREACHABLE();
|
||||
break;
|
||||
}
|
||||
}
|
||||
const std::vector<GLenum>& GLTexture::getFaceTargets(GLenum target) {
|
||||
static std::vector<GLenum> cubeFaceTargets {
|
||||
GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
|
||||
|
@ -89,216 +98,34 @@ const std::vector<GLenum>& GLTexture::getFaceTargets(GLenum target) {
|
|||
return faceTargets;
|
||||
}
|
||||
|
||||
// Default texture memory = GPU total memory - 2GB
|
||||
#define GPU_MEMORY_RESERVE_BYTES MB_TO_BYTES(2048)
|
||||
// Minimum texture memory = 1GB
|
||||
#define TEXTURE_MEMORY_MIN_BYTES MB_TO_BYTES(1024)
|
||||
|
||||
|
||||
float GLTexture::getMemoryPressure() {
|
||||
// Check for an explicit memory limit
|
||||
auto availableTextureMemory = Texture::getAllowedGPUMemoryUsage();
|
||||
|
||||
|
||||
// If no memory limit has been set, use a percentage of the total dedicated memory
|
||||
if (!availableTextureMemory) {
|
||||
#if 0
|
||||
auto totalMemory = getDedicatedMemory();
|
||||
if ((GPU_MEMORY_RESERVE_BYTES + TEXTURE_MEMORY_MIN_BYTES) > totalMemory) {
|
||||
availableTextureMemory = TEXTURE_MEMORY_MIN_BYTES;
|
||||
} else {
|
||||
availableTextureMemory = totalMemory - GPU_MEMORY_RESERVE_BYTES;
|
||||
}
|
||||
#else
|
||||
// Hardcode texture limit for sparse textures at 1 GB for now
|
||||
availableTextureMemory = TEXTURE_MEMORY_MIN_BYTES;
|
||||
#endif
|
||||
}
|
||||
|
||||
// Return the consumed texture memory divided by the available texture memory.
|
||||
auto consumedGpuMemory = Context::getTextureGPUMemoryUsage() - Context::getTextureGPUFramebufferMemoryUsage();
|
||||
float memoryPressure = (float)consumedGpuMemory / (float)availableTextureMemory;
|
||||
static Context::Size lastConsumedGpuMemory = 0;
|
||||
if (memoryPressure > 1.0f && lastConsumedGpuMemory != consumedGpuMemory) {
|
||||
lastConsumedGpuMemory = consumedGpuMemory;
|
||||
qCDebug(gpugllogging) << "Exceeded max allowed texture memory: " << consumedGpuMemory << " / " << availableTextureMemory;
|
||||
}
|
||||
return memoryPressure;
|
||||
}
|
||||
|
||||
|
||||
// Create the texture and allocate storage
|
||||
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint id, bool transferrable) :
|
||||
GLObject(backend, texture, id),
|
||||
_external(false),
|
||||
_source(texture.source()),
|
||||
_storageStamp(texture.getStamp()),
|
||||
_target(getGLTextureType(texture)),
|
||||
_internalFormat(gl::GLTexelFormat::evalGLTexelFormatInternal(texture.getTexelFormat())),
|
||||
_maxMip(texture.maxMip()),
|
||||
_minMip(texture.minMip()),
|
||||
_virtualSize(texture.evalTotalSize()),
|
||||
_transferrable(transferrable)
|
||||
{
|
||||
auto strongBackend = _backend.lock();
|
||||
strongBackend->recycle();
|
||||
Backend::incrementTextureGPUCount();
|
||||
Backend::updateTextureGPUVirtualMemoryUsage(0, _virtualSize);
|
||||
Backend::setGPUObject(texture, this);
|
||||
}
|
||||
|
||||
GLTexture::GLTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint id) :
|
||||
GLObject(backend, texture, id),
|
||||
_external(true),
|
||||
_source(texture.source()),
|
||||
_storageStamp(0),
|
||||
_target(getGLTextureType(texture)),
|
||||
_internalFormat(GL_RGBA8),
|
||||
// FIXME force mips to 0?
|
||||
_maxMip(texture.maxMip()),
|
||||
_minMip(texture.minMip()),
|
||||
_virtualSize(0),
|
||||
_transferrable(false)
|
||||
_target(getGLTextureType(texture))
|
||||
{
|
||||
Backend::setGPUObject(texture, this);
|
||||
|
||||
// FIXME Is this necessary?
|
||||
//withPreservedTexture([this] {
|
||||
// syncSampler();
|
||||
// if (_gpuObject.isAutogenerateMips()) {
|
||||
// generateMips();
|
||||
// }
|
||||
//});
|
||||
}
|
||||
|
||||
GLTexture::~GLTexture() {
|
||||
auto backend = _backend.lock();
|
||||
if (backend && _id) {
|
||||
backend->releaseTexture(_id, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
GLExternalTexture::GLExternalTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint id)
|
||||
: Parent(backend, texture, id) { }
|
||||
|
||||
GLExternalTexture::~GLExternalTexture() {
|
||||
auto backend = _backend.lock();
|
||||
if (backend) {
|
||||
if (_external) {
|
||||
auto recycler = _gpuObject.getExternalRecycler();
|
||||
if (recycler) {
|
||||
backend->releaseExternalTexture(_id, recycler);
|
||||
} else {
|
||||
qWarning() << "No recycler available for texture " << _id << " possible leak";
|
||||
}
|
||||
} else if (_id) {
|
||||
// WARNING! Sparse textures do not use this code path. See GL45BackendTexture for
|
||||
// the GL45Texture destructor for doing any required work tracking GPU stats
|
||||
backend->releaseTexture(_id, _size);
|
||||
auto recycler = _gpuObject.getExternalRecycler();
|
||||
if (recycler) {
|
||||
backend->releaseExternalTexture(_id, recycler);
|
||||
} else {
|
||||
qWarning() << "No recycler available for texture " << _id << " possible leak";
|
||||
}
|
||||
|
||||
if (!_external && !_transferrable) {
|
||||
Backend::updateTextureGPUFramebufferMemoryUsage(_size, 0);
|
||||
}
|
||||
}
|
||||
Backend::updateTextureGPUVirtualMemoryUsage(_virtualSize, 0);
|
||||
}
|
||||
|
||||
void GLTexture::createTexture() {
|
||||
withPreservedTexture([&] {
|
||||
allocateStorage();
|
||||
(void)CHECK_GL_ERROR();
|
||||
syncSampler();
|
||||
(void)CHECK_GL_ERROR();
|
||||
});
|
||||
}
|
||||
|
||||
void GLTexture::withPreservedTexture(std::function<void()> f) const {
|
||||
GLint boundTex = -1;
|
||||
switch (_target) {
|
||||
case GL_TEXTURE_2D:
|
||||
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
|
||||
break;
|
||||
|
||||
case GL_TEXTURE_CUBE_MAP:
|
||||
glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
|
||||
break;
|
||||
|
||||
default:
|
||||
qFatal("Unsupported texture type");
|
||||
}
|
||||
(void)CHECK_GL_ERROR();
|
||||
|
||||
glBindTexture(_target, _texture);
|
||||
f();
|
||||
glBindTexture(_target, boundTex);
|
||||
(void)CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void GLTexture::setSize(GLuint size) const {
|
||||
if (!_external && !_transferrable) {
|
||||
Backend::updateTextureGPUFramebufferMemoryUsage(_size, size);
|
||||
}
|
||||
Backend::updateTextureGPUMemoryUsage(_size, size);
|
||||
const_cast<GLuint&>(_size) = size;
|
||||
}
|
||||
|
||||
bool GLTexture::isInvalid() const {
|
||||
return _storageStamp < _gpuObject.getStamp();
|
||||
}
|
||||
|
||||
bool GLTexture::isOutdated() const {
|
||||
return GLSyncState::Idle == _syncState && _contentStamp < _gpuObject.getDataStamp();
|
||||
}
|
||||
|
||||
bool GLTexture::isReady() const {
|
||||
// If we have an invalid texture, we're never ready
|
||||
if (isInvalid()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
auto syncState = _syncState.load();
|
||||
if (isOutdated() || Idle != syncState) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
// Do any post-transfer operations that might be required on the main context / rendering thread
|
||||
void GLTexture::postTransfer() {
|
||||
setSyncState(GLSyncState::Idle);
|
||||
++_transferCount;
|
||||
|
||||
// At this point the mip pixels have been loaded, we can notify the gpu texture to abandon it's memory
|
||||
switch (_gpuObject.getType()) {
|
||||
case Texture::TEX_2D:
|
||||
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
|
||||
if (_gpuObject.isStoredMipFaceAvailable(i)) {
|
||||
_gpuObject.notifyMipFaceGPULoaded(i);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case Texture::TEX_CUBE:
|
||||
// transfer pixels from each faces
|
||||
for (uint8_t f = 0; f < CUBE_NUM_FACES; f++) {
|
||||
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
|
||||
if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
|
||||
_gpuObject.notifyMipFaceGPULoaded(i, f);
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
qCWarning(gpugllogging) << __FUNCTION__ << " case for Texture Type " << _gpuObject.getType() << " not supported";
|
||||
break;
|
||||
const_cast<GLuint&>(_id) = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void GLTexture::initTextureTransferHelper() {
|
||||
_textureTransferHelper = std::make_shared<GLTextureTransferHelper>();
|
||||
}
|
||||
|
||||
void GLTexture::startTransfer() {
|
||||
createTexture();
|
||||
}
|
||||
|
||||
void GLTexture::finishTransfer() {
|
||||
if (_gpuObject.isAutogenerateMips()) {
|
||||
generateMips();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -9,7 +9,6 @@
|
|||
#define hifi_gpu_gl_GLTexture_h
|
||||
|
||||
#include "GLShared.h"
|
||||
#include "GLTextureTransfer.h"
|
||||
#include "GLBackend.h"
|
||||
#include "GLTexelFormat.h"
|
||||
|
||||
|
@ -20,210 +19,48 @@ struct GLFilterMode {
|
|||
GLint magFilter;
|
||||
};
|
||||
|
||||
|
||||
class GLTexture : public GLObject<Texture> {
|
||||
using Parent = GLObject<Texture>;
|
||||
friend class GLBackend;
|
||||
public:
|
||||
static const uint16_t INVALID_MIP { (uint16_t)-1 };
|
||||
static const uint8_t INVALID_FACE { (uint8_t)-1 };
|
||||
|
||||
static void initTextureTransferHelper();
|
||||
static std::shared_ptr<GLTextureTransferHelper> _textureTransferHelper;
|
||||
|
||||
template <typename GLTextureType>
|
||||
static GLTexture* sync(GLBackend& backend, const TexturePointer& texturePointer, bool needTransfer) {
|
||||
const Texture& texture = *texturePointer;
|
||||
|
||||
// Special case external textures
|
||||
if (texture.getUsage().isExternal()) {
|
||||
Texture::ExternalUpdates updates = texture.getUpdates();
|
||||
if (!updates.empty()) {
|
||||
Texture::ExternalRecycler recycler = texture.getExternalRecycler();
|
||||
Q_ASSERT(recycler);
|
||||
// Discard any superfluous updates
|
||||
while (updates.size() > 1) {
|
||||
const auto& update = updates.front();
|
||||
// Superfluous updates will never have been read, but we want to ensure the previous
|
||||
// writes to them are complete before they're written again, so return them with the
|
||||
// same fences they arrived with. This can happen on any thread because no GL context
|
||||
// work is involved
|
||||
recycler(update.first, update.second);
|
||||
updates.pop_front();
|
||||
}
|
||||
|
||||
// The last texture remaining is the one we'll use to create the GLTexture
|
||||
const auto& update = updates.front();
|
||||
// Check for a fence, and if it exists, inject a wait into the command stream, then destroy the fence
|
||||
if (update.second) {
|
||||
GLsync fence = static_cast<GLsync>(update.second);
|
||||
glWaitSync(fence, 0, GL_TIMEOUT_IGNORED);
|
||||
glDeleteSync(fence);
|
||||
}
|
||||
|
||||
// Create the new texture object (replaces any previous texture object)
|
||||
new GLTextureType(backend.shared_from_this(), texture, update.first);
|
||||
}
|
||||
|
||||
// Return the texture object (if any) associated with the texture, without extensive logic
|
||||
// (external textures are
|
||||
return Backend::getGPUObject<GLTextureType>(texture);
|
||||
}
|
||||
|
||||
if (!texture.isDefined()) {
|
||||
// NO texture definition yet so let's avoid thinking
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// If the object hasn't been created, or the object definition is out of date, drop and re-create
|
||||
GLTexture* object = Backend::getGPUObject<GLTextureType>(texture);
|
||||
|
||||
// Create the texture if need be (force re-creation if the storage stamp changes
|
||||
// for easier use of immutable storage)
|
||||
if (!object || object->isInvalid()) {
|
||||
// This automatically any previous texture
|
||||
object = new GLTextureType(backend.shared_from_this(), texture, needTransfer);
|
||||
if (!object->_transferrable) {
|
||||
object->createTexture();
|
||||
object->_contentStamp = texture.getDataStamp();
|
||||
object->updateSize();
|
||||
object->postTransfer();
|
||||
}
|
||||
}
|
||||
|
||||
// Object maybe doens't neet to be tranasferred after creation
|
||||
if (!object->_transferrable) {
|
||||
return object;
|
||||
}
|
||||
|
||||
// If we just did a transfer, return the object after doing post-transfer work
|
||||
if (GLSyncState::Transferred == object->getSyncState()) {
|
||||
object->postTransfer();
|
||||
}
|
||||
|
||||
if (object->isOutdated()) {
|
||||
// Object might be outdated, if so, start the transfer
|
||||
// (outdated objects that are already in transfer will have reported 'true' for ready()
|
||||
_textureTransferHelper->transferTexture(texturePointer);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (!object->isReady()) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
((GLTexture*)object)->updateMips();
|
||||
|
||||
return object;
|
||||
}
|
||||
|
||||
template <typename GLTextureType>
|
||||
static GLuint getId(GLBackend& backend, const TexturePointer& texture, bool shouldSync) {
|
||||
if (!texture) {
|
||||
return 0;
|
||||
}
|
||||
GLTexture* object { nullptr };
|
||||
if (shouldSync) {
|
||||
object = sync<GLTextureType>(backend, texture, shouldSync);
|
||||
} else {
|
||||
object = Backend::getGPUObject<GLTextureType>(*texture);
|
||||
}
|
||||
|
||||
if (!object) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!shouldSync) {
|
||||
return object->_id;
|
||||
}
|
||||
|
||||
// Don't return textures that are in transfer state
|
||||
if ((object->getSyncState() != GLSyncState::Idle) ||
|
||||
// Don't return transferrable textures that have never completed transfer
|
||||
(!object->_transferrable || 0 != object->_transferCount)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return object->_id;
|
||||
}
|
||||
|
||||
~GLTexture();
|
||||
|
||||
// Is this texture generated outside the GPU library?
|
||||
const bool _external;
|
||||
const GLuint& _texture { _id };
|
||||
const std::string _source;
|
||||
const Stamp _storageStamp;
|
||||
const GLenum _target;
|
||||
const GLenum _internalFormat;
|
||||
const uint16 _maxMip;
|
||||
uint16 _minMip;
|
||||
const GLuint _virtualSize; // theoretical size as expected
|
||||
Stamp _contentStamp { 0 };
|
||||
const bool _transferrable;
|
||||
Size _transferCount { 0 };
|
||||
GLuint size() const { return _size; }
|
||||
GLSyncState getSyncState() const { return _syncState; }
|
||||
|
||||
// Is the storage out of date relative to the gpu texture?
|
||||
bool isInvalid() const;
|
||||
static const std::vector<GLenum>& getFaceTargets(GLenum textureType);
|
||||
static uint8_t getFaceCount(GLenum textureType);
|
||||
static GLenum getGLTextureType(const Texture& texture);
|
||||
|
||||
// Is the content out of date relative to the gpu texture?
|
||||
bool isOutdated() const;
|
||||
|
||||
// Is the texture in a state where it can be rendered with no work?
|
||||
bool isReady() const;
|
||||
|
||||
// Execute any post-move operations that must occur only on the main thread
|
||||
virtual void postTransfer();
|
||||
|
||||
uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }
|
||||
|
||||
static const size_t CUBE_NUM_FACES = 6;
|
||||
static const GLenum CUBE_FACE_LAYOUT[6];
|
||||
static const uint8_t TEXTURE_2D_NUM_FACES = 1;
|
||||
static const uint8_t TEXTURE_CUBE_NUM_FACES = 6;
|
||||
static const GLenum CUBE_FACE_LAYOUT[TEXTURE_CUBE_NUM_FACES];
|
||||
static const GLFilterMode FILTER_MODES[Sampler::NUM_FILTERS];
|
||||
static const GLenum WRAP_MODES[Sampler::NUM_WRAP_MODES];
|
||||
|
||||
// Return a floating point value indicating how much of the allowed
|
||||
// texture memory we are currently consuming. A value of 0 indicates
|
||||
// no texture memory usage, while a value of 1 indicates all available / allowed memory
|
||||
// is consumed. A value above 1 indicates that there is a problem.
|
||||
static float getMemoryPressure();
|
||||
protected:
|
||||
|
||||
static const std::vector<GLenum>& getFaceTargets(GLenum textureType);
|
||||
|
||||
static GLenum getGLTextureType(const Texture& texture);
|
||||
|
||||
|
||||
const GLuint _size { 0 }; // true size as reported by the gl api
|
||||
std::atomic<GLSyncState> _syncState { GLSyncState::Idle };
|
||||
|
||||
GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id, bool transferrable);
|
||||
GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id);
|
||||
|
||||
void setSyncState(GLSyncState syncState) { _syncState = syncState; }
|
||||
|
||||
void createTexture();
|
||||
|
||||
virtual void updateMips() {}
|
||||
virtual void allocateStorage() const = 0;
|
||||
virtual void updateSize() const = 0;
|
||||
virtual void syncSampler() const = 0;
|
||||
virtual uint32 size() const = 0;
|
||||
virtual void generateMips() const = 0;
|
||||
virtual void withPreservedTexture(std::function<void()> f) const;
|
||||
|
||||
protected:
|
||||
void setSize(GLuint size) const;
|
||||
|
||||
virtual void startTransfer();
|
||||
// Returns true if this is the last block required to complete transfer
|
||||
virtual bool continueTransfer() { return false; }
|
||||
virtual void finishTransfer();
|
||||
|
||||
private:
|
||||
friend class GLTextureTransferHelper;
|
||||
friend class GLBackend;
|
||||
GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id);
|
||||
};
|
||||
|
||||
class GLExternalTexture : public GLTexture {
|
||||
using Parent = GLTexture;
|
||||
friend class GLBackend;
|
||||
public:
|
||||
~GLExternalTexture();
|
||||
protected:
|
||||
GLExternalTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id);
|
||||
void generateMips() const override {}
|
||||
uint32 size() const override { return 0; }
|
||||
};
|
||||
|
||||
|
||||
} }
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1,208 +0,0 @@
|
|||
//
|
||||
// Created by Bradley Austin Davis on 2016/04/03
|
||||
// Copyright 2013-2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "GLTextureTransfer.h"
|
||||
|
||||
#include <gl/GLHelpers.h>
|
||||
#include <gl/Context.h>
|
||||
|
||||
#include <gpu/GPULogging.h>
|
||||
|
||||
#include "GLShared.h"
|
||||
#include "GLTexture.h"
|
||||
|
||||
#ifdef HAVE_NSIGHT
|
||||
#include "nvToolsExt.h"
|
||||
std::unordered_map<TexturePointer, nvtxRangeId_t> _map;
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef TEXTURE_TRANSFER_PBOS
|
||||
#define TEXTURE_TRANSFER_BLOCK_SIZE (64 * 1024)
|
||||
#define TEXTURE_TRANSFER_PBO_COUNT 128
|
||||
#endif
|
||||
|
||||
using namespace gpu;
|
||||
using namespace gpu::gl;
|
||||
|
||||
GLTextureTransferHelper::GLTextureTransferHelper() {
|
||||
#ifdef THREADED_TEXTURE_TRANSFER
|
||||
setObjectName("TextureTransferThread");
|
||||
_context.create();
|
||||
initialize(true, QThread::LowPriority);
|
||||
// Clean shutdown on UNIX, otherwise _canvas is freed early
|
||||
connect(qApp, &QCoreApplication::aboutToQuit, [&] { terminate(); });
|
||||
#else
|
||||
initialize(false, QThread::LowPriority);
|
||||
#endif
|
||||
}
|
||||
|
||||
GLTextureTransferHelper::~GLTextureTransferHelper() {
|
||||
#ifdef THREADED_TEXTURE_TRANSFER
|
||||
if (isStillRunning()) {
|
||||
terminate();
|
||||
}
|
||||
#else
|
||||
terminate();
|
||||
#endif
|
||||
}
|
||||
|
||||
void GLTextureTransferHelper::transferTexture(const gpu::TexturePointer& texturePointer) {
|
||||
GLTexture* object = Backend::getGPUObject<GLTexture>(*texturePointer);
|
||||
|
||||
Backend::incrementTextureGPUTransferCount();
|
||||
object->setSyncState(GLSyncState::Pending);
|
||||
Lock lock(_mutex);
|
||||
_pendingTextures.push_back(texturePointer);
|
||||
}
|
||||
|
||||
void GLTextureTransferHelper::setup() {
|
||||
#ifdef THREADED_TEXTURE_TRANSFER
|
||||
_context.makeCurrent();
|
||||
|
||||
#ifdef TEXTURE_TRANSFER_FORCE_DRAW
|
||||
// FIXME don't use opengl 4.5 DSA functionality without verifying it's present
|
||||
glCreateRenderbuffers(1, &_drawRenderbuffer);
|
||||
glNamedRenderbufferStorage(_drawRenderbuffer, GL_RGBA8, 128, 128);
|
||||
glCreateFramebuffers(1, &_drawFramebuffer);
|
||||
glNamedFramebufferRenderbuffer(_drawFramebuffer, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, _drawRenderbuffer);
|
||||
glCreateFramebuffers(1, &_readFramebuffer);
|
||||
#endif
|
||||
|
||||
#ifdef TEXTURE_TRANSFER_PBOS
|
||||
std::array<GLuint, TEXTURE_TRANSFER_PBO_COUNT> pbos;
|
||||
glCreateBuffers(TEXTURE_TRANSFER_PBO_COUNT, &pbos[0]);
|
||||
for (uint32_t i = 0; i < TEXTURE_TRANSFER_PBO_COUNT; ++i) {
|
||||
TextureTransferBlock newBlock;
|
||||
newBlock._pbo = pbos[i];
|
||||
glNamedBufferStorage(newBlock._pbo, TEXTURE_TRANSFER_BLOCK_SIZE, 0, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
|
||||
newBlock._mapped = glMapNamedBufferRange(newBlock._pbo, 0, TEXTURE_TRANSFER_BLOCK_SIZE, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
|
||||
_readyQueue.push(newBlock);
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
void GLTextureTransferHelper::shutdown() {
|
||||
#ifdef THREADED_TEXTURE_TRANSFER
|
||||
_context.makeCurrent();
|
||||
#endif
|
||||
|
||||
#ifdef TEXTURE_TRANSFER_FORCE_DRAW
|
||||
glNamedFramebufferRenderbuffer(_drawFramebuffer, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, 0);
|
||||
glDeleteFramebuffers(1, &_drawFramebuffer);
|
||||
_drawFramebuffer = 0;
|
||||
glDeleteFramebuffers(1, &_readFramebuffer);
|
||||
_readFramebuffer = 0;
|
||||
|
||||
glNamedFramebufferTexture(_readFramebuffer, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0);
|
||||
glDeleteRenderbuffers(1, &_drawRenderbuffer);
|
||||
_drawRenderbuffer = 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
void GLTextureTransferHelper::queueExecution(VoidLambda lambda) {
|
||||
Lock lock(_mutex);
|
||||
_pendingCommands.push_back(lambda);
|
||||
}
|
||||
|
||||
#define MAX_TRANSFERS_PER_PASS 2
|
||||
|
||||
bool GLTextureTransferHelper::process() {
|
||||
// Take any new textures or commands off the queue
|
||||
VoidLambdaList pendingCommands;
|
||||
TextureList newTransferTextures;
|
||||
{
|
||||
Lock lock(_mutex);
|
||||
newTransferTextures.swap(_pendingTextures);
|
||||
pendingCommands.swap(_pendingCommands);
|
||||
}
|
||||
|
||||
if (!pendingCommands.empty()) {
|
||||
for (auto command : pendingCommands) {
|
||||
command();
|
||||
}
|
||||
glFlush();
|
||||
}
|
||||
|
||||
if (!newTransferTextures.empty()) {
|
||||
for (auto& texturePointer : newTransferTextures) {
|
||||
#ifdef HAVE_NSIGHT
|
||||
_map[texturePointer] = nvtxRangeStart("TextureTansfer");
|
||||
#endif
|
||||
GLTexture* object = Backend::getGPUObject<GLTexture>(*texturePointer);
|
||||
object->startTransfer();
|
||||
_transferringTextures.push_back(texturePointer);
|
||||
_textureIterator = _transferringTextures.begin();
|
||||
}
|
||||
_transferringTextures.sort([](const gpu::TexturePointer& a, const gpu::TexturePointer& b)->bool {
|
||||
return a->getSize() < b->getSize();
|
||||
});
|
||||
}
|
||||
|
||||
// No transfers in progress, sleep
|
||||
if (_transferringTextures.empty()) {
|
||||
#ifdef THREADED_TEXTURE_TRANSFER
|
||||
QThread::usleep(1);
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
PROFILE_COUNTER_IF_CHANGED(render_gpu_gl, "transferringTextures", int, (int) _transferringTextures.size())
|
||||
|
||||
static auto lastReport = usecTimestampNow();
|
||||
auto now = usecTimestampNow();
|
||||
auto lastReportInterval = now - lastReport;
|
||||
if (lastReportInterval > USECS_PER_SECOND * 4) {
|
||||
lastReport = now;
|
||||
qCDebug(gpulogging) << "Texture list " << _transferringTextures.size();
|
||||
}
|
||||
|
||||
size_t transferCount = 0;
|
||||
for (_textureIterator = _transferringTextures.begin(); _textureIterator != _transferringTextures.end();) {
|
||||
if (++transferCount > MAX_TRANSFERS_PER_PASS) {
|
||||
break;
|
||||
}
|
||||
auto texture = *_textureIterator;
|
||||
GLTexture* gltexture = Backend::getGPUObject<GLTexture>(*texture);
|
||||
if (gltexture->continueTransfer()) {
|
||||
++_textureIterator;
|
||||
continue;
|
||||
}
|
||||
|
||||
gltexture->finishTransfer();
|
||||
|
||||
#ifdef TEXTURE_TRANSFER_FORCE_DRAW
|
||||
// FIXME force a draw on the texture transfer thread before passing the texture to the main thread for use
|
||||
#endif
|
||||
|
||||
#ifdef THREADED_TEXTURE_TRANSFER
|
||||
clientWait();
|
||||
#endif
|
||||
gltexture->_contentStamp = gltexture->_gpuObject.getDataStamp();
|
||||
gltexture->updateSize();
|
||||
gltexture->setSyncState(gpu::gl::GLSyncState::Transferred);
|
||||
Backend::decrementTextureGPUTransferCount();
|
||||
#ifdef HAVE_NSIGHT
|
||||
// Mark the texture as transferred
|
||||
nvtxRangeEnd(_map[texture]);
|
||||
_map.erase(texture);
|
||||
#endif
|
||||
_textureIterator = _transferringTextures.erase(_textureIterator);
|
||||
}
|
||||
|
||||
#ifdef THREADED_TEXTURE_TRANSFER
|
||||
if (!_transferringTextures.empty()) {
|
||||
// Don't saturate the GPU
|
||||
clientWait();
|
||||
} else {
|
||||
// Don't saturate the CPU
|
||||
QThread::msleep(1);
|
||||
}
|
||||
#endif
|
||||
|
||||
return true;
|
||||
}
|
|
@ -1,78 +0,0 @@
|
|||
//
|
||||
// Created by Bradley Austin Davis on 2016/04/03
|
||||
// Copyright 2013-2016 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#ifndef hifi_gpu_gl_GLTextureTransfer_h
|
||||
#define hifi_gpu_gl_GLTextureTransfer_h
|
||||
|
||||
#include <QtGlobal>
|
||||
#include <QtCore/QSharedPointer>
|
||||
|
||||
#include <GenericQueueThread.h>
|
||||
|
||||
#include <gl/Context.h>
|
||||
|
||||
#include "GLShared.h"
|
||||
|
||||
#ifdef Q_OS_WIN
|
||||
#define THREADED_TEXTURE_TRANSFER
|
||||
#endif
|
||||
|
||||
#ifdef THREADED_TEXTURE_TRANSFER
|
||||
// FIXME when sparse textures are enabled, it's harder to force a draw on the transfer thread
|
||||
// also, the current draw code is implicitly using OpenGL 4.5 functionality
|
||||
//#define TEXTURE_TRANSFER_FORCE_DRAW
|
||||
// FIXME PBO's increase the complexity and don't seem to work reliably
|
||||
//#define TEXTURE_TRANSFER_PBOS
|
||||
#endif
|
||||
|
||||
namespace gpu { namespace gl {
|
||||
|
||||
using TextureList = std::list<TexturePointer>;
|
||||
using TextureListIterator = TextureList::iterator;
|
||||
|
||||
class GLTextureTransferHelper : public GenericThread {
|
||||
public:
|
||||
using VoidLambda = std::function<void()>;
|
||||
using VoidLambdaList = std::list<VoidLambda>;
|
||||
using Pointer = std::shared_ptr<GLTextureTransferHelper>;
|
||||
GLTextureTransferHelper();
|
||||
~GLTextureTransferHelper();
|
||||
void transferTexture(const gpu::TexturePointer& texturePointer);
|
||||
void queueExecution(VoidLambda lambda);
|
||||
|
||||
void setup() override;
|
||||
void shutdown() override;
|
||||
bool process() override;
|
||||
|
||||
private:
|
||||
#ifdef THREADED_TEXTURE_TRANSFER
|
||||
::gl::OffscreenContext _context;
|
||||
#endif
|
||||
|
||||
#ifdef TEXTURE_TRANSFER_FORCE_DRAW
|
||||
// Framebuffers / renderbuffers for forcing access to the texture on the transfer thread
|
||||
GLuint _drawRenderbuffer { 0 };
|
||||
GLuint _drawFramebuffer { 0 };
|
||||
GLuint _readFramebuffer { 0 };
|
||||
#endif
|
||||
|
||||
// A mutex for protecting items access on the render and transfer threads
|
||||
Mutex _mutex;
|
||||
// Commands that have been submitted for execution on the texture transfer thread
|
||||
VoidLambdaList _pendingCommands;
|
||||
// Textures that have been submitted for transfer
|
||||
TextureList _pendingTextures;
|
||||
// Textures currently in the transfer process
|
||||
// Only used on the transfer thread
|
||||
TextureList _transferringTextures;
|
||||
TextureListIterator _textureIterator;
|
||||
|
||||
};
|
||||
|
||||
} }
|
||||
|
||||
#endif
|
|
@ -40,18 +40,28 @@ public:
|
|||
|
||||
class GL41Texture : public GLTexture {
|
||||
using Parent = GLTexture;
|
||||
GLuint allocate();
|
||||
public:
|
||||
GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& buffer, GLuint externalId);
|
||||
GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& buffer, bool transferrable);
|
||||
static GLuint allocate();
|
||||
|
||||
public:
|
||||
~GL41Texture();
|
||||
|
||||
private:
|
||||
GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& buffer);
|
||||
|
||||
protected:
|
||||
void transferMip(uint16_t mipLevel, uint8_t face) const;
|
||||
void startTransfer() override;
|
||||
void allocateStorage() const override;
|
||||
void updateSize() const override;
|
||||
void syncSampler() const override;
|
||||
void generateMips() const override;
|
||||
uint32 size() const override;
|
||||
|
||||
friend class GL41Backend;
|
||||
const Stamp _storageStamp;
|
||||
mutable Stamp _contentStamp { 0 };
|
||||
mutable Stamp _samplerStamp { 0 };
|
||||
const uint32 _size;
|
||||
|
||||
|
||||
bool isOutdated() const;
|
||||
void withPreservedTexture(std::function<void()> f) const;
|
||||
void syncContent() const;
|
||||
void syncSampler() const;
|
||||
};
|
||||
|
||||
|
||||
|
@ -62,8 +72,7 @@ protected:
|
|||
GLuint getBufferID(const Buffer& buffer) override;
|
||||
GLBuffer* syncGPUObject(const Buffer& buffer) override;
|
||||
|
||||
GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) override;
|
||||
GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) override;
|
||||
GLTexture* syncGPUObject(const TexturePointer& texture) override;
|
||||
|
||||
GLuint getQueryID(const QueryPointer& query) override;
|
||||
GLQuery* syncGPUObject(const Query& query) override;
|
||||
|
|
|
@ -53,10 +53,12 @@ public:
|
|||
GL_COLOR_ATTACHMENT15 };
|
||||
|
||||
int unit = 0;
|
||||
auto backend = _backend.lock();
|
||||
for (auto& b : _gpuObject.getRenderBuffers()) {
|
||||
surface = b._texture;
|
||||
if (surface) {
|
||||
gltexture = gl::GLTexture::sync<GL41Backend::GL41Texture>(*_backend.lock().get(), surface, false); // Grab the gltexture and don't transfer
|
||||
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
|
||||
gltexture = backend->syncGPUObject(surface);
|
||||
} else {
|
||||
gltexture = nullptr;
|
||||
}
|
||||
|
@ -81,9 +83,11 @@ public:
|
|||
}
|
||||
|
||||
if (_gpuObject.getDepthStamp() != _depthStamp) {
|
||||
auto backend = _backend.lock();
|
||||
auto surface = _gpuObject.getDepthStencilBuffer();
|
||||
if (_gpuObject.hasDepthStencil() && surface) {
|
||||
gltexture = gl::GLTexture::sync<GL41Backend::GL41Texture>(*_backend.lock().get(), surface, false); // Grab the gltexture and don't transfer
|
||||
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
|
||||
gltexture = backend->syncGPUObject(surface);
|
||||
}
|
||||
|
||||
if (gltexture) {
|
||||
|
@ -110,7 +114,7 @@ public:
|
|||
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, currentFBO);
|
||||
}
|
||||
|
||||
checkStatus(GL_DRAW_FRAMEBUFFER);
|
||||
checkStatus();
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@@ -29,20 +29,102 @@ GLuint GL41Texture::allocate() {
return result;
}

GLuint GL41Backend::getTextureID(const TexturePointer& texture, bool transfer) {
return GL41Texture::getId<GL41Texture>(*this, texture, transfer);
GLTexture* GL41Backend::syncGPUObject(const TexturePointer& texturePointer) {
if (!texturePointer) {
return nullptr;
}
const Texture& texture = *texturePointer;
if (TextureUsageType::EXTERNAL == texture.getUsageType()) {
return Parent::syncGPUObject(texturePointer);
}

if (!texture.isDefined()) {
// NO texture definition yet so let's avoid thinking
return nullptr;
}

// If the object hasn't been created, or the object definition is out of date, drop and re-create
GL41Texture* object = Backend::getGPUObject<GL41Texture>(texture);
if (!object || object->_storageStamp < texture.getStamp()) {
// This automatically destroys any previous texture
object = new GL41Texture(shared_from_this(), texture);
}

// FIXME internalize to GL41Texture 'sync' function
if (object->isOutdated()) {
object->withPreservedTexture([&] {
if (object->_contentStamp <= texture.getDataStamp()) {
// FIXME implement synchronous texture transfer here
object->syncContent();
}

if (object->_samplerStamp <= texture.getSamplerStamp()) {
object->syncSampler();
}
});
}

return object;
}

GLTexture* GL41Backend::syncGPUObject(const TexturePointer& texture, bool transfer) {
return GL41Texture::sync<GL41Texture>(*this, texture, transfer);
GL41Texture::GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture)
: GLTexture(backend, texture, allocate()), _storageStamp { texture.getStamp() }, _size(texture.evalTotalSize()) {
incrementTextureGPUCount();
withPreservedTexture([&] {
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), _gpuObject.getStoredMipFormat());
auto numMips = _gpuObject.evalNumMips();
for (uint16_t mipLevel = 0; mipLevel < numMips; ++mipLevel) {
// Get the mip level dimensions, accounting for the downgrade level
Vec3u dimensions = _gpuObject.evalMipDimensions(mipLevel);
uint8_t face = 0;
for (GLenum target : getFaceTargets(_target)) {
const Byte* mipData = nullptr;
if (_gpuObject.isStoredMipFaceAvailable(mipLevel, face)) {
auto mip = _gpuObject.accessStoredMipFace(mipLevel, face);
mipData = mip->readData();
}
glTexImage2D(target, mipLevel, texelFormat.internalFormat, dimensions.x, dimensions.y, 0, texelFormat.format, texelFormat.type, mipData);
(void)CHECK_GL_ERROR();
++face;
}
}
});
}

GL41Texture::GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint externalId)
: GLTexture(backend, texture, externalId) {
GL41Texture::~GL41Texture() {

}

GL41Texture::GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable)
: GLTexture(backend, texture, allocate(), transferrable) {
bool GL41Texture::isOutdated() const {
if (_samplerStamp <= _gpuObject.getSamplerStamp()) {
return true;
}
if (TextureUsageType::RESOURCE == _gpuObject.getUsageType() && _contentStamp <= _gpuObject.getDataStamp()) {
return true;
}
return false;
}

void GL41Texture::withPreservedTexture(std::function<void()> f) const {
GLint boundTex = -1;
switch (_target) {
case GL_TEXTURE_2D:
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
break;

case GL_TEXTURE_CUBE_MAP:
glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
break;

default:
qFatal("Unsupported texture type");
}
(void)CHECK_GL_ERROR();

glBindTexture(_target, _texture);
f();
glBindTexture(_target, boundTex);
(void)CHECK_GL_ERROR();
}

void GL41Texture::generateMips() const {

@@ -52,94 +134,12 @@ void GL41Texture::generateMips() const {
(void)CHECK_GL_ERROR();
}

void GL41Texture::allocateStorage() const {
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat());
glTexParameteri(_target, GL_TEXTURE_BASE_LEVEL, 0);
(void)CHECK_GL_ERROR();
glTexParameteri(_target, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
(void)CHECK_GL_ERROR();
if (GLEW_VERSION_4_2 && !_gpuObject.getTexelFormat().isCompressed()) {
// Get the dimensions, accounting for the downgrade level
Vec3u dimensions = _gpuObject.evalMipDimensions(_minMip);
glTexStorage2D(_target, usedMipLevels(), texelFormat.internalFormat, dimensions.x, dimensions.y);
(void)CHECK_GL_ERROR();
} else {
for (uint16_t l = _minMip; l <= _maxMip; l++) {
// Get the mip level dimensions, accounting for the downgrade level
Vec3u dimensions = _gpuObject.evalMipDimensions(l);
for (GLenum target : getFaceTargets(_target)) {
glTexImage2D(target, l - _minMip, texelFormat.internalFormat, dimensions.x, dimensions.y, 0, texelFormat.format, texelFormat.type, NULL);
(void)CHECK_GL_ERROR();
}
}
}
void GL41Texture::syncContent() const {
// FIXME actually copy the texture data
_contentStamp = _gpuObject.getDataStamp() + 1;
}

void GL41Texture::updateSize() const {
setSize(_virtualSize);
if (!_id) {
return;
}

if (_gpuObject.getTexelFormat().isCompressed()) {
GLenum proxyType = GL_TEXTURE_2D;
GLuint numFaces = 1;
if (_gpuObject.getType() == gpu::Texture::TEX_CUBE) {
proxyType = CUBE_FACE_LAYOUT[0];
numFaces = (GLuint)CUBE_NUM_FACES;
}
GLint gpuSize{ 0 };
glGetTexLevelParameteriv(proxyType, 0, GL_TEXTURE_COMPRESSED, &gpuSize);
(void)CHECK_GL_ERROR();

if (gpuSize) {
for (GLuint level = _minMip; level < _maxMip; level++) {
GLint levelSize{ 0 };
glGetTexLevelParameteriv(proxyType, level, GL_TEXTURE_COMPRESSED_IMAGE_SIZE, &levelSize);
levelSize *= numFaces;

if (levelSize <= 0) {
break;
}
gpuSize += levelSize;
}
(void)CHECK_GL_ERROR();
setSize(gpuSize);
return;
}
}
}

// Move content bits from the CPU to the GPU for a given mip / face
void GL41Texture::transferMip(uint16_t mipLevel, uint8_t face) const {
auto mip = _gpuObject.accessStoredMipFace(mipLevel, face);
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), mip->getFormat());
//GLenum target = getFaceTargets()[face];
GLenum target = _target == GL_TEXTURE_2D ? GL_TEXTURE_2D : CUBE_FACE_LAYOUT[face];
auto size = _gpuObject.evalMipDimensions(mipLevel);
glTexSubImage2D(target, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
(void)CHECK_GL_ERROR();
}

void GL41Texture::startTransfer() {
PROFILE_RANGE(render_gpu_gl, __FUNCTION__);
Parent::startTransfer();

glBindTexture(_target, _id);
(void)CHECK_GL_ERROR();

// transfer pixels from each face
uint8_t numFaces = (Texture::TEX_CUBE == _gpuObject.getType()) ? CUBE_NUM_FACES : 1;
for (uint8_t f = 0; f < numFaces; f++) {
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
transferMip(i, f);
}
}
}
}

void GL41Backend::GL41Texture::syncSampler() const {
void GL41Texture::syncSampler() const {
const Sampler& sampler = _gpuObject.getSampler();
const auto& fm = FILTER_MODES[sampler.getFilter()];
glTexParameteri(_target, GL_TEXTURE_MIN_FILTER, fm.minFilter);

@@ -161,5 +161,9 @@ void GL41Backend::GL41Texture::syncSampler() const {
glTexParameterf(_target, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
glTexParameterf(_target, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
glTexParameterf(_target, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
_samplerStamp = _gpuObject.getSamplerStamp() + 1;
}

uint32 GL41Texture::size() const {
return _size;
}
@@ -18,6 +18,12 @@ Q_LOGGING_CATEGORY(gpugl45logging, "hifi.gpu.gl45")
using namespace gpu;
using namespace gpu::gl45;

void GL45Backend::recycle() const {
Parent::recycle();
GL45VariableAllocationTexture::manageMemory();
GL45VariableAllocationTexture::_frameTexturesCreated = 0;
}

void GL45Backend::do_draw(const Batch& batch, size_t paramOffset) {
Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];

@@ -163,8 +169,3 @@ void GL45Backend::do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOf
_stats._DSNumAPIDrawcalls++;
(void)CHECK_GL_ERROR();
}

void GL45Backend::recycle() const {
Parent::recycle();
derezTextures();
}
@@ -8,17 +8,21 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#ifndef hifi_gpu_45_GL45Backend_h
#define hifi_gpu_45_GL45Backend_h

#include "../gl/GLBackend.h"
#include "../gl/GLTexture.h"
#include <thread>

#define INCREMENTAL_TRANSFER 0
#define THREADED_TEXTURE_BUFFERING 1

namespace gpu { namespace gl45 {

using namespace gpu::gl;
using TextureWeakPointer = std::weak_ptr<Texture>;

class GL45Backend : public GLBackend {
using Parent = GLBackend;
@ -31,60 +35,219 @@ public:
|
|||
|
||||
class GL45Texture : public GLTexture {
|
||||
using Parent = GLTexture;
|
||||
friend class GL45Backend;
|
||||
static GLuint allocate(const Texture& texture);
|
||||
protected:
|
||||
GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
|
||||
void generateMips() const override;
|
||||
void copyMipFaceFromTexture(uint16_t sourceMip, uint16_t targetMip, uint8_t face) const;
|
||||
void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum format, GLenum type, const void* sourcePointer) const;
|
||||
virtual void syncSampler() const;
|
||||
};
|
||||
|
||||
//
|
||||
// Textures that have fixed allocation sizes and cannot be managed at runtime
|
||||
//
|
||||
|
||||
class GL45FixedAllocationTexture : public GL45Texture {
|
||||
using Parent = GL45Texture;
|
||||
friend class GL45Backend;
|
||||
|
||||
public:
|
||||
GL45FixedAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
|
||||
~GL45FixedAllocationTexture();
|
||||
|
||||
protected:
|
||||
uint32 size() const override { return _size; }
|
||||
void allocateStorage() const;
|
||||
void syncSampler() const override;
|
||||
const uint32 _size { 0 };
|
||||
};
|
||||
|
||||
class GL45AttachmentTexture : public GL45FixedAllocationTexture {
|
||||
using Parent = GL45FixedAllocationTexture;
|
||||
friend class GL45Backend;
|
||||
protected:
|
||||
GL45AttachmentTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
|
||||
~GL45AttachmentTexture();
|
||||
};
|
||||
|
||||
class GL45StrictResourceTexture : public GL45FixedAllocationTexture {
|
||||
using Parent = GL45FixedAllocationTexture;
|
||||
friend class GL45Backend;
|
||||
protected:
|
||||
GL45StrictResourceTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
|
||||
};
|
||||
|
||||
//
|
||||
// Textures that can be managed at runtime to increase or decrease their memory load
|
||||
//
|
||||
|
||||
class GL45VariableAllocationTexture : public GL45Texture {
|
||||
using Parent = GL45Texture;
|
||||
friend class GL45Backend;
|
||||
using PromoteLambda = std::function<void()>;
|
||||
|
||||
public:
|
||||
enum class MemoryPressureState {
|
||||
Idle,
|
||||
Transfer,
|
||||
Oversubscribed,
|
||||
Undersubscribed,
|
||||
};
|
||||
|
||||
using QueuePair = std::pair<TextureWeakPointer, float>;
|
||||
struct QueuePairLess {
|
||||
bool operator()(const QueuePair& a, const QueuePair& b) {
|
||||
return a.second < b.second;
|
||||
}
|
||||
};
|
||||
using WorkQueue = std::priority_queue<QueuePair, std::vector<QueuePair>, QueuePairLess>;
|
||||
|
||||
class TransferJob {
|
||||
using VoidLambda = std::function<void()>;
|
||||
using VoidLambdaQueue = std::queue<VoidLambda>;
|
||||
using ThreadPointer = std::shared_ptr<std::thread>;
|
||||
const GL45VariableAllocationTexture& _parent;
|
||||
// Holds the contents to transfer to the GPU in CPU memory
|
||||
std::vector<uint8_t> _buffer;
|
||||
// Indicates if a transfer from backing storage to internal storage has started
|
||||
bool _bufferingStarted { false };
|
||||
bool _bufferingCompleted { false };
|
||||
VoidLambda _transferLambda;
|
||||
VoidLambda _bufferingLambda;
|
||||
#if THREADED_TEXTURE_BUFFERING
|
||||
static Mutex _mutex;
|
||||
static VoidLambdaQueue _bufferLambdaQueue;
|
||||
static ThreadPointer _bufferThread;
|
||||
static std::atomic<bool> _shutdownBufferingThread;
|
||||
static void bufferLoop();
|
||||
#endif
|
||||
|
||||
public:
|
||||
TransferJob(const TransferJob& other) = delete;
|
||||
TransferJob(const GL45VariableAllocationTexture& parent, std::function<void()> transferLambda);
|
||||
TransferJob(const GL45VariableAllocationTexture& parent, uint16_t sourceMip, uint16_t targetMip, uint8_t face, uint32_t lines = 0, uint32_t lineOffset = 0);
|
||||
~TransferJob();
|
||||
bool tryTransfer();
|
||||
|
||||
#if THREADED_TEXTURE_BUFFERING
|
||||
static void startTransferLoop();
|
||||
static void stopTransferLoop();
|
||||
#endif
|
||||
|
||||
private:
|
||||
size_t _transferSize { 0 };
|
||||
#if THREADED_TEXTURE_BUFFERING
|
||||
void startBuffering();
|
||||
#endif
|
||||
void transfer();
|
||||
};
|
||||
|
||||
using TransferQueue = std::queue<std::unique_ptr<TransferJob>>;
|
||||
static MemoryPressureState _memoryPressureState;
|
||||
protected:
|
||||
static size_t _frameTexturesCreated;
|
||||
static std::atomic<bool> _memoryPressureStateStale;
|
||||
static std::list<TextureWeakPointer> _memoryManagedTextures;
|
||||
static WorkQueue _transferQueue;
|
||||
static WorkQueue _promoteQueue;
|
||||
static WorkQueue _demoteQueue;
|
||||
static TexturePointer _currentTransferTexture;
|
||||
static const uvec3 INITIAL_MIP_TRANSFER_DIMENSIONS;
|
||||
|
||||
|
||||
static void updateMemoryPressure();
|
||||
static void processWorkQueues();
|
||||
static void addMemoryManagedTexture(const TexturePointer& texturePointer);
|
||||
static void addToWorkQueue(const TexturePointer& texture);
|
||||
static WorkQueue& getActiveWorkQueue();
|
||||
|
||||
static void manageMemory();
|
||||
|
||||
protected:
|
||||
GL45VariableAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
|
||||
~GL45VariableAllocationTexture();
|
||||
//bool canPromoteNoAllocate() const { return _allocatedMip < _populatedMip; }
|
||||
bool canPromote() const { return _allocatedMip > 0; }
|
||||
bool canDemote() const { return _allocatedMip < _maxAllocatedMip; }
|
||||
bool hasPendingTransfers() const { return _populatedMip > _allocatedMip; }
|
||||
void executeNextTransfer(const TexturePointer& currentTexture);
|
||||
uint32 size() const override { return _size; }
|
||||
virtual void populateTransferQueue() = 0;
|
||||
virtual void promote() = 0;
|
||||
virtual void demote() = 0;
|
||||
|
||||
// The allocated mip level, relative to the number of mips in the gpu::Texture object
|
||||
// The relationship between a given glMip and the original gpu::Texture mip is always
|
||||
// glMip + _allocatedMip
|
||||
uint16 _allocatedMip { 0 };
|
||||
// The populated mip level, relative to the number of mips in the gpu::Texture object
|
||||
// This must always be >= the allocated mip
|
||||
uint16 _populatedMip { 0 };
|
||||
// The highest (lowest resolution) mip that we will support, relative to the number
|
||||
// of mips in the gpu::Texture object
|
||||
uint16 _maxAllocatedMip { 0 };
|
||||
uint32 _size { 0 };
|
||||
// Contains a series of lambdas that when executed will transfer data to the GPU, modify
|
||||
// the _populatedMip and update the sampler in order to fully populate the allocated texture
|
||||
// until _populatedMip == _allocatedMip
|
||||
TransferQueue _pendingTransfers;
|
||||
};
|
||||
|
||||
class GL45ResourceTexture : public GL45VariableAllocationTexture {
|
||||
using Parent = GL45VariableAllocationTexture;
|
||||
friend class GL45Backend;
|
||||
protected:
|
||||
GL45ResourceTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
|
||||
|
||||
void syncSampler() const override;
|
||||
void promote() override;
|
||||
void demote() override;
|
||||
void populateTransferQueue() override;
|
||||
|
||||
void allocateStorage(uint16 mip);
|
||||
void copyMipsFromTexture();
|
||||
};
|
||||
|
||||
#if 0
|
||||
class GL45SparseResourceTexture : public GL45VariableAllocationTexture {
|
||||
using Parent = GL45VariableAllocationTexture;
|
||||
friend class GL45Backend;
|
||||
using TextureTypeFormat = std::pair<GLenum, GLenum>;
|
||||
using PageDimensions = std::vector<uvec3>;
|
||||
using PageDimensionsMap = std::map<TextureTypeFormat, PageDimensions>;
|
||||
static PageDimensionsMap pageDimensionsByFormat;
|
||||
static Mutex pageDimensionsMutex;
|
||||
|
||||
static bool isSparseEligible(const Texture& texture);
|
||||
static PageDimensions getPageDimensionsForFormat(const TextureTypeFormat& typeFormat);
|
||||
static PageDimensions getPageDimensionsForFormat(GLenum type, GLenum format);
|
||||
static const uint32_t DEFAULT_PAGE_DIMENSION = 128;
|
||||
static const uint32_t DEFAULT_MAX_SPARSE_LEVEL = 0xFFFF;
|
||||
|
||||
public:
|
||||
GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint externalId);
|
||||
GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable);
|
||||
~GL45Texture();
|
||||
|
||||
void postTransfer() override;
|
||||
|
||||
struct SparseInfo {
|
||||
SparseInfo(GL45Texture& texture);
|
||||
void maybeMakeSparse();
|
||||
void update();
|
||||
uvec3 getPageCounts(const uvec3& dimensions) const;
|
||||
uint32_t getPageCount(const uvec3& dimensions) const;
|
||||
uint32_t getSize() const;
|
||||
|
||||
GL45Texture& texture;
|
||||
bool sparse { false };
|
||||
uvec3 pageDimensions { DEFAULT_PAGE_DIMENSION };
|
||||
GLuint maxSparseLevel { DEFAULT_MAX_SPARSE_LEVEL };
|
||||
uint32_t allocatedPages { 0 };
|
||||
uint32_t maxPages { 0 };
|
||||
uint32_t pageBytes { 0 };
|
||||
GLint pageDimensionsIndex { 0 };
|
||||
};
|
||||
|
||||
protected:
|
||||
void updateMips() override;
|
||||
void stripToMip(uint16_t newMinMip);
|
||||
void startTransfer() override;
|
||||
bool continueTransfer() override;
|
||||
void finishTransfer() override;
|
||||
void incrementalTransfer(const uvec3& size, const gpu::Texture::PixelsPointer& mip, std::function<void(const ivec3& offset, const uvec3& size)> f) const;
|
||||
void transferMip(uint16_t mipLevel, uint8_t face = 0) const;
|
||||
void allocateMip(uint16_t mipLevel, uint8_t face = 0) const;
|
||||
void allocateStorage() const override;
|
||||
void updateSize() const override;
|
||||
void syncSampler() const override;
|
||||
void generateMips() const override;
|
||||
void withPreservedTexture(std::function<void()> f) const override;
|
||||
void derez();
|
||||
GL45SparseResourceTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
|
||||
~GL45SparseResourceTexture();
|
||||
uint32 size() const override { return _allocatedPages * _pageBytes; }
|
||||
void promote() override;
|
||||
void demote() override;
|
||||
|
||||
SparseInfo _sparseInfo;
|
||||
uint16_t _mipOffset { 0 };
|
||||
friend class GL45Backend;
|
||||
private:
|
||||
uvec3 getPageCounts(const uvec3& dimensions) const;
|
||||
uint32_t getPageCount(const uvec3& dimensions) const;
|
||||
|
||||
uint32_t _allocatedPages { 0 };
|
||||
uint32_t _pageBytes { 0 };
|
||||
uvec3 _pageDimensions { DEFAULT_PAGE_DIMENSION };
|
||||
GLuint _maxSparseLevel { DEFAULT_MAX_SPARSE_LEVEL };
|
||||
};
|
||||
#endif
|
||||
|
||||
|
||||
protected:
|
||||
|
||||
void recycle() const override;
|
||||
void derezTextures() const;
|
||||
|
||||
GLuint getFramebufferID(const FramebufferPointer& framebuffer) override;
|
||||
GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) override;
|
||||
|
@ -92,8 +255,7 @@ protected:
|
|||
GLuint getBufferID(const Buffer& buffer) override;
|
||||
GLBuffer* syncGPUObject(const Buffer& buffer) override;
|
||||
|
||||
GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) override;
|
||||
GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) override;
|
||||
GLTexture* syncGPUObject(const TexturePointer& texture) override;
|
||||
|
||||
GLuint getQueryID(const QueryPointer& query) override;
|
||||
GLQuery* syncGPUObject(const Query& query) override;
|
||||
|
@ -126,5 +288,5 @@ protected:
|
|||
|
||||
Q_DECLARE_LOGGING_CATEGORY(gpugl45logging)
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@@ -49,10 +49,12 @@ public:
GL_COLOR_ATTACHMENT15 };

int unit = 0;
auto backend = _backend.lock();
for (auto& b : _gpuObject.getRenderBuffers()) {
surface = b._texture;
if (surface) {
gltexture = gl::GLTexture::sync<GL45Backend::GL45Texture>(*_backend.lock().get(), surface, false); // Grab the gltexture and don't transfer
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
gltexture = backend->syncGPUObject(surface);
} else {
gltexture = nullptr;
}

@@ -78,8 +80,10 @@ public:

if (_gpuObject.getDepthStamp() != _depthStamp) {
auto surface = _gpuObject.getDepthStencilBuffer();
auto backend = _backend.lock();
if (_gpuObject.hasDepthStencil() && surface) {
gltexture = gl::GLTexture::sync<GL45Backend::GL45Texture>(*_backend.lock().get(), surface, false); // Grab the gltexture and don't transfer
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
gltexture = backend->syncGPUObject(surface);
}

if (gltexture) {

@@ -102,7 +106,7 @@ public:
_status = glCheckNamedFramebufferStatus(_id, GL_DRAW_FRAMEBUFFER);

// restore the current framebuffer
checkStatus(GL_DRAW_FRAMEBUFFER);
checkStatus();
}
@ -8,9 +8,10 @@
|
|||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "GL45Backend.h"
|
||||
|
||||
#include "GL45Backend.h"
|
||||
#include <mutex>
|
||||
#include <algorithm>
|
||||
#include <condition_variable>
|
||||
#include <unordered_set>
|
||||
#include <unordered_map>
|
||||
|
@ -19,142 +20,70 @@
|
|||
#include <QtCore/QDebug>
|
||||
#include <QtCore/QThread>
|
||||
|
||||
#include <NumericalConstants.h>
|
||||
#include "../gl/GLTexelFormat.h"
|
||||
|
||||
using namespace gpu;
|
||||
using namespace gpu::gl;
|
||||
using namespace gpu::gl45;
|
||||
|
||||
// Allocate 1 MB of buffer space for paged transfers
|
||||
#define DEFAULT_PAGE_BUFFER_SIZE (1024*1024)
|
||||
#define DEFAULT_GL_PIXEL_ALIGNMENT 4
|
||||
|
||||
using GL45Texture = GL45Backend::GL45Texture;
|
||||
|
||||
static std::map<uint16_t, std::unordered_set<GL45Texture*>> texturesByMipCounts;
|
||||
static Mutex texturesByMipCountsMutex;
|
||||
using TextureTypeFormat = std::pair<GLenum, GLenum>;
|
||||
std::map<TextureTypeFormat, std::vector<uvec3>> sparsePageDimensionsByFormat;
|
||||
Mutex sparsePageDimensionsByFormatMutex;
|
||||
|
||||
static std::vector<uvec3> getPageDimensionsForFormat(const TextureTypeFormat& typeFormat) {
|
||||
{
|
||||
Lock lock(sparsePageDimensionsByFormatMutex);
|
||||
if (sparsePageDimensionsByFormat.count(typeFormat)) {
|
||||
return sparsePageDimensionsByFormat[typeFormat];
|
||||
}
|
||||
}
|
||||
GLint count = 0;
|
||||
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_NUM_VIRTUAL_PAGE_SIZES_ARB, 1, &count);
|
||||
|
||||
std::vector<uvec3> result;
|
||||
if (count > 0) {
|
||||
std::vector<GLint> x, y, z;
|
||||
x.resize(count);
|
||||
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_X_ARB, 1, &x[0]);
|
||||
y.resize(count);
|
||||
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_Y_ARB, 1, &y[0]);
|
||||
z.resize(count);
|
||||
glGetInternalformativ(typeFormat.first, typeFormat.second, GL_VIRTUAL_PAGE_SIZE_Z_ARB, 1, &z[0]);
|
||||
|
||||
result.resize(count);
|
||||
for (GLint i = 0; i < count; ++i) {
|
||||
result[i] = uvec3(x[i], y[i], z[i]);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
Lock lock(sparsePageDimensionsByFormatMutex);
|
||||
if (0 == sparsePageDimensionsByFormat.count(typeFormat)) {
|
||||
sparsePageDimensionsByFormat[typeFormat] = result;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static std::vector<uvec3> getPageDimensionsForFormat(GLenum target, GLenum format) {
|
||||
return getPageDimensionsForFormat({ target, format });
|
||||
}
|
||||
|
||||
GLTexture* GL45Backend::syncGPUObject(const TexturePointer& texture, bool transfer) {
|
||||
return GL45Texture::sync<GL45Texture>(*this, texture, transfer);
|
||||
}
|
||||
|
||||
using SparseInfo = GL45Backend::GL45Texture::SparseInfo;
|
||||
|
||||
SparseInfo::SparseInfo(GL45Texture& texture)
|
||||
: texture(texture) {
|
||||
}
|
||||
|
||||
void SparseInfo::maybeMakeSparse() {
|
||||
// Don't enable sparse for objects with explicitly managed mip levels
|
||||
if (!texture._gpuObject.isAutogenerateMips()) {
|
||||
return;
|
||||
}
|
||||
return;
|
||||
|
||||
const uvec3 dimensions = texture._gpuObject.getDimensions();
|
||||
auto allowedPageDimensions = getPageDimensionsForFormat(texture._target, texture._internalFormat);
|
||||
// In order to enable sparse the texture size must be an integer multiple of the page size
|
||||
for (size_t i = 0; i < allowedPageDimensions.size(); ++i) {
|
||||
pageDimensionsIndex = (uint32_t) i;
|
||||
pageDimensions = allowedPageDimensions[i];
|
||||
// Is this texture an integer multiple of page dimensions?
|
||||
if (uvec3(0) == (dimensions % pageDimensions)) {
|
||||
qCDebug(gpugl45logging) << "Enabling sparse for texture " << texture._source.c_str();
|
||||
sparse = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (sparse) {
|
||||
glTextureParameteri(texture._id, GL_TEXTURE_SPARSE_ARB, GL_TRUE);
|
||||
glTextureParameteri(texture._id, GL_VIRTUAL_PAGE_SIZE_INDEX_ARB, pageDimensionsIndex);
|
||||
} else {
|
||||
qCDebug(gpugl45logging) << "Size " << dimensions.x << " x " << dimensions.y <<
|
||||
" is not supported by any sparse page size for texture" << texture._source.c_str();
|
||||
}
|
||||
}
|
||||
|
||||
#define SPARSE_PAGE_SIZE_OVERHEAD_ESTIMATE 1.3f
|
||||
#define MAX_RESOURCE_TEXTURES_PER_FRAME 2
|
||||
|
||||
// This can only be called after we've established our storage size
|
||||
void SparseInfo::update() {
|
||||
if (!sparse) {
|
||||
return;
|
||||
GLTexture* GL45Backend::syncGPUObject(const TexturePointer& texturePointer) {
|
||||
if (!texturePointer) {
|
||||
return nullptr;
|
||||
}
|
||||
glGetTextureParameterIuiv(texture._id, GL_NUM_SPARSE_LEVELS_ARB, &maxSparseLevel);
|
||||
pageBytes = texture._gpuObject.getTexelFormat().getSize();
|
||||
pageBytes *= pageDimensions.x * pageDimensions.y * pageDimensions.z;
|
||||
// Testing with a simple texture allocating app shows an estimated 20% GPU memory overhead for
|
||||
// sparse textures as compared to non-sparse, so we account for that here.
|
||||
pageBytes = (uint32_t)(pageBytes * SPARSE_PAGE_SIZE_OVERHEAD_ESTIMATE);
|
||||
|
||||
for (uint16_t mipLevel = 0; mipLevel <= maxSparseLevel; ++mipLevel) {
|
||||
auto mipDimensions = texture._gpuObject.evalMipDimensions(mipLevel);
|
||||
auto mipPageCount = getPageCount(mipDimensions);
|
||||
maxPages += mipPageCount;
|
||||
const Texture& texture = *texturePointer;
|
||||
if (TextureUsageType::EXTERNAL == texture.getUsageType()) {
|
||||
return Parent::syncGPUObject(texturePointer);
|
||||
}
|
||||
if (texture._target == GL_TEXTURE_CUBE_MAP) {
|
||||
maxPages *= GLTexture::CUBE_NUM_FACES;
|
||||
|
||||
if (!texture.isDefined()) {
|
||||
// NO texture definition yet so let's avoid thinking
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
uvec3 SparseInfo::getPageCounts(const uvec3& dimensions) const {
|
||||
auto result = (dimensions / pageDimensions) +
|
||||
glm::clamp(dimensions % pageDimensions, glm::uvec3(0), glm::uvec3(1));
|
||||
return result;
|
||||
}
|
||||
GL45Texture* object = Backend::getGPUObject<GL45Texture>(texture);
|
||||
if (!object) {
|
||||
switch (texture.getUsageType()) {
|
||||
case TextureUsageType::RENDERBUFFER:
|
||||
object = new GL45AttachmentTexture(shared_from_this(), texture);
|
||||
break;
|
||||
|
||||
uint32_t SparseInfo::getPageCount(const uvec3& dimensions) const {
|
||||
auto pageCounts = getPageCounts(dimensions);
|
||||
return pageCounts.x * pageCounts.y * pageCounts.z;
|
||||
}
|
||||
case TextureUsageType::STRICT_RESOURCE:
|
||||
qCDebug(gpugllogging) << "Strict texture " << texture.source().c_str();
|
||||
object = new GL45StrictResourceTexture(shared_from_this(), texture);
|
||||
break;
|
||||
|
||||
case TextureUsageType::RESOURCE: {
|
||||
if (GL45VariableAllocationTexture::_frameTexturesCreated < MAX_RESOURCE_TEXTURES_PER_FRAME) {
|
||||
#if 0
|
||||
if (isTextureManagementSparseEnabled() && GL45Texture::isSparseEligible(texture)) {
|
||||
object = new GL45SparseResourceTexture(shared_from_this(), texture);
|
||||
} else {
|
||||
object = new GL45ResourceTexture(shared_from_this(), texture);
|
||||
}
|
||||
#else
|
||||
object = new GL45ResourceTexture(shared_from_this(), texture);
|
||||
#endif
|
||||
GL45VariableAllocationTexture::addMemoryManagedTexture(texturePointer);
|
||||
} else {
|
||||
auto fallback = texturePointer->getFallbackTexture();
|
||||
if (fallback) {
|
||||
object = static_cast<GL45Texture*>(syncGPUObject(fallback));
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
uint32_t SparseInfo::getSize() const {
|
||||
return allocatedPages * pageBytes;
|
||||
default:
|
||||
Q_UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
return object;
|
||||
}
|
||||
|
||||
void GL45Backend::initTextureManagementStage() {
|
||||
|
@ -171,6 +100,12 @@ void GL45Backend::initTextureManagementStage() {
|
|||
}
|
||||
}
|
||||
|
||||
using GL45Texture = GL45Backend::GL45Texture;
|
||||
|
||||
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture)
|
||||
: GLTexture(backend, texture, allocate(texture)) {
|
||||
incrementTextureGPUCount();
|
||||
}
|
||||
|
||||
GLuint GL45Texture::allocate(const Texture& texture) {
|
||||
GLuint result;
|
||||
|
@ -178,164 +113,43 @@ GLuint GL45Texture::allocate(const Texture& texture) {
|
|||
return result;
|
||||
}
|
||||
|
||||
GLuint GL45Backend::getTextureID(const TexturePointer& texture, bool transfer) {
|
||||
return GL45Texture::getId<GL45Texture>(*this, texture, transfer);
|
||||
}
|
||||
|
||||
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, GLuint externalId)
|
||||
: GLTexture(backend, texture, externalId), _sparseInfo(*this)
|
||||
{
|
||||
}
|
||||
|
||||
GL45Texture::GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture, bool transferrable)
|
||||
: GLTexture(backend, texture, allocate(texture), transferrable), _sparseInfo(*this)
|
||||
{
|
||||
|
||||
auto theBackend = _backend.lock();
|
||||
if (_transferrable && theBackend && theBackend->isTextureManagementSparseEnabled()) {
|
||||
_sparseInfo.maybeMakeSparse();
|
||||
if (_sparseInfo.sparse) {
|
||||
Backend::incrementTextureGPUSparseCount();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GL45Texture::~GL45Texture() {
|
||||
// Remove this texture from the candidate list of derezzable textures
|
||||
if (_transferrable) {
|
||||
auto mipLevels = usedMipLevels();
|
||||
Lock lock(texturesByMipCountsMutex);
|
||||
if (texturesByMipCounts.count(mipLevels)) {
|
||||
auto& textures = texturesByMipCounts[mipLevels];
|
||||
textures.erase(this);
|
||||
if (textures.empty()) {
|
||||
texturesByMipCounts.erase(mipLevels);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (_sparseInfo.sparse) {
|
||||
Backend::decrementTextureGPUSparseCount();
|
||||
|
||||
// Experimentation suggests that allocating sparse textures on one context/thread and deallocating
|
||||
// them on another is buggy. So for sparse textures we need to queue a lambda with the deallocation
|
||||
// calls to the transfer thread
|
||||
auto id = _id;
|
||||
// Set the class _id to 0 so we don't try to double delete
|
||||
const_cast<GLuint&>(_id) = 0;
|
||||
std::list<std::function<void()>> destructionFunctions;
|
||||
|
||||
uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
|
||||
auto maxSparseMip = std::min<uint16_t>(_maxMip, _sparseInfo.maxSparseLevel);
|
||||
for (uint16_t mipLevel = _minMip; mipLevel <= maxSparseMip; ++mipLevel) {
|
||||
auto mipDimensions = _gpuObject.evalMipDimensions(mipLevel);
|
||||
destructionFunctions.push_back([id, maxFace, mipLevel, mipDimensions] {
|
||||
glTexturePageCommitmentEXT(id, mipLevel, 0, 0, 0, mipDimensions.x, mipDimensions.y, maxFace, GL_FALSE);
|
||||
});
|
||||
|
||||
auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions) * maxFace;
|
||||
assert(deallocatedPages <= _sparseInfo.allocatedPages);
|
||||
_sparseInfo.allocatedPages -= deallocatedPages;
|
||||
}
|
||||
|
||||
if (0 != _sparseInfo.allocatedPages) {
|
||||
qCWarning(gpugl45logging) << "Allocated pages remaining " << _id << " " << _sparseInfo.allocatedPages;
|
||||
}
|
||||
|
||||
auto size = _size;
|
||||
const_cast<GLuint&>(_size) = 0;
|
||||
_textureTransferHelper->queueExecution([id, size, destructionFunctions] {
|
||||
for (auto function : destructionFunctions) {
|
||||
function();
|
||||
}
|
||||
glDeleteTextures(1, &id);
|
||||
Backend::decrementTextureGPUCount();
|
||||
Backend::updateTextureGPUMemoryUsage(size, 0);
|
||||
Backend::updateTextureGPUSparseMemoryUsage(size, 0);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
void GL45Texture::withPreservedTexture(std::function<void()> f) const {
|
||||
f();
|
||||
}
|
||||
|
||||
void GL45Texture::generateMips() const {
|
||||
glGenerateTextureMipmap(_id);
|
||||
(void)CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void GL45Texture::allocateStorage() const {
|
||||
if (_gpuObject.getTexelFormat().isCompressed()) {
|
||||
qFatal("Compressed textures not yet supported");
|
||||
void GL45Texture::copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum format, GLenum type, const void* sourcePointer) const {
|
||||
if (GL_TEXTURE_2D == _target) {
|
||||
glTextureSubImage2D(_id, mip, 0, yOffset, size.x, size.y, format, type, sourcePointer);
|
||||
} else if (GL_TEXTURE_CUBE_MAP == _target) {
|
||||
// DSA ARB does not work on AMD, so use EXT
|
||||
// unless EXT is not available on the driver
|
||||
if (glTextureSubImage2DEXT) {
|
||||
auto target = GLTexture::CUBE_FACE_LAYOUT[face];
|
||||
glTextureSubImage2DEXT(_id, target, mip, 0, yOffset, size.x, size.y, format, type, sourcePointer);
|
||||
} else {
|
||||
glTextureSubImage3D(_id, mip, 0, yOffset, face, size.x, size.y, 1, format, type, sourcePointer);
|
||||
}
|
||||
} else {
|
||||
Q_ASSERT(false);
|
||||
}
|
||||
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, 0);
|
||||
glTextureParameteri(_id, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
|
||||
// Get the dimensions, accounting for the downgrade level
|
||||
Vec3u dimensions = _gpuObject.evalMipDimensions(_minMip + _mipOffset);
|
||||
glTextureStorage2D(_id, usedMipLevels(), _internalFormat, dimensions.x, dimensions.y);
|
||||
(void)CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void GL45Texture::updateSize() const {
|
||||
if (_gpuObject.getTexelFormat().isCompressed()) {
|
||||
qFatal("Compressed textures not yet supported");
|
||||
void GL45Texture::copyMipFaceFromTexture(uint16_t sourceMip, uint16_t targetMip, uint8_t face) const {
|
||||
if (!_gpuObject.isStoredMipFaceAvailable(sourceMip)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (_transferrable && _sparseInfo.sparse) {
|
||||
auto size = _sparseInfo.getSize();
|
||||
Backend::updateTextureGPUSparseMemoryUsage(_size, size);
|
||||
setSize(size);
|
||||
auto size = _gpuObject.evalMipDimensions(sourceMip);
|
||||
auto mipData = _gpuObject.accessStoredMipFace(sourceMip, face);
|
||||
if (mipData) {
|
||||
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), _gpuObject.getStoredMipFormat());
|
||||
copyMipFaceLinesFromTexture(targetMip, face, size, 0, texelFormat.format, texelFormat.type, mipData->readData());
|
||||
} else {
|
||||
setSize(_gpuObject.evalTotalSize(_mipOffset));
|
||||
qCDebug(gpugllogging) << "Missing mipData level=" << sourceMip << " face=" << (int)face << " for texture " << _gpuObject.source().c_str();
|
||||
}
|
||||
}
|
||||
|
||||
void GL45Texture::startTransfer() {
|
||||
Parent::startTransfer();
|
||||
_sparseInfo.update();
|
||||
}
|
||||
|
||||
bool GL45Texture::continueTransfer() {
|
||||
PROFILE_RANGE(render_gpu_gl, "continueTransfer")
|
||||
size_t maxFace = GL_TEXTURE_CUBE_MAP == _target ? CUBE_NUM_FACES : 1;
|
||||
for (uint8_t face = 0; face < maxFace; ++face) {
|
||||
for (uint16_t mipLevel = _minMip; mipLevel <= _maxMip; ++mipLevel) {
|
||||
auto size = _gpuObject.evalMipDimensions(mipLevel);
|
||||
if (_sparseInfo.sparse && mipLevel <= _sparseInfo.maxSparseLevel) {
|
||||
glTexturePageCommitmentEXT(_id, mipLevel, 0, 0, face, size.x, size.y, 1, GL_TRUE);
|
||||
_sparseInfo.allocatedPages += _sparseInfo.getPageCount(size);
|
||||
}
|
||||
if (_gpuObject.isStoredMipFaceAvailable(mipLevel, face)) {
|
||||
PROFILE_RANGE_EX(render_gpu_gl, "texSubImage", 0x0000ffff, (size.x * size.y * maxFace / 1024));
|
||||
|
||||
auto mip = _gpuObject.accessStoredMipFace(mipLevel, face);
|
||||
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), mip->getFormat());
|
||||
if (GL_TEXTURE_2D == _target) {
|
||||
glTextureSubImage2D(_id, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
|
||||
} else if (GL_TEXTURE_CUBE_MAP == _target) {
|
||||
// DSA ARB does not work on AMD, so use EXT
|
||||
// unless EXT is not available on the driver
|
||||
if (glTextureSubImage2DEXT) {
|
||||
auto target = CUBE_FACE_LAYOUT[face];
|
||||
glTextureSubImage2DEXT(_id, target, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
|
||||
} else {
|
||||
glTextureSubImage3D(_id, mipLevel, 0, 0, face, size.x, size.y, 1, texelFormat.format, texelFormat.type, mip->readData());
|
||||
}
|
||||
} else {
|
||||
Q_ASSERT(false);
|
||||
}
|
||||
(void)CHECK_GL_ERROR();
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void GL45Texture::finishTransfer() {
|
||||
Parent::finishTransfer();
|
||||
}
|
||||
|
||||
void GL45Texture::syncSampler() const {
|
||||
const Sampler& sampler = _gpuObject.getSampler();
|
||||
|
||||
|
@ -353,163 +167,63 @@ void GL45Texture::syncSampler() const {
|
|||
glTextureParameteri(_id, GL_TEXTURE_WRAP_S, WRAP_MODES[sampler.getWrapModeU()]);
|
||||
glTextureParameteri(_id, GL_TEXTURE_WRAP_T, WRAP_MODES[sampler.getWrapModeV()]);
|
||||
glTextureParameteri(_id, GL_TEXTURE_WRAP_R, WRAP_MODES[sampler.getWrapModeW()]);
|
||||
glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
|
||||
glTextureParameterfv(_id, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
|
||||
// FIXME account for mip offsets here
|
||||
auto baseMip = std::max<uint16_t>(sampler.getMipOffset(), _minMip);
|
||||
glTextureParameterf(_id, GL_TEXTURE_MIN_LOD, sampler.getMinMip());
|
||||
glTextureParameterf(_id, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
|
||||
}
|
||||
|
||||
using GL45FixedAllocationTexture = GL45Backend::GL45FixedAllocationTexture;
|
||||
|
||||
GL45FixedAllocationTexture::GL45FixedAllocationTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45Texture(backend, texture), _size(texture.evalTotalSize()) {
|
||||
allocateStorage();
|
||||
syncSampler();
|
||||
}
|
||||
|
||||
GL45FixedAllocationTexture::~GL45FixedAllocationTexture() {
|
||||
}
|
||||
|
||||
void GL45FixedAllocationTexture::allocateStorage() const {
|
||||
const GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat());
|
||||
const auto dimensions = _gpuObject.getDimensions();
|
||||
const auto mips = _gpuObject.evalNumMips();
|
||||
glTextureStorage2D(_id, mips, texelFormat.internalFormat, dimensions.x, dimensions.y);
|
||||
}
|
||||
|
||||
void GL45FixedAllocationTexture::syncSampler() const {
|
||||
Parent::syncSampler();
|
||||
const Sampler& sampler = _gpuObject.getSampler();
|
||||
auto baseMip = std::max<uint16_t>(sampler.getMipOffset(), sampler.getMinMip());
|
||||
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, baseMip);
|
||||
glTextureParameterf(_id, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
|
||||
glTextureParameterf(_id, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip() - _mipOffset));
|
||||
glTextureParameterf(_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
|
||||
glTextureParameterf(_id, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
|
||||
}
|
||||
|
||||
void GL45Texture::postTransfer() {
|
||||
Parent::postTransfer();
|
||||
auto mipLevels = usedMipLevels();
|
||||
if (_transferrable && mipLevels > 1 && _minMip < _sparseInfo.maxSparseLevel) {
|
||||
Lock lock(texturesByMipCountsMutex);
|
||||
texturesByMipCounts[mipLevels].insert(this);
|
||||
}
|
||||
// Renderbuffer attachment textures
|
||||
using GL45AttachmentTexture = GL45Backend::GL45AttachmentTexture;
|
||||
|
||||
GL45AttachmentTexture::GL45AttachmentTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45FixedAllocationTexture(backend, texture) {
|
||||
Backend::updateTextureGPUFramebufferMemoryUsage(0, size());
|
||||
}
|
||||
|
||||
void GL45Texture::stripToMip(uint16_t newMinMip) {
|
||||
if (newMinMip < _minMip) {
|
||||
qCWarning(gpugl45logging) << "Cannot decrease the min mip";
|
||||
return;
|
||||
}
|
||||
GL45AttachmentTexture::~GL45AttachmentTexture() {
|
||||
Backend::updateTextureGPUFramebufferMemoryUsage(size(), 0);
|
||||
}
|
||||
|
||||
if (_sparseInfo.sparse && newMinMip > _sparseInfo.maxSparseLevel) {
|
||||
qCWarning(gpugl45logging) << "Cannot increase the min mip into the mip tail";
|
||||
return;
|
||||
}
|
||||
// Strict resource textures
|
||||
using GL45StrictResourceTexture = GL45Backend::GL45StrictResourceTexture;
|
||||
|
||||
PROFILE_RANGE(render_gpu_gl, "GL45Texture::stripToMip");
|
||||
|
||||
auto mipLevels = usedMipLevels();
|
||||
{
|
||||
Lock lock(texturesByMipCountsMutex);
|
||||
assert(0 != texturesByMipCounts.count(mipLevels));
|
||||
assert(0 != texturesByMipCounts[mipLevels].count(this));
|
||||
texturesByMipCounts[mipLevels].erase(this);
|
||||
if (texturesByMipCounts[mipLevels].empty()) {
|
||||
texturesByMipCounts.erase(mipLevels);
|
||||
GL45StrictResourceTexture::GL45StrictResourceTexture(const std::weak_ptr<GLBackend>& backend, const Texture& texture) : GL45FixedAllocationTexture(backend, texture) {
|
||||
auto mipLevels = _gpuObject.evalNumMips();
|
||||
for (uint16_t sourceMip = 0; sourceMip < mipLevels; ++sourceMip) {
|
||||
uint16_t targetMip = sourceMip;
|
||||
size_t maxFace = GLTexture::getFaceCount(_target);
|
||||
for (uint8_t face = 0; face < maxFace; ++face) {
|
||||
copyMipFaceFromTexture(sourceMip, targetMip, face);
|
||||
}
|
||||
}
|
||||
|
||||
// If we weren't generating mips before, we need to now that we're stripping down mip levels.
|
||||
if (!_gpuObject.isAutogenerateMips()) {
|
||||
qCDebug(gpugl45logging) << "Force mip generation for texture";
|
||||
glGenerateTextureMipmap(_id);
|
||||
}
|
||||
|
||||
|
||||
uint8_t maxFace = (uint8_t)((_target == GL_TEXTURE_CUBE_MAP) ? GLTexture::CUBE_NUM_FACES : 1);
|
||||
if (_sparseInfo.sparse) {
|
||||
for (uint16_t mip = _minMip; mip < newMinMip; ++mip) {
|
||||
auto id = _id;
|
||||
auto mipDimensions = _gpuObject.evalMipDimensions(mip);
|
||||
_textureTransferHelper->queueExecution([id, mip, mipDimensions, maxFace] {
|
||||
glTexturePageCommitmentEXT(id, mip, 0, 0, 0, mipDimensions.x, mipDimensions.y, maxFace, GL_FALSE);
|
||||
});
|
||||
|
||||
auto deallocatedPages = _sparseInfo.getPageCount(mipDimensions) * maxFace;
|
||||
assert(deallocatedPages < _sparseInfo.allocatedPages);
|
||||
_sparseInfo.allocatedPages -= deallocatedPages;
|
||||
}
|
||||
_minMip = newMinMip;
|
||||
} else {
|
||||
GLuint oldId = _id;
|
||||
// Find the distance between the old min mip and the new one
|
||||
uint16 mipDelta = newMinMip - _minMip;
|
||||
_mipOffset += mipDelta;
|
||||
const_cast<uint16&>(_maxMip) -= mipDelta;
|
||||
auto newLevels = usedMipLevels();
|
||||
|
||||
// Create and setup the new texture (allocate)
|
||||
{
|
||||
Vec3u newDimensions = _gpuObject.evalMipDimensions(_mipOffset);
|
||||
PROFILE_RANGE_EX(render_gpu_gl, "Re-Allocate", 0xff0000ff, (newDimensions.x * newDimensions.y));
|
||||
|
||||
glCreateTextures(_target, 1, &const_cast<GLuint&>(_id));
|
||||
glTextureParameteri(_id, GL_TEXTURE_BASE_LEVEL, 0);
|
||||
glTextureParameteri(_id, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
|
||||
glTextureStorage2D(_id, newLevels, _internalFormat, newDimensions.x, newDimensions.y);
|
||||
}
|
||||
|
||||
// Copy the contents of the old texture to the new
|
||||
{
|
||||
PROFILE_RANGE(render_gpu_gl, "Blit");
|
||||
// Preferred path only available in 4.3
|
||||
for (uint16 targetMip = _minMip; targetMip <= _maxMip; ++targetMip) {
|
||||
uint16 sourceMip = targetMip + mipDelta;
|
||||
Vec3u mipDimensions = _gpuObject.evalMipDimensions(targetMip + _mipOffset);
|
||||
for (GLenum target : getFaceTargets(_target)) {
|
||||
glCopyImageSubData(
|
||||
oldId, target, sourceMip, 0, 0, 0,
|
||||
_id, target, targetMip, 0, 0, 0,
|
||||
mipDimensions.x, mipDimensions.y, 1
|
||||
);
|
||||
(void)CHECK_GL_ERROR();
|
||||
}
|
||||
}
|
||||
|
||||
glDeleteTextures(1, &oldId);
|
||||
}
|
||||
}
|
||||
|
||||
// Re-sync the sampler to force access to the new mip level
|
||||
syncSampler();
|
||||
updateSize();
|
||||
|
||||
// Re-insert into the texture-by-mips map if appropriate
|
||||
mipLevels = usedMipLevels();
|
||||
if (mipLevels > 1 && (!_sparseInfo.sparse || _minMip < _sparseInfo.maxSparseLevel)) {
|
||||
Lock lock(texturesByMipCountsMutex);
|
||||
texturesByMipCounts[mipLevels].insert(this);
|
||||
if (texture.isAutogenerateMips()) {
|
||||
generateMips();
|
||||
}
|
||||
}
|
||||
|
||||
void GL45Texture::updateMips() {
|
||||
if (!_sparseInfo.sparse) {
|
||||
return;
|
||||
}
|
||||
auto newMinMip = std::min<uint16_t>(_gpuObject.minMip(), _sparseInfo.maxSparseLevel);
|
||||
if (_minMip < newMinMip) {
|
||||
stripToMip(newMinMip);
|
||||
}
|
||||
}
|
||||
|
||||
void GL45Texture::derez() {
|
||||
if (_sparseInfo.sparse) {
|
||||
assert(_minMip < _sparseInfo.maxSparseLevel);
|
||||
}
|
||||
assert(_minMip < _maxMip);
|
||||
assert(_transferrable);
|
||||
stripToMip(_minMip + 1);
|
||||
}
|
||||
|
||||
void GL45Backend::derezTextures() const {
|
||||
if (GLTexture::getMemoryPressure() < 1.0f) {
|
||||
return;
|
||||
}
|
||||
|
||||
Lock lock(texturesByMipCountsMutex);
|
||||
if (texturesByMipCounts.empty()) {
|
||||
// No available textures to derez
|
||||
return;
|
||||
}
|
||||
|
||||
auto mipLevel = texturesByMipCounts.rbegin()->first;
|
||||
if (mipLevel <= 1) {
|
||||
// No mips available to remove
|
||||
return;
|
||||
}
|
||||
|
||||
GL45Texture* targetTexture = nullptr;
|
||||
{
|
||||
auto& textures = texturesByMipCounts[mipLevel];
|
||||
assert(!textures.empty());
|
||||
targetTexture = *textures.begin();
|
||||
}
|
||||
lock.unlock();
|
||||
targetTexture->derez();
|
||||
}
|
||||
|
|
1033 libraries/gpu-gl/src/gpu/gl45/GL45BackendVariableTexture.cpp (new file)
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
set(TARGET_NAME gpu)
autoscribe_shader_lib(gpu)
setup_hifi_library()
link_hifi_libraries(shared)
link_hifi_libraries(shared ktx)

target_nsight()
@@ -292,15 +292,8 @@ void Batch::setUniformBuffer(uint32 slot, const BufferView& view) {
setUniformBuffer(slot, view._buffer, view._offset, view._size);
}

void Batch::setResourceTexture(uint32 slot, const TexturePointer& texture) {
if (texture && texture->getUsage().isExternal()) {
auto recycler = texture->getExternalRecycler();
Q_ASSERT(recycler);
}

ADD_COMMAND(setResourceTexture);

_params.emplace_back(_textures.cache(texture));
_params.emplace_back(slot);
}
@@ -241,6 +241,7 @@ std::atomic<Buffer::Size> Context::_bufferGPUMemoryUsage { 0 };

std::atomic<uint32_t> Context::_textureGPUCount{ 0 };
std::atomic<uint32_t> Context::_textureGPUSparseCount { 0 };
std::atomic<Texture::Size> Context::_textureTransferPendingSize { 0 };
std::atomic<Texture::Size> Context::_textureGPUMemoryUsage { 0 };
std::atomic<Texture::Size> Context::_textureGPUVirtualMemoryUsage { 0 };
std::atomic<Texture::Size> Context::_textureGPUFramebufferMemoryUsage { 0 };

@@ -317,6 +318,17 @@ void Context::decrementTextureGPUSparseCount() {
--_textureGPUSparseCount;
}

void Context::updateTextureTransferPendingSize(Size prevObjectSize, Size newObjectSize) {
if (prevObjectSize == newObjectSize) {
return;
}
if (newObjectSize > prevObjectSize) {
_textureTransferPendingSize.fetch_add(newObjectSize - prevObjectSize);
} else {
_textureTransferPendingSize.fetch_sub(prevObjectSize - newObjectSize);
}
}

void Context::updateTextureGPUMemoryUsage(Size prevObjectSize, Size newObjectSize) {
if (prevObjectSize == newObjectSize) {
return;

@@ -390,6 +402,10 @@ uint32_t Context::getTextureGPUSparseCount() {
return _textureGPUSparseCount.load();
}

Context::Size Context::getTextureTransferPendingSize() {
return _textureTransferPendingSize.load();
}

Context::Size Context::getTextureGPUMemoryUsage() {
return _textureGPUMemoryUsage.load();
}

@@ -419,6 +435,7 @@ void Backend::incrementTextureGPUCount() { Context::incrementTextureGPUCount();
void Backend::decrementTextureGPUCount() { Context::decrementTextureGPUCount(); }
void Backend::incrementTextureGPUSparseCount() { Context::incrementTextureGPUSparseCount(); }
void Backend::decrementTextureGPUSparseCount() { Context::decrementTextureGPUSparseCount(); }
void Backend::updateTextureTransferPendingSize(Resource::Size prevObjectSize, Resource::Size newObjectSize) { Context::updateTextureTransferPendingSize(prevObjectSize, newObjectSize); }
void Backend::updateTextureGPUMemoryUsage(Resource::Size prevObjectSize, Resource::Size newObjectSize) { Context::updateTextureGPUMemoryUsage(prevObjectSize, newObjectSize); }
void Backend::updateTextureGPUVirtualMemoryUsage(Resource::Size prevObjectSize, Resource::Size newObjectSize) { Context::updateTextureGPUVirtualMemoryUsage(prevObjectSize, newObjectSize); }
void Backend::updateTextureGPUFramebufferMemoryUsage(Resource::Size prevObjectSize, Resource::Size newObjectSize) { Context::updateTextureGPUFramebufferMemoryUsage(prevObjectSize, newObjectSize); }
@@ -101,6 +101,7 @@ public:
static void decrementTextureGPUCount();
static void incrementTextureGPUSparseCount();
static void decrementTextureGPUSparseCount();
static void updateTextureTransferPendingSize(Resource::Size prevObjectSize, Resource::Size newObjectSize);
static void updateTextureGPUMemoryUsage(Resource::Size prevObjectSize, Resource::Size newObjectSize);
static void updateTextureGPUSparseMemoryUsage(Resource::Size prevObjectSize, Resource::Size newObjectSize);
static void updateTextureGPUVirtualMemoryUsage(Resource::Size prevObjectSize, Resource::Size newObjectSize);

@@ -220,6 +221,7 @@ public:
static uint32_t getTextureGPUSparseCount();
static Size getFreeGPUMemory();
static Size getUsedGPUMemory();
static Size getTextureTransferPendingSize();
static Size getTextureGPUMemoryUsage();
static Size getTextureGPUVirtualMemoryUsage();
static Size getTextureGPUFramebufferMemoryUsage();

@@ -263,6 +265,7 @@ protected:
static void decrementTextureGPUCount();
static void incrementTextureGPUSparseCount();
static void decrementTextureGPUSparseCount();
static void updateTextureTransferPendingSize(Size prevObjectSize, Size newObjectSize);
static void updateTextureGPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
static void updateTextureGPUSparseMemoryUsage(Size prevObjectSize, Size newObjectSize);
static void updateTextureGPUVirtualMemoryUsage(Size prevObjectSize, Size newObjectSize);

@@ -279,6 +282,7 @@ protected:

static std::atomic<uint32_t> _textureGPUCount;
static std::atomic<uint32_t> _textureGPUSparseCount;
static std::atomic<Size> _textureTransferPendingSize;
static std::atomic<Size> _textureGPUMemoryUsage;
static std::atomic<Size> _textureGPUSparseMemoryUsage;
static std::atomic<Size> _textureGPUVirtualMemoryUsage;
@@ -10,8 +10,15 @@

using namespace gpu;

const Element Element::COLOR_R_8 { SCALAR, NUINT8, RED };
const Element Element::COLOR_SR_8 { SCALAR, NUINT8, SRED };

const Element Element::COLOR_RGBA_32{ VEC4, NUINT8, RGBA };
const Element Element::COLOR_SRGBA_32{ VEC4, NUINT8, SRGBA };

const Element Element::COLOR_BGRA_32{ VEC4, NUINT8, BGRA };
const Element Element::COLOR_SBGRA_32{ VEC4, NUINT8, SBGRA };

const Element Element::COLOR_R11G11B10{ SCALAR, FLOAT, R11G11B10 };
const Element Element::VEC4F_COLOR_RGBA{ VEC4, FLOAT, RGBA };
const Element Element::VEC2F_UV{ VEC2, FLOAT, UV };
@@ -133,6 +133,7 @@ static const int SCALAR_COUNT[NUM_DIMENSIONS] = {
enum Semantic {
RAW = 0, // used as RAW memory

RED,
RGB,
RGBA,
BGRA,

@@ -149,6 +150,7 @@ enum Semantic {
STENCIL, // Stencil only buffer
DEPTH_STENCIL, // Depth Stencil buffer

SRED,
SRGB,
SRGBA,
SBGRA,

@@ -227,8 +229,12 @@ public:
return getRaw() != right.getRaw();
}

static const Element COLOR_R_8;
static const Element COLOR_SR_8;
static const Element COLOR_RGBA_32;
static const Element COLOR_SRGBA_32;
static const Element COLOR_BGRA_32;
static const Element COLOR_SBGRA_32;
static const Element COLOR_R11G11B10;
static const Element VEC4F_COLOR_RGBA;
static const Element VEC2F_UV;
@ -32,7 +32,7 @@ Framebuffer* Framebuffer::create(const std::string& name) {
|
|||
Framebuffer* Framebuffer::create(const std::string& name, const Format& colorBufferFormat, uint16 width, uint16 height) {
|
||||
auto framebuffer = Framebuffer::create(name);
|
||||
|
||||
auto colorTexture = TexturePointer(Texture::create2D(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
|
||||
auto colorTexture = TexturePointer(Texture::createRenderBuffer(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
|
||||
colorTexture->setSource("Framebuffer::colorTexture");
|
||||
|
||||
framebuffer->setRenderBuffer(0, colorTexture);
|
||||
|
@ -43,8 +43,8 @@ Framebuffer* Framebuffer::create(const std::string& name, const Format& colorBuf
|
|||
Framebuffer* Framebuffer::create(const std::string& name, const Format& colorBufferFormat, const Format& depthStencilBufferFormat, uint16 width, uint16 height) {
|
||||
auto framebuffer = Framebuffer::create(name);
|
||||
|
||||
auto colorTexture = TexturePointer(Texture::create2D(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
|
||||
auto depthTexture = TexturePointer(Texture::create2D(depthStencilBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
|
||||
auto colorTexture = TexturePointer(Texture::createRenderBuffer(colorBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
|
||||
auto depthTexture = TexturePointer(Texture::createRenderBuffer(depthStencilBufferFormat, width, height, Sampler(Sampler::FILTER_MIN_MAG_POINT)));
|
||||
framebuffer->setRenderBuffer(0, colorTexture);
|
||||
framebuffer->setDepthStencilBuffer(depthTexture, depthStencilBufferFormat);
|
||||
|
||||
|
@ -55,7 +55,7 @@ Framebuffer* Framebuffer::createShadowmap(uint16 width) {
|
|||
auto framebuffer = Framebuffer::create("Shadowmap");
|
||||
|
||||
auto depthFormat = Element(gpu::SCALAR, gpu::FLOAT, gpu::DEPTH); // Depth32 texel format
|
||||
auto depthTexture = TexturePointer(Texture::create2D(depthFormat, width, width));
|
||||
auto depthTexture = TexturePointer(Texture::createRenderBuffer(depthFormat, width, width));
|
||||
Sampler::Desc samplerDesc;
|
||||
samplerDesc._borderColor = glm::vec4(1.0f);
|
||||
samplerDesc._wrapModeU = Sampler::WRAP_BORDER;
|
||||
|
@ -143,6 +143,8 @@ int Framebuffer::setRenderBuffer(uint32 slot, const TexturePointer& texture, uin
|
|||
return -1;
|
||||
}
|
||||
|
||||
Q_ASSERT(!texture || TextureUsageType::RENDERBUFFER == texture->getUsageType());
|
||||
|
||||
// Check for the slot
|
||||
if (slot >= getMaxNumRenderBuffers()) {
|
||||
return -1;
|
||||
|
@ -222,6 +224,8 @@ bool Framebuffer::setDepthStencilBuffer(const TexturePointer& texture, const For
|
|||
return false;
|
||||
}
|
||||
|
||||
Q_ASSERT(!texture || TextureUsageType::RENDERBUFFER == texture->getUsageType());
|
||||
|
||||
// Check for the compatibility of size
|
||||
if (texture) {
|
||||
if (!validateTargetCompatibility(*texture)) {
|
||||
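Since framebuffer attachments are now asserted to have TextureUsageType::RENDERBUFFER, callers have to build render targets from Texture::createRenderBuffer() rather than Texture::create2D(). A minimal sketch of the expected usage under that assumption (FramebufferPointer and the variable names here are illustrative, not taken from this changeset):

    // Hypothetical offscreen target built with the new factory functions.
    auto colorFormat = gpu::Element::COLOR_SRGBA_32;
    auto depthFormat = gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::DEPTH);
    auto color = gpu::TexturePointer(gpu::Texture::createRenderBuffer(colorFormat, 1024, 768));
    auto depth = gpu::TexturePointer(gpu::Texture::createRenderBuffer(depthFormat, 1024, 768));
    auto fbo = gpu::FramebufferPointer(gpu::Framebuffer::create("example"));
    fbo->setRenderBuffer(0, color);                 // passes the new RENDERBUFFER usage assert
    fbo->setDepthStencilBuffer(depth, depthFormat); // ditto
    // A texture made with Texture::create2D() (RESOURCE usage) would trip the Q_ASSERTs above.
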
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <QtCore/QThread>
|
||||
#include <Trace.h>
|
||||
|
||||
#include <ktx/KTX.h>
|
||||
#include <NumericalConstants.h>
|
||||
|
||||
#include "GPULogging.h"
|
||||
|
@ -88,6 +89,10 @@ uint32_t Texture::getTextureGPUSparseCount() {
|
|||
return Context::getTextureGPUSparseCount();
|
||||
}
|
||||
|
||||
Texture::Size Texture::getTextureTransferPendingSize() {
|
||||
return Context::getTextureTransferPendingSize();
|
||||
}
|
||||
|
||||
Texture::Size Texture::getTextureGPUMemoryUsage() {
|
||||
return Context::getTextureGPUMemoryUsage();
|
||||
}
|
||||
|
@ -120,62 +125,23 @@ void Texture::setAllowedGPUMemoryUsage(Size size) {
|
|||
|
||||
uint8 Texture::NUM_FACES_PER_TYPE[NUM_TYPES] = { 1, 1, 1, 6 };
|
||||
|
||||
Texture::Pixels::Pixels(const Element& format, Size size, const Byte* bytes) :
|
||||
_format(format),
|
||||
_sysmem(size, bytes),
|
||||
_isGPULoaded(false) {
|
||||
Texture::updateTextureCPUMemoryUsage(0, _sysmem.getSize());
|
||||
}
|
||||
using Storage = Texture::Storage;
|
||||
using PixelsPointer = Texture::PixelsPointer;
|
||||
using MemoryStorage = Texture::MemoryStorage;
|
||||
|
||||
Texture::Pixels::~Pixels() {
|
||||
Texture::updateTextureCPUMemoryUsage(_sysmem.getSize(), 0);
|
||||
}
|
||||
|
||||
Texture::Size Texture::Pixels::resize(Size pSize) {
|
||||
auto prevSize = _sysmem.getSize();
|
||||
auto newSize = _sysmem.resize(pSize);
|
||||
Texture::updateTextureCPUMemoryUsage(prevSize, newSize);
|
||||
return newSize;
|
||||
}
|
||||
|
||||
Texture::Size Texture::Pixels::setData(const Element& format, Size size, const Byte* bytes ) {
|
||||
_format = format;
|
||||
auto prevSize = _sysmem.getSize();
|
||||
auto newSize = _sysmem.setData(size, bytes);
|
||||
Texture::updateTextureCPUMemoryUsage(prevSize, newSize);
|
||||
_isGPULoaded = false;
|
||||
return newSize;
|
||||
}
|
||||
|
||||
void Texture::Pixels::notifyGPULoaded() {
|
||||
_isGPULoaded = true;
|
||||
auto prevSize = _sysmem.getSize();
|
||||
auto newSize = _sysmem.resize(0);
|
||||
Texture::updateTextureCPUMemoryUsage(prevSize, newSize);
|
||||
}
|
||||
|
||||
void Texture::Storage::assignTexture(Texture* texture) {
|
||||
void Storage::assignTexture(Texture* texture) {
|
||||
_texture = texture;
|
||||
if (_texture) {
|
||||
_type = _texture->getType();
|
||||
}
|
||||
}
|
||||
|
||||
void Texture::Storage::reset() {
|
||||
void MemoryStorage::reset() {
|
||||
_mips.clear();
|
||||
bumpStamp();
|
||||
}
|
||||
|
||||
Texture::PixelsPointer Texture::Storage::editMipFace(uint16 level, uint8 face) {
|
||||
if (level < _mips.size()) {
|
||||
assert(face < _mips[level].size());
|
||||
bumpStamp();
|
||||
return _mips[level][face];
|
||||
}
|
||||
return PixelsPointer();
|
||||
}
|
||||
|
||||
const Texture::PixelsPointer Texture::Storage::getMipFace(uint16 level, uint8 face) const {
|
||||
PixelsPointer MemoryStorage::getMipFace(uint16 level, uint8 face) const {
|
||||
if (level < _mips.size()) {
|
||||
assert(face < _mips[level].size());
|
||||
return _mips[level][face];
|
||||
|
@ -183,20 +149,12 @@ const Texture::PixelsPointer Texture::Storage::getMipFace(uint16 level, uint8 fa
|
|||
return PixelsPointer();
|
||||
}
|
||||
|
||||
void Texture::Storage::notifyMipFaceGPULoaded(uint16 level, uint8 face) const {
|
||||
PixelsPointer mipFace = getMipFace(level, face);
|
||||
// Free the mips
|
||||
if (mipFace) {
|
||||
mipFace->notifyGPULoaded();
|
||||
}
|
||||
}
|
||||
|
||||
bool Texture::Storage::isMipAvailable(uint16 level, uint8 face) const {
|
||||
bool MemoryStorage::isMipAvailable(uint16 level, uint8 face) const {
|
||||
PixelsPointer mipFace = getMipFace(level, face);
|
||||
return (mipFace && mipFace->getSize());
|
||||
}
|
||||
|
||||
bool Texture::Storage::allocateMip(uint16 level) {
|
||||
bool MemoryStorage::allocateMip(uint16 level) {
|
||||
bool changed = false;
|
||||
if (level >= _mips.size()) {
|
||||
_mips.resize(level+1, std::vector<PixelsPointer>(Texture::NUM_FACES_PER_TYPE[getType()]));
|
||||
|
@ -206,7 +164,6 @@ bool Texture::Storage::allocateMip(uint16 level) {
|
|||
auto& mip = _mips[level];
|
||||
for (auto& face : mip) {
|
||||
if (!face) {
|
||||
face = std::make_shared<Pixels>();
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
|
@ -216,7 +173,7 @@ bool Texture::Storage::allocateMip(uint16 level) {
|
|||
return changed;
|
||||
}
|
||||
|
||||
bool Texture::Storage::assignMipData(uint16 level, const Element& format, Size size, const Byte* bytes) {
|
||||
void MemoryStorage::assignMipData(uint16 level, const storage::StoragePointer& storagePointer) {
|
||||
|
||||
allocateMip(level);
|
||||
auto& mip = _mips[level];
|
||||
|
@ -225,64 +182,63 @@ bool Texture::Storage::assignMipData(uint16 level, const Element& format, Size s
|
|||
// The bytes assigned here are supposed to contain all the faces bytes of the mip.
|
||||
// For tex1D, 2D, 3D there is only one face
|
||||
// For Cube, we expect the 6 faces in the order X+, X-, Y+, Y-, Z+, Z-
|
||||
auto sizePerFace = size / mip.size();
|
||||
auto faceBytes = bytes;
|
||||
Size allocated = 0;
|
||||
auto sizePerFace = storagePointer->size() / mip.size();
|
||||
size_t offset = 0;
|
||||
for (auto& face : mip) {
|
||||
allocated += face->setData(format, sizePerFace, faceBytes);
|
||||
faceBytes += sizePerFace;
|
||||
face = storagePointer->createView(sizePerFace, offset);
|
||||
offset += sizePerFace;
|
||||
}
|
||||
|
||||
bumpStamp();
|
||||
|
||||
return allocated == size;
|
||||
}
|
||||
|
||||
|
||||
bool Texture::Storage::assignMipFaceData(uint16 level, const Element& format, Size size, const Byte* bytes, uint8 face) {
|
||||
|
||||
void Texture::MemoryStorage::assignMipFaceData(uint16 level, uint8 face, const storage::StoragePointer& storagePointer) {
|
||||
allocateMip(level);
|
||||
auto mip = _mips[level];
|
||||
Size allocated = 0;
|
||||
auto& mip = _mips[level];
|
||||
if (face < mip.size()) {
|
||||
auto mipFace = mip[face];
|
||||
allocated += mipFace->setData(format, size, bytes);
|
||||
mip[face] = storagePointer;
|
||||
bumpStamp();
|
||||
}
|
||||
|
||||
return allocated == size;
|
||||
}
|
||||
|
||||
Texture* Texture::createExternal2D(const ExternalRecycler& recycler, const Sampler& sampler) {
|
||||
Texture* tex = new Texture();
|
||||
Texture* Texture::createExternal(const ExternalRecycler& recycler, const Sampler& sampler) {
|
||||
Texture* tex = new Texture(TextureUsageType::EXTERNAL);
|
||||
tex->_type = TEX_2D;
|
||||
tex->_maxMip = 0;
|
||||
tex->_sampler = sampler;
|
||||
tex->setUsage(Usage::Builder().withExternal().withColor());
|
||||
tex->setExternalRecycler(recycler);
|
||||
return tex;
|
||||
}
|
||||
|
||||
Texture* Texture::createRenderBuffer(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler) {
|
||||
return create(TextureUsageType::RENDERBUFFER, TEX_2D, texelFormat, width, height, 1, 1, 0, sampler);
|
||||
}
|
||||
|
||||
Texture* Texture::create1D(const Element& texelFormat, uint16 width, const Sampler& sampler) {
|
||||
return create(TEX_1D, texelFormat, width, 1, 1, 1, 1, sampler);
|
||||
return create(TextureUsageType::RESOURCE, TEX_1D, texelFormat, width, 1, 1, 1, 0, sampler);
|
||||
}
|
||||
|
||||
Texture* Texture::create2D(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler) {
|
||||
return create(TEX_2D, texelFormat, width, height, 1, 1, 1, sampler);
|
||||
return create(TextureUsageType::RESOURCE, TEX_2D, texelFormat, width, height, 1, 1, 0, sampler);
|
||||
}
|
||||
|
||||
Texture* Texture::createStrict(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler) {
|
||||
return create(TextureUsageType::STRICT_RESOURCE, TEX_2D, texelFormat, width, height, 1, 1, 0, sampler);
|
||||
}
|
||||
|
||||
Texture* Texture::create3D(const Element& texelFormat, uint16 width, uint16 height, uint16 depth, const Sampler& sampler) {
|
||||
return create(TEX_3D, texelFormat, width, height, depth, 1, 1, sampler);
|
||||
return create(TextureUsageType::RESOURCE, TEX_3D, texelFormat, width, height, depth, 1, 0, sampler);
|
||||
}
|
||||
|
||||
Texture* Texture::createCube(const Element& texelFormat, uint16 width, const Sampler& sampler) {
|
||||
return create(TEX_CUBE, texelFormat, width, width, 1, 1, 1, sampler);
|
||||
return create(TextureUsageType::RESOURCE, TEX_CUBE, texelFormat, width, width, 1, 1, 0, sampler);
|
||||
}
|
||||
|
||||
Texture* Texture::create(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, const Sampler& sampler)
|
||||
Texture* Texture::create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, const Sampler& sampler)
|
||||
{
|
||||
Texture* tex = new Texture();
|
||||
tex->_storage.reset(new Storage());
|
||||
Texture* tex = new Texture(usageType);
|
||||
tex->_storage.reset(new MemoryStorage());
|
||||
tex->_type = type;
|
||||
tex->_storage->assignTexture(tex);
|
||||
tex->_maxMip = 0;
|
||||
|
@ -293,16 +249,14 @@ Texture* Texture::create(Type type, const Element& texelFormat, uint16 width, ui
|
|||
return tex;
|
||||
}
|
||||
|
||||
Texture::Texture():
|
||||
Resource()
|
||||
{
|
||||
Texture::Texture(TextureUsageType usageType) :
|
||||
Resource(), _usageType(usageType) {
|
||||
_textureCPUCount++;
|
||||
}
|
||||
|
||||
Texture::~Texture()
|
||||
{
|
||||
Texture::~Texture() {
|
||||
_textureCPUCount--;
|
||||
if (getUsage().isExternal()) {
|
||||
if (_usageType == TextureUsageType::EXTERNAL) {
|
||||
Texture::ExternalUpdates externalUpdates;
|
||||
{
|
||||
Lock lock(_externalMutex);
|
||||
|
@ -321,7 +275,7 @@ Texture::~Texture()
|
|||
}
|
||||
|
||||
Texture::Size Texture::resize(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices) {
|
||||
if (width && height && depth && numSamples && numSlices) {
|
||||
if (width && height && depth && numSamples) {
|
||||
bool changed = false;
|
||||
|
||||
if ( _type != type) {
|
||||
|
@ -382,20 +336,20 @@ Texture::Size Texture::resize(Type type, const Element& texelFormat, uint16 widt
|
|||
}
|
||||
|
||||
Texture::Size Texture::resize1D(uint16 width, uint16 numSamples) {
|
||||
return resize(TEX_1D, getTexelFormat(), width, 1, 1, numSamples, 1);
|
||||
return resize(TEX_1D, getTexelFormat(), width, 1, 1, numSamples, 0);
|
||||
}
|
||||
Texture::Size Texture::resize2D(uint16 width, uint16 height, uint16 numSamples) {
|
||||
return resize(TEX_2D, getTexelFormat(), width, height, 1, numSamples, 1);
|
||||
return resize(TEX_2D, getTexelFormat(), width, height, 1, numSamples, 0);
|
||||
}
|
||||
Texture::Size Texture::resize3D(uint16 width, uint16 height, uint16 depth, uint16 numSamples) {
|
||||
return resize(TEX_3D, getTexelFormat(), width, height, depth, numSamples, 1);
|
||||
return resize(TEX_3D, getTexelFormat(), width, height, depth, numSamples, 0);
|
||||
}
|
||||
Texture::Size Texture::resizeCube(uint16 width, uint16 numSamples) {
|
||||
return resize(TEX_CUBE, getTexelFormat(), width, 1, 1, numSamples, 1);
|
||||
return resize(TEX_CUBE, getTexelFormat(), width, 1, 1, numSamples, 0);
|
||||
}
|
||||
|
||||
Texture::Size Texture::reformat(const Element& texelFormat) {
|
||||
return resize(_type, texelFormat, getWidth(), getHeight(), getDepth(), getNumSamples(), getNumSlices());
|
||||
return resize(_type, texelFormat, getWidth(), getHeight(), getDepth(), getNumSamples(), _numSlices);
|
||||
}
|
||||
|
||||
bool Texture::isColorRenderTarget() const {
|
||||
|
@ -426,69 +380,83 @@ uint16 Texture::evalNumMips() const {
|
|||
return evalNumMips({ _width, _height, _depth });
|
||||
}
|
||||
|
||||
bool Texture::assignStoredMip(uint16 level, const Element& format, Size size, const Byte* bytes) {
|
||||
void Texture::setStoredMipFormat(const Element& format) {
|
||||
_storage->setFormat(format);
|
||||
}
|
||||
|
||||
const Element& Texture::getStoredMipFormat() const {
|
||||
return _storage->getFormat();
|
||||
}
|
||||
|
||||
void Texture::assignStoredMip(uint16 level, Size size, const Byte* bytes) {
|
||||
storage::StoragePointer storage = std::make_shared<storage::MemoryStorage>(size, bytes);
|
||||
assignStoredMip(level, storage);
|
||||
}
|
||||
|
||||
void Texture::assignStoredMipFace(uint16 level, uint8 face, Size size, const Byte* bytes) {
|
||||
storage::StoragePointer storage = std::make_shared<storage::MemoryStorage>(size, bytes);
|
||||
assignStoredMipFace(level, face, storage);
|
||||
}
|
||||
|
||||
void Texture::assignStoredMip(uint16 level, storage::StoragePointer& storage) {
|
||||
// Check that the level accessed makes sense
|
||||
if (level != 0) {
|
||||
if (_autoGenerateMips) {
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
if (level >= evalNumMips()) {
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Then check that the mip data passed makes sense with its format
|
||||
Size expectedSize = evalStoredMipSize(level, format);
|
||||
if (size == expectedSize) {
|
||||
_storage->assignMipData(level, format, size, bytes);
|
||||
Size expectedSize = evalStoredMipSize(level, getStoredMipFormat());
|
||||
auto size = storage->size();
|
||||
if (storage->size() == expectedSize) {
|
||||
_storage->assignMipData(level, storage);
|
||||
_maxMip = std::max(_maxMip, level);
|
||||
_stamp++;
|
||||
return true;
|
||||
} else if (size > expectedSize) {
|
||||
// NOTE: We sometimes hit this case because QImage (the source of the bits) appears to generate images
// with each line of pixels aligned to 32 bits.
// We should probably do something smarter to get the correct result, but for now (UI elements)
// it seems to work...
|
||||
_storage->assignMipData(level, format, size, bytes);
|
||||
_storage->assignMipData(level, storage);
|
||||
_maxMip = std::max(_maxMip, level);
|
||||
_stamp++;
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
bool Texture::assignStoredMipFace(uint16 level, const Element& format, Size size, const Byte* bytes, uint8 face) {
|
||||
void Texture::assignStoredMipFace(uint16 level, uint8 face, storage::StoragePointer& storage) {
|
||||
// Check that the level accessed makes sense
|
||||
if (level != 0) {
|
||||
if (_autoGenerateMips) {
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
if (level >= evalNumMips()) {
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Then check that the mip data passed makes sense with its format
|
||||
Size expectedSize = evalStoredMipFaceSize(level, format);
|
||||
Size expectedSize = evalStoredMipFaceSize(level, getStoredMipFormat());
|
||||
auto size = storage->size();
|
||||
if (size == expectedSize) {
|
||||
_storage->assignMipFaceData(level, format, size, bytes, face);
|
||||
_storage->assignMipFaceData(level, face, storage);
|
||||
_maxMip = std::max(_maxMip, level);
|
||||
_stamp++;
|
||||
return true;
|
||||
} else if (size > expectedSize) {
|
||||
// NOTE: We sometimes hit this case because QImage (the source of the bits) appears to generate images
// with each line of pixels aligned to 32 bits.
// We should probably do something smarter to get the correct result, but for now (UI elements)
// it seems to work...
|
||||
_storage->assignMipFaceData(level, format, size, bytes, face);
|
||||
_storage->assignMipFaceData(level, face, storage);
|
||||
_maxMip = std::max(_maxMip, level);
|
||||
_stamp++;
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
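To make the "size > expectedSize" branch above concrete: QImage pads each scanline to a 4-byte boundary, so a tightly packed mip estimate can undershoot the number of bytes actually handed over. A small illustrative calculation (hypothetical numbers, not part of this change):

    // Example: 5-pixel-wide RGB888 image, 7 rows.
    size_t width = 5, bytesPerPixel = 3, height = 7;
    size_t packedRow = width * bytesPerPixel;       // 15 bytes, what the mip size eval expects
    size_t paddedRow = ((packedRow + 3) / 4) * 4;   // 16 bytes, QImage-style 32-bit row alignment
    size_t expected  = packedRow * height;          // 105 bytes
    size_t actual    = paddedRow * height;          // 112 bytes -> takes the lenient branch above
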
|
||||
|
||||
|
||||
uint16 Texture::autoGenerateMips(uint16 maxMip) {
|
||||
bool changed = false;
|
||||
if (!_autoGenerateMips) {
|
||||
|
@ -522,7 +490,7 @@ uint16 Texture::getStoredMipHeight(uint16 level) const {
|
|||
if (mip && mip->getSize()) {
|
||||
return evalMipHeight(level);
|
||||
}
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint16 Texture::getStoredMipDepth(uint16 level) const {
|
||||
|
@ -794,7 +762,7 @@ bool sphericalHarmonicsFromTexture(const gpu::Texture& cubeTexture, std::vector<
|
|||
for(int face=0; face < gpu::Texture::NUM_CUBE_FACES; face++) {
|
||||
PROFILE_RANGE(render_gpu, "ProcessFace");
|
||||
|
||||
auto mipFormat = cubeTexture.accessStoredMipFace(0, face)->getFormat();
|
||||
auto mipFormat = cubeTexture.getStoredMipFormat();
|
||||
auto numComponents = mipFormat.getScalarCount();
|
||||
int roffset { 0 };
|
||||
int goffset { 1 };
|
||||
|
@ -1008,3 +976,7 @@ Texture::ExternalUpdates Texture::getUpdates() const {
|
|||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void Texture::setStorage(std::unique_ptr<Storage>& newStorage) {
|
||||
_storage.swap(newStorage);
|
||||
}
|
||||
|
|
|
@ -17,9 +17,17 @@
|
|||
#include <QMetaType>
|
||||
#include <QUrl>
|
||||
|
||||
#include <shared/Storage.h>
|
||||
|
||||
#include "Forward.h"
|
||||
#include "Resource.h"
|
||||
|
||||
namespace ktx {
|
||||
class KTX;
|
||||
using KTXUniquePointer = std::unique_ptr<KTX>;
|
||||
struct Header;
|
||||
}
|
||||
|
||||
namespace gpu {
|
||||
|
||||
// The spherical harmonics are a nice tool for cubemaps, so if required, the irradiance SH can be automatically generated
|
||||
|
@ -135,10 +143,18 @@ public:
|
|||
uint8 getMinMip() const { return _desc._minMip; }
|
||||
uint8 getMaxMip() const { return _desc._maxMip; }
|
||||
|
||||
const Desc& getDesc() const { return _desc; }
|
||||
protected:
|
||||
Desc _desc;
|
||||
};
|
||||
|
||||
enum class TextureUsageType {
|
||||
RENDERBUFFER, // Used as attachments to a framebuffer
|
||||
RESOURCE, // Resource textures, like materials... subject to memory manipulation
|
||||
STRICT_RESOURCE, // Resource textures not subject to manipulation, like the normal fitting texture
|
||||
EXTERNAL,
|
||||
};
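As a rough map, inferred from the factory functions declared later in this header, of which creator yields which usage type:

    // Texture::createRenderBuffer()          -> TextureUsageType::RENDERBUFFER
    // Texture::create1D/2D/3D/createCube()   -> TextureUsageType::RESOURCE
    // Texture::createStrict()                -> TextureUsageType::STRICT_RESOURCE
    // Texture::createExternal()              -> TextureUsageType::EXTERNAL
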
|
||||
|
||||
class Texture : public Resource {
|
||||
static std::atomic<uint32_t> _textureCPUCount;
|
||||
static std::atomic<Size> _textureCPUMemoryUsage;
|
||||
|
@ -147,10 +163,12 @@ class Texture : public Resource {
|
|||
static void updateTextureCPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
|
||||
|
||||
public:
|
||||
static const uint32_t CUBE_FACE_COUNT { 6 };
|
||||
static uint32_t getTextureCPUCount();
|
||||
static Size getTextureCPUMemoryUsage();
|
||||
static uint32_t getTextureGPUCount();
|
||||
static uint32_t getTextureGPUSparseCount();
|
||||
static Size getTextureTransferPendingSize();
|
||||
static Size getTextureGPUMemoryUsage();
|
||||
static Size getTextureGPUVirtualMemoryUsage();
|
||||
static Size getTextureGPUFramebufferMemoryUsage();
|
||||
|
@ -173,9 +191,9 @@ public:
|
|||
NORMAL, // Texture is a normal map
|
||||
ALPHA, // Texture has an alpha channel
|
||||
ALPHA_MASK, // Texture alpha channel is a Mask 0/1
|
||||
EXTERNAL,
|
||||
NUM_FLAGS,
|
||||
};
|
||||
|
||||
typedef std::bitset<NUM_FLAGS> Flags;
|
||||
|
||||
// The key is the Flags
|
||||
|
@ -199,7 +217,6 @@ public:
|
|||
Builder& withNormal() { _flags.set(NORMAL); return (*this); }
|
||||
Builder& withAlpha() { _flags.set(ALPHA); return (*this); }
|
||||
Builder& withAlphaMask() { _flags.set(ALPHA_MASK); return (*this); }
|
||||
Builder& withExternal() { _flags.set(EXTERNAL); return (*this); }
|
||||
};
|
||||
Usage(const Builder& builder) : Usage(builder._flags) {}
|
||||
|
||||
|
@ -208,37 +225,12 @@ public:
|
|||
|
||||
bool isAlpha() const { return _flags[ALPHA]; }
|
||||
bool isAlphaMask() const { return _flags[ALPHA_MASK]; }
|
||||
bool isExternal() const { return _flags[EXTERNAL]; }
|
||||
|
||||
|
||||
bool operator==(const Usage& usage) { return (_flags == usage._flags); }
|
||||
bool operator!=(const Usage& usage) { return (_flags != usage._flags); }
|
||||
};
|
||||
|
||||
class Pixels {
|
||||
public:
|
||||
Pixels() {}
|
||||
Pixels(const Pixels& pixels) = default;
|
||||
Pixels(const Element& format, Size size, const Byte* bytes);
|
||||
~Pixels();
|
||||
|
||||
const Byte* readData() const { return _sysmem.readData(); }
|
||||
Size getSize() const { return _sysmem.getSize(); }
|
||||
Size resize(Size pSize);
|
||||
Size setData(const Element& format, Size size, const Byte* bytes );
|
||||
|
||||
const Element& getFormat() const { return _format; }
|
||||
|
||||
void notifyGPULoaded();
|
||||
|
||||
protected:
|
||||
Element _format;
|
||||
Sysmem _sysmem;
|
||||
bool _isGPULoaded;
|
||||
|
||||
friend class Texture;
|
||||
};
|
||||
typedef std::shared_ptr< Pixels > PixelsPointer;
|
||||
using PixelsPointer = storage::StoragePointer;
|
||||
|
||||
enum Type {
|
||||
TEX_1D = 0,
|
||||
|
@ -261,46 +253,78 @@ public:
|
|||
NUM_CUBE_FACES, // Not a valid face index
|
||||
};
|
||||
|
||||
|
||||
class Storage {
|
||||
public:
|
||||
Storage() {}
|
||||
virtual ~Storage() {}
|
||||
virtual void reset();
|
||||
virtual PixelsPointer editMipFace(uint16 level, uint8 face = 0);
|
||||
virtual const PixelsPointer getMipFace(uint16 level, uint8 face = 0) const;
|
||||
virtual bool allocateMip(uint16 level);
|
||||
virtual bool assignMipData(uint16 level, const Element& format, Size size, const Byte* bytes);
|
||||
virtual bool assignMipFaceData(uint16 level, const Element& format, Size size, const Byte* bytes, uint8 face);
|
||||
virtual bool isMipAvailable(uint16 level, uint8 face = 0) const;
|
||||
|
||||
virtual void reset() = 0;
|
||||
virtual PixelsPointer getMipFace(uint16 level, uint8 face = 0) const = 0;
|
||||
virtual void assignMipData(uint16 level, const storage::StoragePointer& storage) = 0;
|
||||
virtual void assignMipFaceData(uint16 level, uint8 face, const storage::StoragePointer& storage) = 0;
|
||||
virtual bool isMipAvailable(uint16 level, uint8 face = 0) const = 0;
|
||||
Texture::Type getType() const { return _type; }
|
||||
|
||||
|
||||
Stamp getStamp() const { return _stamp; }
|
||||
Stamp bumpStamp() { return ++_stamp; }
|
||||
protected:
|
||||
Stamp _stamp = 0;
|
||||
Texture* _texture = nullptr; // Points to the parent texture (not owned)
|
||||
Texture::Type _type = Texture::TEX_2D; // The type of texture is needed to know the number of faces to expect
|
||||
std::vector<std::vector<PixelsPointer>> _mips; // an array of mips, each mip is an array of faces
|
||||
|
||||
void setFormat(const Element& format) { _format = format; }
|
||||
const Element& getFormat() const { return _format; }
|
||||
|
||||
private:
|
||||
Stamp _stamp { 0 };
|
||||
Element _format;
|
||||
Texture::Type _type { Texture::TEX_2D }; // The type of texture is needed to know the number of faces to expect
|
||||
Texture* _texture { nullptr }; // Points to the parent texture (not owned)
|
||||
virtual void assignTexture(Texture* tex); // Texture storage points to ONE corresponding Texture.
|
||||
const Texture* getTexture() const { return _texture; }
|
||||
|
||||
friend class Texture;
|
||||
|
||||
// THis should be only called by the Texture from the Backend to notify the storage that the specified mip face pixels
|
||||
// have been uploaded to the GPU memory. IT is possible for the storage to free the system memory then
|
||||
virtual void notifyMipFaceGPULoaded(uint16 level, uint8 face) const;
|
||||
};
|
||||
|
||||
|
||||
class MemoryStorage : public Storage {
|
||||
public:
|
||||
void reset() override;
|
||||
PixelsPointer getMipFace(uint16 level, uint8 face = 0) const override;
|
||||
void assignMipData(uint16 level, const storage::StoragePointer& storage) override;
|
||||
void assignMipFaceData(uint16 level, uint8 face, const storage::StoragePointer& storage) override;
|
||||
bool isMipAvailable(uint16 level, uint8 face = 0) const override;
|
||||
|
||||
protected:
|
||||
bool allocateMip(uint16 level);
|
||||
std::vector<std::vector<PixelsPointer>> _mips; // an array of mips, each mip is an array of faces
|
||||
};
|
||||
|
||||
class KtxStorage : public Storage {
|
||||
public:
|
||||
KtxStorage(ktx::KTXUniquePointer& ktxData);
|
||||
PixelsPointer getMipFace(uint16 level, uint8 face = 0) const override;
|
||||
// By convention, all mip levels and faces MUST be populated when using KTX backing
|
||||
bool isMipAvailable(uint16 level, uint8 face = 0) const override { return true; }
|
||||
|
||||
void assignMipData(uint16 level, const storage::StoragePointer& storage) override {
|
||||
throw std::runtime_error("Invalid call");
|
||||
}
|
||||
|
||||
void assignMipFaceData(uint16 level, uint8 face, const storage::StoragePointer& storage) override {
|
||||
throw std::runtime_error("Invalid call");
|
||||
}
|
||||
void reset() override { }
|
||||
|
||||
protected:
|
||||
ktx::KTXUniquePointer _ktxData;
|
||||
friend class Texture;
|
||||
};
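A hedged sketch of how the two storage flavors are meant to be swapped, based only on the declarations above: a texture starts out with a MemoryStorage, and setKtxBacking() replaces it with a read-only KtxStorage view over the KTX blob, which is why the assign* overrides throw.

    // Assumed flow, not taken verbatim from the changeset ("someTexture" is hypothetical):
    ktx::KTXUniquePointer ktxFile = gpu::Texture::serialize(*someTexture); // snapshot current mips
    if (ktxFile) {
        someTexture->setKtxBacking(ktxFile); // storage is now a KtxStorage; mips are served
                                             // as views into the KTX payload, no extra copies
    }
    // From here on, assignStoredMip()/assignStoredMipFace() must not be called on the texture:
    // KtxStorage::assignMipData() deliberately throws std::runtime_error.
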
|
||||
|
||||
static Texture* create1D(const Element& texelFormat, uint16 width, const Sampler& sampler = Sampler());
|
||||
static Texture* create2D(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler = Sampler());
|
||||
static Texture* create3D(const Element& texelFormat, uint16 width, uint16 height, uint16 depth, const Sampler& sampler = Sampler());
|
||||
static Texture* createCube(const Element& texelFormat, uint16 width, const Sampler& sampler = Sampler());
|
||||
static Texture* createExternal2D(const ExternalRecycler& recycler, const Sampler& sampler = Sampler());
|
||||
static Texture* createRenderBuffer(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler = Sampler());
|
||||
static Texture* createStrict(const Element& texelFormat, uint16 width, uint16 height, const Sampler& sampler = Sampler());
|
||||
static Texture* createExternal(const ExternalRecycler& recycler, const Sampler& sampler = Sampler());
|
||||
|
||||
Texture();
|
||||
Texture(TextureUsageType usageType);
|
||||
Texture(const Texture& buf); // deep copy of the sysmem texture
|
||||
Texture& operator=(const Texture& buf); // deep copy of the sysmem texture
|
||||
~Texture();
|
||||
|
@ -325,6 +349,7 @@ public:
|
|||
|
||||
// Size and format
|
||||
Type getType() const { return _type; }
|
||||
TextureUsageType getUsageType() const { return _usageType; }
|
||||
|
||||
bool isColorRenderTarget() const;
|
||||
bool isDepthStencilRenderTarget() const;
|
||||
|
@ -347,7 +372,12 @@ public:
|
|||
|
||||
uint32 getNumTexels() const { return _width * _height * _depth * getNumFaces(); }
|
||||
|
||||
uint16 getNumSlices() const { return _numSlices; }
|
||||
// The texture is an array if _numSlices is not 0;
// otherwise, if _numSlices is 0, the texture is NOT an array.
// The number of slices returned is at minimum 1 (if not an array), or the actual _numSlices.
|
||||
bool isArray() const { return _numSlices > 0; }
|
||||
uint16 getNumSlices() const { return (isArray() ? _numSlices : 1); }
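For example, under this convention (hypothetical values):

    // _numSlices == 0 -> isArray() == false, getNumSlices() == 1 (plain 2D/3D/cube texture)
    // _numSlices == 6 -> isArray() == true,  getNumSlices() == 6 (texture array with 6 slices)
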
|
||||
|
||||
uint16 getNumSamples() const { return _numSamples; }
|
||||
|
||||
|
||||
|
@ -429,18 +459,29 @@ public:
|
|||
|
||||
// Managing Storage and mips
|
||||
|
||||
// Mip storage format is constant across all mips
|
||||
void setStoredMipFormat(const Element& format);
|
||||
const Element& getStoredMipFormat() const;
|
||||
|
||||
// Manually allocate the mips down until the specified maxMip
|
||||
// this is just allocating the sysmem version of it
|
||||
// in case autoGen is on, this doesn't allocate
|
||||
// Explicitly assign mip data for a certain level
|
||||
// If Bytes is NULL then simply allocate the space so mip sysmem can be accessed
|
||||
bool assignStoredMip(uint16 level, const Element& format, Size size, const Byte* bytes);
|
||||
bool assignStoredMipFace(uint16 level, const Element& format, Size size, const Byte* bytes, uint8 face);
|
||||
|
||||
void assignStoredMip(uint16 level, Size size, const Byte* bytes);
|
||||
void assignStoredMipFace(uint16 level, uint8 face, Size size, const Byte* bytes);
|
||||
|
||||
void assignStoredMip(uint16 level, storage::StoragePointer& storage);
|
||||
void assignStoredMipFace(uint16 level, uint8 face, storage::StoragePointer& storage);
|
||||
|
||||
// Access the sub mips
|
||||
bool isStoredMipFaceAvailable(uint16 level, uint8 face = 0) const { return _storage->isMipAvailable(level, face); }
|
||||
const PixelsPointer accessStoredMipFace(uint16 level, uint8 face = 0) const { return _storage->getMipFace(level, face); }
|
||||
|
||||
void setStorage(std::unique_ptr<Storage>& newStorage);
|
||||
void setKtxBacking(ktx::KTXUniquePointer& newBacking);
|
||||
|
||||
// access sizes for the stored mips
|
||||
uint16 getStoredMipWidth(uint16 level) const;
|
||||
uint16 getStoredMipHeight(uint16 level) const;
|
||||
|
@ -464,8 +505,8 @@ public:
|
|||
const Sampler& getSampler() const { return _sampler; }
|
||||
Stamp getSamplerStamp() const { return _samplerStamp; }
|
||||
|
||||
// Only callable by the Backend
|
||||
void notifyMipFaceGPULoaded(uint16 level, uint8 face = 0) const { return _storage->notifyMipFaceGPULoaded(level, face); }
|
||||
void setFallbackTexture(const TexturePointer& fallback) { _fallback = fallback; }
|
||||
TexturePointer getFallbackTexture() const { return _fallback.lock(); }
|
||||
|
||||
void setExternalTexture(uint32 externalId, void* externalFence);
|
||||
void setExternalRecycler(const ExternalRecycler& recycler);
|
||||
|
@ -475,36 +516,45 @@ public:
|
|||
|
||||
ExternalUpdates getUpdates() const;
|
||||
|
||||
// Textures can be serialized directly to ktx data file, here is how
|
||||
static ktx::KTXUniquePointer serialize(const Texture& texture);
|
||||
static Texture* unserialize(const ktx::KTXUniquePointer& srcData, TextureUsageType usageType = TextureUsageType::RESOURCE, Usage usage = Usage(), const Sampler::Desc& sampler = Sampler::Desc());
|
||||
static bool evalKTXFormat(const Element& mipFormat, const Element& texelFormat, ktx::Header& header);
|
||||
static bool evalTextureFormat(const ktx::Header& header, Element& mipFormat, Element& texelFormat);
|
||||
|
||||
protected:
|
||||
const TextureUsageType _usageType;
|
||||
|
||||
// Should only be accessed internally or by the backend sync function
|
||||
mutable Mutex _externalMutex;
|
||||
mutable std::list<ExternalIdAndFence> _externalUpdates;
|
||||
ExternalRecycler _externalRecycler;
|
||||
|
||||
|
||||
std::weak_ptr<Texture> _fallback;
|
||||
// Not strictly necessary, but incredibly useful for debugging
|
||||
std::string _source;
|
||||
std::unique_ptr< Storage > _storage;
|
||||
|
||||
Stamp _stamp = 0;
|
||||
Stamp _stamp { 0 };
|
||||
|
||||
Sampler _sampler;
|
||||
Stamp _samplerStamp;
|
||||
Stamp _samplerStamp { 0 };
|
||||
|
||||
uint32 _size = 0;
|
||||
uint32 _size { 0 };
|
||||
Element _texelFormat;
|
||||
|
||||
uint16 _width = 1;
|
||||
uint16 _height = 1;
|
||||
uint16 _depth = 1;
|
||||
uint16 _width { 1 };
|
||||
uint16 _height { 1 };
|
||||
uint16 _depth { 1 };
|
||||
|
||||
uint16 _numSamples = 1;
|
||||
uint16 _numSlices = 1;
|
||||
uint16 _numSamples { 1 };
|
||||
uint16 _numSlices { 0 }; // if _numSlices is 0, the texture is not an "Array", the getNumSlices reported is 1
|
||||
|
||||
uint16 _maxMip { 0 };
|
||||
uint16 _minMip { 0 };
|
||||
|
||||
Type _type = TEX_1D;
|
||||
Type _type { TEX_1D };
|
||||
|
||||
Usage _usage;
|
||||
|
||||
|
@ -513,7 +563,7 @@ protected:
|
|||
bool _isIrradianceValid = false;
|
||||
bool _defined = false;
|
||||
|
||||
static Texture* create(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, const Sampler& sampler);
|
||||
static Texture* create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, const Sampler& sampler);
|
||||
|
||||
Size resize(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices);
|
||||
};
|
||||
|
|
289
libraries/gpu/src/gpu/Texture_ktx.cpp
Normal file
|
@ -0,0 +1,289 @@
|
|||
//
|
||||
// Texture_ktx.cpp
|
||||
// libraries/gpu/src/gpu
|
||||
//
|
||||
// Created by Sam Gateau on 2/16/2017.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
|
||||
#include "Texture.h"
|
||||
|
||||
#include <ktx/KTX.h>
|
||||
using namespace gpu;
|
||||
|
||||
using PixelsPointer = Texture::PixelsPointer;
|
||||
using KtxStorage = Texture::KtxStorage;
|
||||
|
||||
struct GPUKTXPayload {
|
||||
Sampler::Desc _samplerDesc;
|
||||
Texture::Usage _usage;
|
||||
TextureUsageType _usageType;
|
||||
|
||||
static std::string KEY;
|
||||
static bool isGPUKTX(const ktx::KeyValue& val) {
|
||||
return (val._key.compare(KEY) == 0);
|
||||
}
|
||||
|
||||
static bool findInKeyValues(const ktx::KeyValues& keyValues, GPUKTXPayload& payload) {
|
||||
auto found = std::find_if(keyValues.begin(), keyValues.end(), isGPUKTX);
|
||||
if (found != keyValues.end()) {
|
||||
if ((*found)._value.size() == sizeof(GPUKTXPayload)) {
|
||||
memcpy(&payload, (*found)._value.data(), sizeof(GPUKTXPayload));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
std::string GPUKTXPayload::KEY { "hifi.gpu" };
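A minimal sketch of how this payload rides along in the KTX key/value section, assuming only the KeyValue API declared in KTX.h; the writing side mirrors what serialize() does further down, and "texture" / "ktxFile" are placeholder variables:

    // Writing: embed sampler/usage/usageType under the "hifi.gpu" key.
    GPUKTXPayload payload;
    payload._samplerDesc = texture.getSampler().getDesc();
    payload._usage = texture.getUsage();
    payload._usageType = texture.getUsageType();
    ktx::KeyValues keyValues;
    keyValues.emplace_back(ktx::KeyValue(GPUKTXPayload::KEY, sizeof(GPUKTXPayload), (ktx::Byte*) &payload));

    // Reading: recover it (if present) when unserializing.
    GPUKTXPayload found;
    if (GPUKTXPayload::findInKeyValues(ktxFile->_keyValues, found)) {
        // found._samplerDesc / _usage / _usageType override the caller-supplied defaults
    }
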
|
||||
|
||||
KtxStorage::KtxStorage(ktx::KTXUniquePointer& ktxData) {
|
||||
|
||||
// if the source ktx is valid, let's configure this KtxStorage correctly
|
||||
if (ktxData && ktxData->getHeader()) {
|
||||
|
||||
// now that we know the ktx, let's get the header info to configure this Texture::Storage:
|
||||
Format mipFormat = Format::COLOR_BGRA_32;
|
||||
Format texelFormat = Format::COLOR_SRGBA_32;
|
||||
if (Texture::evalTextureFormat(*ktxData->getHeader(), mipFormat, texelFormat)) {
|
||||
_format = mipFormat;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
_ktxData.reset(ktxData.release());
|
||||
}
|
||||
|
||||
PixelsPointer KtxStorage::getMipFace(uint16 level, uint8 face) const {
|
||||
return _ktxData->getMipFaceTexelsData(level, face);
|
||||
}
|
||||
|
||||
void Texture::setKtxBacking(ktx::KTXUniquePointer& ktxBacking) {
|
||||
auto newBacking = std::unique_ptr<Storage>(new KtxStorage(ktxBacking));
|
||||
setStorage(newBacking);
|
||||
}
|
||||
|
||||
ktx::KTXUniquePointer Texture::serialize(const Texture& texture) {
|
||||
ktx::Header header;
|
||||
|
||||
// From texture format to ktx format description
|
||||
auto texelFormat = texture.getTexelFormat();
|
||||
auto mipFormat = texture.getStoredMipFormat();
|
||||
|
||||
if (!Texture::evalKTXFormat(mipFormat, texelFormat, header)) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Set Dimensions
|
||||
uint32_t numFaces = 1;
|
||||
switch (texture.getType()) {
|
||||
case TEX_1D: {
|
||||
if (texture.isArray()) {
|
||||
header.set1DArray(texture.getWidth(), texture.getNumSlices());
|
||||
} else {
|
||||
header.set1D(texture.getWidth());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case TEX_2D: {
|
||||
if (texture.isArray()) {
|
||||
header.set2DArray(texture.getWidth(), texture.getHeight(), texture.getNumSlices());
|
||||
} else {
|
||||
header.set2D(texture.getWidth(), texture.getHeight());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case TEX_3D: {
|
||||
if (texture.isArray()) {
|
||||
header.set3DArray(texture.getWidth(), texture.getHeight(), texture.getDepth(), texture.getNumSlices());
|
||||
} else {
|
||||
header.set3D(texture.getWidth(), texture.getHeight(), texture.getDepth());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case TEX_CUBE: {
|
||||
if (texture.isArray()) {
|
||||
header.setCubeArray(texture.getWidth(), texture.getHeight(), texture.getNumSlices());
|
||||
} else {
|
||||
header.setCube(texture.getWidth(), texture.getHeight());
|
||||
}
|
||||
numFaces = Texture::CUBE_FACE_COUNT;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Number of mip levels coming
|
||||
header.numberOfMipmapLevels = texture.maxMip() + 1;
|
||||
|
||||
ktx::Images images;
|
||||
for (uint32_t level = 0; level < header.numberOfMipmapLevels; level++) {
|
||||
auto mip = texture.accessStoredMipFace(level);
|
||||
if (mip) {
|
||||
if (numFaces == 1) {
|
||||
images.emplace_back(ktx::Image((uint32_t)mip->getSize(), 0, mip->readData()));
|
||||
} else {
|
||||
ktx::Image::FaceBytes cubeFaces(Texture::CUBE_FACE_COUNT);
|
||||
cubeFaces[0] = mip->readData();
|
||||
for (uint32_t face = 1; face < Texture::CUBE_FACE_COUNT; face++) {
|
||||
cubeFaces[face] = texture.accessStoredMipFace(level, face)->readData();
|
||||
}
|
||||
images.emplace_back(ktx::Image((uint32_t)mip->getSize(), 0, cubeFaces));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GPUKTXPayload keyval;
|
||||
keyval._samplerDesc = texture.getSampler().getDesc();
|
||||
keyval._usage = texture.getUsage();
|
||||
keyval._usageType = texture.getUsageType();
|
||||
ktx::KeyValues keyValues;
|
||||
keyValues.emplace_back(ktx::KeyValue(GPUKTXPayload::KEY, sizeof(GPUKTXPayload), (ktx::Byte*) &keyval));
|
||||
|
||||
auto ktxBuffer = ktx::KTX::create(header, images, keyValues);
|
||||
#if 0
|
||||
auto expectedMipCount = texture.evalNumMips();
|
||||
assert(expectedMipCount == ktxBuffer->_images.size());
|
||||
assert(expectedMipCount == header.numberOfMipmapLevels);
|
||||
|
||||
assert(0 == memcmp(&header, ktxBuffer->getHeader(), sizeof(ktx::Header)));
|
||||
assert(ktxBuffer->_images.size() == images.size());
|
||||
auto start = ktxBuffer->_storage->data();
|
||||
for (size_t i = 0; i < images.size(); ++i) {
|
||||
auto expected = images[i];
|
||||
auto actual = ktxBuffer->_images[i];
|
||||
assert(expected._padding == actual._padding);
|
||||
assert(expected._numFaces == actual._numFaces);
|
||||
assert(expected._imageSize == actual._imageSize);
|
||||
assert(expected._faceSize == actual._faceSize);
|
||||
assert(actual._faceBytes.size() == actual._numFaces);
|
||||
for (uint32_t face = 0; face < expected._numFaces; ++face) {
|
||||
auto expectedFace = expected._faceBytes[face];
|
||||
auto actualFace = actual._faceBytes[face];
|
||||
auto offset = actualFace - start;
|
||||
assert(offset % 4 == 0);
|
||||
assert(expectedFace != actualFace);
|
||||
assert(0 == memcmp(expectedFace, actualFace, expected._faceSize));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return ktxBuffer;
|
||||
}
|
||||
|
||||
Texture* Texture::unserialize(const ktx::KTXUniquePointer& srcData, TextureUsageType usageType, Usage usage, const Sampler::Desc& sampler) {
|
||||
if (!srcData) {
|
||||
return nullptr;
|
||||
}
|
||||
const auto& header = *srcData->getHeader();
|
||||
|
||||
Format mipFormat = Format::COLOR_BGRA_32;
|
||||
Format texelFormat = Format::COLOR_SRGBA_32;
|
||||
|
||||
if (!Texture::evalTextureFormat(header, mipFormat, texelFormat)) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Find Texture Type based on dimensions
|
||||
Type type = TEX_1D;
|
||||
if (header.pixelWidth == 0) {
|
||||
return nullptr;
|
||||
} else if (header.pixelHeight == 0) {
|
||||
type = TEX_1D;
|
||||
} else if (header.pixelDepth == 0) {
|
||||
if (header.numberOfFaces == ktx::NUM_CUBEMAPFACES) {
|
||||
type = TEX_CUBE;
|
||||
} else {
|
||||
type = TEX_2D;
|
||||
}
|
||||
} else {
|
||||
type = TEX_3D;
|
||||
}
|
||||
|
||||
|
||||
// If found, use the sampler, usage and usage type stored in the GPU KTX payload instead of the caller-supplied defaults
|
||||
GPUKTXPayload gpuktxKeyValue;
|
||||
bool isGPUKTXPayload = GPUKTXPayload::findInKeyValues(srcData->_keyValues, gpuktxKeyValue);
|
||||
|
||||
auto tex = Texture::create( (isGPUKTXPayload ? gpuktxKeyValue._usageType : usageType),
|
||||
type,
|
||||
texelFormat,
|
||||
header.getPixelWidth(),
|
||||
header.getPixelHeight(),
|
||||
header.getPixelDepth(),
|
||||
1, // num Samples
|
||||
header.getNumberOfSlices(),
|
||||
(isGPUKTXPayload ? gpuktxKeyValue._samplerDesc : sampler));
|
||||
|
||||
tex->setUsage((isGPUKTXPayload ? gpuktxKeyValue._usage : usage));
|
||||
|
||||
// Assign the available mips
|
||||
tex->setStoredMipFormat(mipFormat);
|
||||
uint16_t level = 0;
|
||||
for (auto& image : srcData->_images) {
|
||||
for (uint32_t face = 0; face < image._numFaces; face++) {
|
||||
tex->assignStoredMipFace(level, face, image._faceSize, image._faceBytes[face]);
|
||||
}
|
||||
level++;
|
||||
}
|
||||
|
||||
return tex;
|
||||
}
|
||||
|
||||
bool Texture::evalKTXFormat(const Element& mipFormat, const Element& texelFormat, ktx::Header& header) {
|
||||
if (texelFormat == Format::COLOR_RGBA_32 && mipFormat == Format::COLOR_BGRA_32) {
|
||||
header.setUncompressed(ktx::GLType::UNSIGNED_BYTE, 1, ktx::GLFormat::BGRA, ktx::GLInternalFormat_Uncompressed::RGBA8, ktx::GLBaseInternalFormat::RGBA);
|
||||
} else if (texelFormat == Format::COLOR_RGBA_32 && mipFormat == Format::COLOR_RGBA_32) {
|
||||
header.setUncompressed(ktx::GLType::UNSIGNED_BYTE, 1, ktx::GLFormat::RGBA, ktx::GLInternalFormat_Uncompressed::RGBA8, ktx::GLBaseInternalFormat::RGBA);
|
||||
} else if (texelFormat == Format::COLOR_SRGBA_32 && mipFormat == Format::COLOR_SBGRA_32) {
|
||||
header.setUncompressed(ktx::GLType::UNSIGNED_BYTE, 1, ktx::GLFormat::BGRA, ktx::GLInternalFormat_Uncompressed::SRGB8_ALPHA8, ktx::GLBaseInternalFormat::RGBA);
|
||||
} else if (texelFormat == Format::COLOR_SRGBA_32 && mipFormat == Format::COLOR_SRGBA_32) {
|
||||
header.setUncompressed(ktx::GLType::UNSIGNED_BYTE, 1, ktx::GLFormat::RGBA, ktx::GLInternalFormat_Uncompressed::SRGB8_ALPHA8, ktx::GLBaseInternalFormat::RGBA);
|
||||
} else if (texelFormat == Format::COLOR_R_8 && mipFormat == Format::COLOR_R_8) {
|
||||
header.setUncompressed(ktx::GLType::UNSIGNED_BYTE, 1, ktx::GLFormat::RED, ktx::GLInternalFormat_Uncompressed::R8, ktx::GLBaseInternalFormat::RED);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Texture::evalTextureFormat(const ktx::Header& header, Element& mipFormat, Element& texelFormat) {
|
||||
if (header.getGLFormat() == ktx::GLFormat::BGRA && header.getGLType() == ktx::GLType::UNSIGNED_BYTE && header.getTypeSize() == 1) {
|
||||
if (header.getGLInternaFormat_Uncompressed() == ktx::GLInternalFormat_Uncompressed::RGBA8) {
|
||||
mipFormat = Format::COLOR_BGRA_32;
|
||||
texelFormat = Format::COLOR_RGBA_32;
|
||||
} else if (header.getGLInternaFormat_Uncompressed() == ktx::GLInternalFormat_Uncompressed::SRGB8_ALPHA8) {
|
||||
mipFormat = Format::COLOR_SBGRA_32;
|
||||
texelFormat = Format::COLOR_SRGBA_32;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else if (header.getGLFormat() == ktx::GLFormat::RGBA && header.getGLType() == ktx::GLType::UNSIGNED_BYTE && header.getTypeSize() == 1) {
|
||||
if (header.getGLInternaFormat_Uncompressed() == ktx::GLInternalFormat_Uncompressed::RGBA8) {
|
||||
mipFormat = Format::COLOR_RGBA_32;
|
||||
texelFormat = Format::COLOR_RGBA_32;
|
||||
} else if (header.getGLInternaFormat_Uncompressed() == ktx::GLInternalFormat_Uncompressed::SRGB8_ALPHA8) {
|
||||
mipFormat = Format::COLOR_SRGBA_32;
|
||||
texelFormat = Format::COLOR_SRGBA_32;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else if (header.getGLFormat() == ktx::GLFormat::RED && header.getGLType() == ktx::GLType::UNSIGNED_BYTE && header.getTypeSize() == 1) {
|
||||
mipFormat = Format::COLOR_R_8;
|
||||
if (header.getGLInternaFormat_Uncompressed() == ktx::GLInternalFormat_Uncompressed::R8) {
|
||||
texelFormat = Format::COLOR_R_8;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
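A small hedged check of the mapping these two functions implement, using only names declared above: a BGRA8 mip feeding an sRGBA texel format should round-trip through the KTX header fields.

    // Round-trip sketch: gpu formats -> KTX header -> gpu formats.
    ktx::Header header;
    if (gpu::Texture::evalKTXFormat(gpu::Element::COLOR_SBGRA_32, gpu::Element::COLOR_SRGBA_32, header)) {
        // header now describes UNSIGNED_BYTE / BGRA / SRGB8_ALPHA8 / base RGBA
        gpu::Element mipFormat, texelFormat;
        bool ok = gpu::Texture::evalTextureFormat(header, mipFormat, texelFormat);
        // ok == true, mipFormat == COLOR_SBGRA_32, texelFormat == COLOR_SRGBA_32
    }
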
|
3
libraries/ktx/CMakeLists.txt
Normal file
|
@ -0,0 +1,3 @@
|
|||
set(TARGET_NAME ktx)
|
||||
setup_hifi_library()
|
||||
link_hifi_libraries()
|
165
libraries/ktx/src/ktx/KTX.cpp
Normal file
|
@ -0,0 +1,165 @@
|
|||
//
|
||||
// KTX.cpp
|
||||
// ktx/src/ktx
|
||||
//
|
||||
// Created by Zach Pomerantz on 2/08/2017.
|
||||
// Copyright 2017 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include "KTX.h"
|
||||
|
||||
#include <algorithm> //min max and more
|
||||
|
||||
using namespace ktx;
|
||||
|
||||
uint32_t Header::evalPadding(size_t byteSize) {
|
||||
//auto padding = byteSize % PACKING_SIZE;
|
||||
// return (uint32_t) (padding ? PACKING_SIZE - padding : 0);
|
||||
return (uint32_t) (3 - (byteSize + 3) % PACKING_SIZE);// padding ? PACKING_SIZE - padding : 0);
|
||||
}
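For concreteness, the closed form above yields the usual pad-to-4 values; a few sample evaluations:

    // evalPadding(byteSize) == 3 - (byteSize + 3) % 4
    // byteSize = 5  -> 3 - (8 % 4)  = 3   (5 + 3 = 8, the next multiple of 4)
    // byteSize = 8  -> 3 - (11 % 4) = 0   (already aligned)
    // byteSize = 15 -> 3 - (18 % 4) = 1   (15 + 1 = 16)
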
|
||||
|
||||
|
||||
const Header::Identifier ktx::Header::IDENTIFIER {{
|
||||
0xAB, 0x4B, 0x54, 0x58, 0x20, 0x31, 0x31, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A
|
||||
}};
|
||||
|
||||
Header::Header() {
|
||||
memcpy(identifier, IDENTIFIER.data(), IDENTIFIER_LENGTH);
|
||||
}
|
||||
|
||||
uint32_t Header::evalMaxDimension() const {
|
||||
return std::max(getPixelWidth(), std::max(getPixelHeight(), getPixelDepth()));
|
||||
}
|
||||
|
||||
uint32_t Header::evalPixelWidth(uint32_t level) const {
|
||||
return std::max(getPixelWidth() >> level, 1U);
|
||||
}
|
||||
uint32_t Header::evalPixelHeight(uint32_t level) const {
|
||||
return std::max(getPixelHeight() >> level, 1U);
|
||||
}
|
||||
uint32_t Header::evalPixelDepth(uint32_t level) const {
|
||||
return std::max(getPixelDepth() >> level, 1U);
|
||||
}
|
||||
|
||||
size_t Header::evalPixelSize() const {
|
||||
return glTypeSize; // Really we should derive the size from the Format, etc.
|
||||
}
|
||||
|
||||
size_t Header::evalRowSize(uint32_t level) const {
|
||||
auto pixWidth = evalPixelWidth(level);
|
||||
auto pixSize = evalPixelSize();
|
||||
auto netSize = pixWidth * pixSize;
|
||||
auto padding = evalPadding(netSize);
|
||||
return netSize + padding;
|
||||
}
|
||||
size_t Header::evalFaceSize(uint32_t level) const {
|
||||
auto pixHeight = evalPixelHeight(level);
|
||||
auto pixDepth = evalPixelDepth(level);
|
||||
auto rowSize = evalRowSize(level);
|
||||
return pixDepth * pixHeight * rowSize;
|
||||
}
|
||||
size_t Header::evalImageSize(uint32_t level) const {
|
||||
auto faceSize = evalFaceSize(level);
|
||||
if (numberOfFaces == NUM_CUBEMAPFACES && numberOfArrayElements == 0) {
|
||||
return faceSize;
|
||||
} else {
|
||||
return (getNumberOfSlices() * numberOfFaces * faceSize);
|
||||
}
|
||||
}
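A worked example of the size helpers above, under the stated (temporary) assumption that evalPixelSize() returns glTypeSize; the 4-bytes-per-pixel figure is hypothetical:

    // 8x8 texture, 4 bytes/pixel, 2D (pixelDepth reported as 1), mip level 1:
    //   evalPixelWidth(1)  = max(8 >> 1, 1) = 4
    //   evalRowSize(1)     = 4 * 4 + evalPadding(16) = 16 + 0 = 16 bytes
    //   evalPixelHeight(1) = 4, evalPixelDepth(1) = 1
    //   evalFaceSize(1)    = 1 * 4 * 16 = 64 bytes
    //   evalImageSize(1)   = 64 bytes (single face, non-array)
    // For a non-array cubemap (numberOfFaces == 6), evalImageSize() still returns one face's
    // size, matching the KTX rule that cube faces are written individually.
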
|
||||
|
||||
|
||||
KeyValue::KeyValue(const std::string& key, uint32_t valueByteSize, const Byte* value) :
|
||||
_byteSize((uint32_t) key.size() + 1 + valueByteSize), // keyString size + '\0' ending char + the value size
|
||||
_key(key),
|
||||
_value(valueByteSize)
|
||||
{
|
||||
if (_value.size() && value) {
|
||||
memcpy(_value.data(), value, valueByteSize);
|
||||
}
|
||||
}
|
||||
|
||||
KeyValue::KeyValue(const std::string& key, const std::string& value) :
|
||||
KeyValue(key, (uint32_t) value.size(), (const Byte*) value.data())
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
uint32_t KeyValue::serializedByteSize() const {
|
||||
return (uint32_t) (sizeof(uint32_t) + _byteSize + Header::evalPadding(_byteSize));
|
||||
}
|
||||
|
||||
uint32_t KeyValue::serializedKeyValuesByteSize(const KeyValues& keyValues) {
|
||||
uint32_t keyValuesSize = 0;
|
||||
for (auto& keyval : keyValues) {
|
||||
keyValuesSize += keyval.serializedByteSize();
|
||||
}
|
||||
return (keyValuesSize + Header::evalPadding(keyValuesSize));
|
||||
}
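For instance, given the accounting above, a made-up key/value pair would serialize as follows ("author"/"hifi" are illustrative, not from this changeset):

    // KeyValue("author", "hifi"):
    //   _byteSize            = 6 (key) + 1 ('\0') + 4 (value) = 11
    //   serializedByteSize() = 4 (length field) + 11 + evalPadding(11) = 4 + 11 + 1 = 16 bytes
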
|
||||
|
||||
|
||||
KTX::KTX() {
|
||||
}
|
||||
|
||||
KTX::~KTX() {
|
||||
}
|
||||
|
||||
void KTX::resetStorage(const StoragePointer& storage) {
|
||||
_storage = storage;
|
||||
}
|
||||
|
||||
const Header* KTX::getHeader() const {
|
||||
if (!_storage) {
|
||||
return nullptr;
|
||||
}
|
||||
return reinterpret_cast<const Header*>(_storage->data());
|
||||
}
|
||||
|
||||
|
||||
size_t KTX::getKeyValueDataSize() const {
|
||||
if (_storage) {
|
||||
return getHeader()->bytesOfKeyValueData;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
size_t KTX::getTexelsDataSize() const {
|
||||
if (_storage) {
|
||||
//return _storage->size() - (sizeof(Header) + getKeyValueDataSize());
|
||||
return (_storage->data() + _storage->size()) - getTexelsData();
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
const Byte* KTX::getKeyValueData() const {
|
||||
if (_storage) {
|
||||
return (_storage->data() + sizeof(Header));
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
const Byte* KTX::getTexelsData() const {
|
||||
if (_storage) {
|
||||
return (_storage->data() + sizeof(Header) + getKeyValueDataSize());
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
storage::StoragePointer KTX::getMipFaceTexelsData(uint16_t mip, uint8_t face) const {
|
||||
storage::StoragePointer result;
|
||||
if (mip < _images.size()) {
|
||||
const auto& faces = _images[mip];
|
||||
if (face < faces._numFaces) {
|
||||
auto faceOffset = faces._faceBytes[face] - _storage->data();
|
||||
auto faceSize = faces._faceSize;
|
||||
result = _storage->createView(faceSize, faceOffset);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
494
libraries/ktx/src/ktx/KTX.h
Normal file
|
@ -0,0 +1,494 @@
|
|||
//
|
||||
// KTX.h
|
||||
// ktx/src/ktx
|
||||
//
|
||||
// Created by Zach Pomerantz on 2/08/2017.
|
||||
// Copyright 2017 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#pragma once
|
||||
#ifndef hifi_ktx_KTX_h
|
||||
#define hifi_ktx_KTX_h
|
||||
|
||||
#include <array>
|
||||
#include <list>
|
||||
#include <vector>
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
|
||||
#include <shared/Storage.h>
|
||||
|
||||
/* KTX Spec:
|
||||
|
||||
Byte[12] identifier
|
||||
UInt32 endianness
|
||||
UInt32 glType
|
||||
UInt32 glTypeSize
|
||||
UInt32 glFormat
|
||||
Uint32 glInternalFormat
|
||||
Uint32 glBaseInternalFormat
|
||||
UInt32 pixelWidth
|
||||
UInt32 pixelHeight
|
||||
UInt32 pixelDepth
|
||||
UInt32 numberOfArrayElements
|
||||
UInt32 numberOfFaces
|
||||
UInt32 numberOfMipmapLevels
|
||||
UInt32 bytesOfKeyValueData
|
||||
|
||||
for each keyValuePair that fits in bytesOfKeyValueData
|
||||
UInt32 keyAndValueByteSize
|
||||
Byte keyAndValue[keyAndValueByteSize]
|
||||
Byte valuePadding[3 - ((keyAndValueByteSize + 3) % 4)]
|
||||
end
|
||||
|
||||
for each mipmap_level in numberOfMipmapLevels*
|
||||
UInt32 imageSize;
|
||||
for each array_element in numberOfArrayElements*
|
||||
for each face in numberOfFaces
|
||||
for each z_slice in pixelDepth*
|
||||
for each row or row_of_blocks in pixelHeight*
|
||||
for each pixel or block_of_pixels in pixelWidth
|
||||
Byte data[format-specific-number-of-bytes]**
|
||||
end
|
||||
end
|
||||
end
|
||||
Byte cubePadding[0-3]
|
||||
end
|
||||
end
|
||||
Byte mipPadding[3 - ((imageSize + 3) % 4)]
|
||||
end
|
||||
|
||||
* Replace with 1 if this field is 0.
|
||||
|
||||
** Uncompressed texture data matches a GL_UNPACK_ALIGNMENT of 4.
|
||||
*/
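Consistent with that layout (and with the accessors implemented in KTX.cpp above), the interesting offsets in a loaded blob fall out directly; a hedged sketch, where "blob" is an assumed storage::StoragePointer holding a whole .ktx file:

    const ktx::Header* header   = reinterpret_cast<const ktx::Header*>(blob->data());
    const ktx::Byte* keyValues  = blob->data() + sizeof(ktx::Header);
    const ktx::Byte* texels     = keyValues + header->bytesOfKeyValueData;
    // Each mip level then starts with a UInt32 imageSize, followed by the face/slice data
    // padded to 4 bytes, exactly as the spec block above describes.
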
|
||||
|
||||
|
||||
|
||||
namespace ktx {
|
||||
const uint32_t PACKING_SIZE { sizeof(uint32_t) };
|
||||
using Byte = uint8_t;
|
||||
|
||||
enum class GLType : uint32_t {
|
||||
COMPRESSED_TYPE = 0,
|
||||
|
||||
// GL 4.4 Table 8.2
|
||||
UNSIGNED_BYTE = 0x1401,
|
||||
BYTE = 0x1400,
|
||||
UNSIGNED_SHORT = 0x1403,
|
||||
SHORT = 0x1402,
|
||||
UNSIGNED_INT = 0x1405,
|
||||
INT = 0x1404,
|
||||
HALF_FLOAT = 0x140B,
|
||||
FLOAT = 0x1406,
|
||||
UNSIGNED_BYTE_3_3_2 = 0x8032,
|
||||
UNSIGNED_BYTE_2_3_3_REV = 0x8362,
|
||||
UNSIGNED_SHORT_5_6_5 = 0x8363,
|
||||
UNSIGNED_SHORT_5_6_5_REV = 0x8364,
|
||||
UNSIGNED_SHORT_4_4_4_4 = 0x8033,
|
||||
UNSIGNED_SHORT_4_4_4_4_REV = 0x8365,
|
||||
UNSIGNED_SHORT_5_5_5_1 = 0x8034,
|
||||
UNSIGNED_SHORT_1_5_5_5_REV = 0x8366,
|
||||
UNSIGNED_INT_8_8_8_8 = 0x8035,
|
||||
UNSIGNED_INT_8_8_8_8_REV = 0x8367,
|
||||
UNSIGNED_INT_10_10_10_2 = 0x8036,
|
||||
UNSIGNED_INT_2_10_10_10_REV = 0x8368,
|
||||
UNSIGNED_INT_24_8 = 0x84FA,
|
||||
UNSIGNED_INT_10F_11F_11F_REV = 0x8C3B,
|
||||
UNSIGNED_INT_5_9_9_9_REV = 0x8C3E,
|
||||
FLOAT_32_UNSIGNED_INT_24_8_REV = 0x8DAD,
|
||||
|
||||
NUM_GLTYPES = 25,
|
||||
};
|
||||
|
||||
enum class GLFormat : uint32_t {
|
||||
COMPRESSED_FORMAT = 0,
|
||||
|
||||
// GL 4.4 Table 8.3
|
||||
STENCIL_INDEX = 0x1901,
|
||||
DEPTH_COMPONENT = 0x1902,
|
||||
DEPTH_STENCIL = 0x84F9,
|
||||
|
||||
RED = 0x1903,
|
||||
GREEN = 0x1904,
|
||||
BLUE = 0x1905,
|
||||
RG = 0x8227,
|
||||
RGB = 0x1907,
|
||||
RGBA = 0x1908,
|
||||
BGR = 0x80E0,
|
||||
BGRA = 0x80E1,
|
||||
|
||||
RG_INTEGER = 0x8228,
|
||||
RED_INTEGER = 0x8D94,
|
||||
GREEN_INTEGER = 0x8D95,
|
||||
BLUE_INTEGER = 0x8D96,
|
||||
RGB_INTEGER = 0x8D98,
|
||||
RGBA_INTEGER = 0x8D99,
|
||||
BGR_INTEGER = 0x8D9A,
|
||||
BGRA_INTEGER = 0x8D9B,
|
||||
|
||||
NUM_GLFORMATS = 20,
|
||||
};
|
||||
|
||||
enum class GLInternalFormat_Uncompressed : uint32_t {
|
||||
// GL 4.4 Table 8.12
|
||||
R8 = 0x8229,
|
||||
R8_SNORM = 0x8F94,
|
||||
|
||||
R16 = 0x822A,
|
||||
R16_SNORM = 0x8F98,
|
||||
|
||||
RG8 = 0x822B,
|
||||
RG8_SNORM = 0x8F95,
|
||||
|
||||
RG16 = 0x822C,
|
||||
RG16_SNORM = 0x8F99,
|
||||
|
||||
R3_G3_B2 = 0x2A10,
|
||||
RGB4 = 0x804F,
|
||||
RGB5 = 0x8050,
|
||||
RGB565 = 0x8D62,
|
||||
|
||||
RGB8 = 0x8051,
|
||||
RGB8_SNORM = 0x8F96,
|
||||
RGB10 = 0x8052,
|
||||
RGB12 = 0x8053,
|
||||
|
||||
RGB16 = 0x8054,
|
||||
RGB16_SNORM = 0x8F9A,
|
||||
|
||||
RGBA2 = 0x8055,
|
||||
RGBA4 = 0x8056,
|
||||
RGB5_A1 = 0x8057,
|
||||
RGBA8 = 0x8058,
|
||||
RGBA8_SNORM = 0x8F97,
|
||||
|
||||
RGB10_A2 = 0x8059,
|
||||
RGB10_A2UI = 0x906F,
|
||||
|
||||
RGBA12 = 0x805A,
|
||||
RGBA16 = 0x805B,
|
||||
RGBA16_SNORM = 0x8F9B,
|
||||
|
||||
SRGB8 = 0x8C41,
|
||||
SRGB8_ALPHA8 = 0x8C43,
|
||||
|
||||
R16F = 0x822D,
|
||||
RG16F = 0x822F,
|
||||
RGB16F = 0x881B,
|
||||
RGBA16F = 0x881A,
|
||||
|
||||
R32F = 0x822E,
|
||||
RG32F = 0x8230,
|
||||
RGB32F = 0x8815,
|
||||
RGBA32F = 0x8814,
|
||||
|
||||
R11F_G11F_B10F = 0x8C3A,
|
||||
RGB9_E5 = 0x8C3D,
|
||||
|
||||
|
||||
R8I = 0x8231,
|
||||
R8UI = 0x8232,
|
||||
R16I = 0x8233,
|
||||
R16UI = 0x8234,
|
||||
R32I = 0x8235,
|
||||
R32UI = 0x8236,
|
||||
RG8I = 0x8237,
|
||||
RG8UI = 0x8238,
|
||||
RG16I = 0x8239,
|
||||
RG16UI = 0x823A,
|
||||
RG32I = 0x823B,
|
||||
RG32UI = 0x823C,
|
||||
|
||||
RGB8I = 0x8D8F,
|
||||
RGB8UI = 0x8D7D,
|
||||
RGB16I = 0x8D89,
|
||||
RGB16UI = 0x8D77,
|
||||
|
||||
RGB32I = 0x8D83,
|
||||
RGB32UI = 0x8D71,
|
||||
RGBA8I = 0x8D8E,
|
||||
RGBA8UI = 0x8D7C,
|
||||
RGBA16I = 0x8D88,
|
||||
RGBA16UI = 0x8D76,
|
||||
RGBA32I = 0x8D82,
|
||||
|
||||
RGBA32UI = 0x8D70,
|
||||
|
||||
// GL 4.4 Table 8.13
|
||||
DEPTH_COMPONENT16 = 0x81A5,
|
||||
DEPTH_COMPONENT24 = 0x81A6,
|
||||
DEPTH_COMPONENT32 = 0x81A7,
|
||||
|
||||
DEPTH_COMPONENT32F = 0x8CAC,
|
||||
DEPTH24_STENCIL8 = 0x88F0,
|
||||
DEPTH32F_STENCIL8 = 0x8CAD,
|
||||
|
||||
STENCIL_INDEX1 = 0x8D46,
|
||||
STENCIL_INDEX4 = 0x8D47,
|
||||
STENCIL_INDEX8 = 0x8D48,
|
||||
STENCIL_INDEX16 = 0x8D49,
|
||||
|
||||
NUM_UNCOMPRESSED_GLINTERNALFORMATS = 74,
|
||||
};
|
||||
|
||||
enum class GLInternalFormat_Compressed : uint32_t {
|
||||
// GL 4.4 Table 8.14
|
||||
COMPRESSED_RED = 0x8225,
|
||||
COMPRESSED_RG = 0x8226,
|
||||
COMPRESSED_RGB = 0x84ED,
|
||||
COMPRESSED_RGBA = 0x84EE,
|
||||
|
||||
COMPRESSED_SRGB = 0x8C48,
|
||||
COMPRESSED_SRGB_ALPHA = 0x8C49,
|
||||
|
||||
COMPRESSED_RED_RGTC1 = 0x8DBB,
|
||||
COMPRESSED_SIGNED_RED_RGTC1 = 0x8DBC,
|
||||
COMPRESSED_RG_RGTC2 = 0x8DBD,
|
||||
COMPRESSED_SIGNED_RG_RGTC2 = 0x8DBE,
|
||||
|
||||
COMPRESSED_RGBA_BPTC_UNORM = 0x8E8C,
|
||||
COMPRESSED_SRGB_ALPHA_BPTC_UNORM = 0x8E8D,
|
||||
COMPRESSED_RGB_BPTC_SIGNED_FLOAT = 0x8E8E,
|
||||
COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT = 0x8E8F,
|
||||
|
||||
COMPRESSED_RGB8_ETC2 = 0x9274,
|
||||
COMPRESSED_SRGB8_ETC2 = 0x9275,
|
||||
COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9276,
|
||||
COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9277,
|
||||
COMPRESSED_RGBA8_ETC2_EAC = 0x9278,
|
||||
COMPRESSED_SRGB8_ALPHA8_ETC2_EAC = 0x9279,
|
||||
|
||||
COMPRESSED_R11_EAC = 0x9270,
|
||||
COMPRESSED_SIGNED_R11_EAC = 0x9271,
|
||||
COMPRESSED_RG11_EAC = 0x9272,
|
||||
COMPRESSED_SIGNED_RG11_EAC = 0x9273,
|
||||
|
||||
NUM_COMPRESSED_GLINTERNALFORMATS = 24,
|
||||
};
|
||||
|
||||
enum class GLBaseInternalFormat : uint32_t {
|
||||
// GL 4.4 Table 8.11
|
||||
DEPTH_COMPONENT = 0x1902,
|
||||
DEPTH_STENCIL = 0x84F9,
|
||||
RED = 0x1903,
|
||||
RG = 0x8227,
|
||||
RGB = 0x1907,
|
||||
RGBA = 0x1908,
|
||||
STENCIL_INDEX = 0x1901,
|
||||
|
||||
NUM_GLBASEINTERNALFORMATS = 7,
|
||||
};
|
||||
|
||||
enum CubeMapFace {
|
||||
POS_X = 0,
|
||||
NEG_X = 1,
|
||||
POS_Y = 2,
|
||||
NEG_Y = 3,
|
||||
POS_Z = 4,
|
||||
NEG_Z = 5,
|
||||
NUM_CUBEMAPFACES = 6,
|
||||
};
|
||||
|
||||
using Storage = storage::Storage;
|
||||
using StoragePointer = std::shared_ptr<Storage>;
|
||||
|
||||
// Header
|
||||
struct Header {
|
||||
static const size_t IDENTIFIER_LENGTH = 12;
|
||||
using Identifier = std::array<uint8_t, IDENTIFIER_LENGTH>;
|
||||
static const Identifier IDENTIFIER;
|
||||
|
||||
static const uint32_t ENDIAN_TEST = 0x04030201;
|
||||
static const uint32_t REVERSE_ENDIAN_TEST = 0x01020304;
|
||||
|
||||
static uint32_t evalPadding(size_t byteSize);
|
||||
|
||||
Header();
|
||||
|
||||
Byte identifier[IDENTIFIER_LENGTH];
|
||||
uint32_t endianness { ENDIAN_TEST };
|
||||
|
||||
uint32_t glType;
|
||||
uint32_t glTypeSize { 0 };
|
||||
uint32_t glFormat;
|
||||
uint32_t glInternalFormat;
|
||||
uint32_t glBaseInternalFormat;
|
||||
|
||||
uint32_t pixelWidth { 1 };
|
||||
uint32_t pixelHeight { 0 };
|
||||
uint32_t pixelDepth { 0 };
|
||||
uint32_t numberOfArrayElements { 0 };
|
||||
uint32_t numberOfFaces { 1 };
|
||||
uint32_t numberOfMipmapLevels { 1 };
|
||||
|
||||
uint32_t bytesOfKeyValueData { 0 };
|
||||
|
||||
uint32_t getPixelWidth() const { return (pixelWidth ? pixelWidth : 1); }
|
||||
uint32_t getPixelHeight() const { return (pixelHeight ? pixelHeight : 1); }
|
||||
uint32_t getPixelDepth() const { return (pixelDepth ? pixelDepth : 1); }
|
||||
uint32_t getNumberOfSlices() const { return (numberOfArrayElements ? numberOfArrayElements : 1); }
|
||||
uint32_t getNumberOfLevels() const { return (numberOfMipmapLevels ? numberOfMipmapLevels : 1); }
|
||||
|
||||
uint32_t evalMaxDimension() const;
|
||||
uint32_t evalPixelWidth(uint32_t level) const;
|
||||
uint32_t evalPixelHeight(uint32_t level) const;
|
||||
uint32_t evalPixelDepth(uint32_t level) const;
|
||||
|
||||
size_t evalPixelSize() const;
|
||||
size_t evalRowSize(uint32_t level) const;
|
||||
size_t evalFaceSize(uint32_t level) const;
|
||||
size_t evalImageSize(uint32_t level) const;
|
||||
|
||||
void setUncompressed(GLType type, uint32_t typeSize, GLFormat format, GLInternalFormat_Uncompressed internalFormat, GLBaseInternalFormat baseInternalFormat) {
|
||||
glType = (uint32_t) type;
|
||||
glTypeSize = typeSize;
|
||||
glFormat = (uint32_t) format;
|
||||
glInternalFormat = (uint32_t) internalFormat;
|
||||
glBaseInternalFormat = (uint32_t) baseInternalFormat;
|
||||
}
|
||||
void setCompressed(GLInternalFormat_Compressed internalFormat, GLBaseInternalFormat baseInternalFormat) {
|
||||
glType = (uint32_t) GLType::COMPRESSED_TYPE;
|
||||
glTypeSize = 1;
|
||||
glFormat = (uint32_t) GLFormat::COMPRESSED_FORMAT;
|
||||
glInternalFormat = (uint32_t) internalFormat;
|
||||
glBaseInternalFormat = (uint32_t) baseInternalFormat;
|
||||
}
|
||||
|
||||
GLType getGLType() const { return (GLType)glType; }
|
||||
uint32_t getTypeSize() const { return glTypeSize; }
|
||||
GLFormat getGLFormat() const { return (GLFormat)glFormat; }
|
||||
GLInternalFormat_Uncompressed getGLInternaFormat_Uncompressed() const { return (GLInternalFormat_Uncompressed)glInternalFormat; }
|
||||
GLInternalFormat_Compressed getGLInternaFormat_Compressed() const { return (GLInternalFormat_Compressed)glInternalFormat; }
|
||||
GLBaseInternalFormat getGLBaseInternalFormat() const { return (GLBaseInternalFormat)glBaseInternalFormat; }
|
||||
|
||||
|
||||
void setDimensions(uint32_t width, uint32_t height = 0, uint32_t depth = 0, uint32_t numSlices = 0, uint32_t numFaces = 1) {
|
||||
pixelWidth = (width > 0 ? width : 1);
|
||||
pixelHeight = height;
|
||||
pixelDepth = depth;
|
||||
numberOfArrayElements = numSlices;
|
||||
numberOfFaces = ((numFaces == 1) || (numFaces == NUM_CUBEMAPFACES) ? numFaces : 1);
|
||||
}
|
||||
void set1D(uint32_t width) { setDimensions(width); }
|
||||
void set1DArray(uint32_t width, uint32_t numSlices) { setDimensions(width, 0, 0, (numSlices > 0 ? numSlices : 1)); }
|
||||
void set2D(uint32_t width, uint32_t height) { setDimensions(width, height); }
|
||||
void set2DArray(uint32_t width, uint32_t height, uint32_t numSlices) { setDimensions(width, height, 0, (numSlices > 0 ? numSlices : 1)); }
|
||||
void set3D(uint32_t width, uint32_t height, uint32_t depth) { setDimensions(width, height, depth); }
|
||||
void set3DArray(uint32_t width, uint32_t height, uint32_t depth, uint32_t numSlices) { setDimensions(width, height, depth, (numSlices > 0 ? numSlices : 1)); }
|
||||
void setCube(uint32_t width, uint32_t height) { setDimensions(width, height, 0, 0, NUM_CUBEMAPFACES); }
|
||||
void setCubeArray(uint32_t width, uint32_t height, uint32_t numSlices) { setDimensions(width, height, 0, (numSlices > 0 ? numSlices : 1), NUM_CUBEMAPFACES); }
|
||||
|
||||
};
|
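For orientation, a minimal sketch (not part of this change) of filling in a Header for a single 2D float texture, using only the setters and enums declared above; width, height and numMips are illustrative:

    // Sketch only: describe a 2D RGBA32F texture with numMips mip levels.
    ktx::Header header;
    header.set2D(width, height);
    header.numberOfMipmapLevels = numMips;
    header.setUncompressed(ktx::GLType::FLOAT, 4,
                           ktx::GLFormat::RGBA,
                           ktx::GLInternalFormat_Uncompressed::RGBA32F,
                           ktx::GLBaseInternalFormat::RGBA);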
||||
|
||||
// Key Values
|
||||
struct KeyValue {
|
||||
uint32_t _byteSize { 0 };
|
||||
std::string _key;
|
||||
std::vector<Byte> _value;
|
||||
|
||||
|
||||
KeyValue(const std::string& key, uint32_t valueByteSize, const Byte* value);
|
||||
|
||||
KeyValue(const std::string& key, const std::string& value);
|
||||
|
||||
uint32_t serializedByteSize() const;
|
||||
|
||||
static KeyValue parseSerializedKeyAndValue(uint32_t srcSize, const Byte* srcBytes);
|
||||
static uint32_t writeSerializedKeyAndValue(Byte* destBytes, uint32_t destByteSize, const KeyValue& keyval);
|
||||
|
||||
using KeyValues = std::list<KeyValue>;
|
||||
static uint32_t serializedKeyValuesByteSize(const KeyValues& keyValues);
|
||||
|
||||
};
|
||||
using KeyValues = KeyValue::KeyValues;
|
||||
|
||||
|
||||
struct Image {
|
||||
using FaceBytes = std::vector<const Byte*>;
|
||||
|
||||
uint32_t _numFaces{ 1 };
|
||||
uint32_t _imageSize;
|
||||
uint32_t _faceSize;
|
||||
uint32_t _padding;
|
||||
FaceBytes _faceBytes;
|
||||
|
||||
|
||||
Image(uint32_t imageSize, uint32_t padding, const Byte* bytes) :
|
||||
_numFaces(1),
|
||||
_imageSize(imageSize),
|
||||
_faceSize(imageSize),
|
||||
_padding(padding),
|
||||
_faceBytes(1, bytes) {}
|
||||
|
||||
Image(uint32_t pageSize, uint32_t padding, const FaceBytes& cubeFaceBytes) :
|
||||
_numFaces(NUM_CUBEMAPFACES),
|
||||
_imageSize(pageSize * NUM_CUBEMAPFACES),
|
||||
_faceSize(pageSize),
|
||||
_padding(padding)
|
||||
{
|
||||
if (cubeFaceBytes.size() == NUM_CUBEMAPFACES) {
|
||||
_faceBytes = cubeFaceBytes;
|
||||
}
|
||||
}
|
||||
};
|
||||
using Images = std::vector<Image>;
|
||||
|
||||
class KTX {
|
||||
void resetStorage(const StoragePointer& src);
|
||||
|
||||
KTX();
|
||||
public:
|
||||
|
||||
~KTX();
|
||||
|
||||
// Define a KTX object manually to write it somewhere (in a file on disk?)
|
||||
// This path allocates the Storage where the header, key-values and mips are stored
|
||||
// Then COPY all the data
|
||||
static std::unique_ptr<KTX> create(const Header& header, const Images& images, const KeyValues& keyValues = KeyValues());
|
||||
|
||||
// Instead of creating a full Copy of the src data in a KTX object, the write serialization can be performed with the
|
||||
// following two functions
|
||||
// size_t sizeNeeded = KTX::evalStorageSize(header, images);
|
||||
//
|
||||
// //allocate a buffer of size "sizeNeeded" or map a file with enough capacity
|
||||
// Byte* destBytes = new Byte[sizeNeeded];
|
||||
//
|
||||
// // Then perform the writing of the src data to the destination buffer
|
||||
// write(destBytes, sizeNeeded, header, images);
|
||||
//
|
||||
// This is exactly what is done in the create function
|
||||
static size_t evalStorageSize(const Header& header, const Images& images, const KeyValues& keyValues = KeyValues());
|
||||
static size_t write(Byte* destBytes, size_t destByteSize, const Header& header, const Images& images, const KeyValues& keyValues = KeyValues());
|
||||
static size_t writeKeyValues(Byte* destBytes, size_t destByteSize, const KeyValues& keyValues);
|
||||
static Images writeImages(Byte* destBytes, size_t destByteSize, const Images& images);
|
||||
|
||||
// Parse a block of memory and create a KTX object from it
|
||||
static std::unique_ptr<KTX> create(const StoragePointer& src);
|
||||
|
||||
static bool checkHeaderFromStorage(size_t srcSize, const Byte* srcBytes);
|
||||
static KeyValues parseKeyValues(size_t srcSize, const Byte* srcBytes);
|
||||
static Images parseImages(const Header& header, size_t srcSize, const Byte* srcBytes);
|
||||
|
||||
// Access raw pointers to the main sections of the KTX
|
||||
const Header* getHeader() const;
|
||||
const Byte* getKeyValueData() const;
|
||||
const Byte* getTexelsData() const;
|
||||
storage::StoragePointer getMipFaceTexelsData(uint16_t mip = 0, uint8_t face = 0) const;
|
||||
const StoragePointer& getStorage() const { return _storage; }
|
||||
|
||||
size_t getKeyValueDataSize() const;
|
||||
size_t getTexelsDataSize() const;
|
||||
|
||||
StoragePointer _storage;
|
||||
KeyValues _keyValues;
|
||||
Images _images;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif // hifi_ktx_KTX_h
|
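As a rough usage sketch (not from this commit) of the read path declared above; blob is assumed to be a ktx::StoragePointer holding a complete .ktx file, for example a storage::FileStorage as used elsewhere in this change:

    // Sketch only: parse a KTX blob and walk its mip images.
    std::unique_ptr<ktx::KTX> parsed = ktx::KTX::create(blob);
    if (parsed) {
        const ktx::Header* header = parsed->getHeader();
        for (uint32_t level = 0; level < header->getNumberOfLevels(); ++level) {
            auto mipStorage = parsed->getMipFaceTexelsData((uint16_t) level, 0);
            // ... hand mipStorage to the GPU layer, write it out, etc.
        }
    }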
195
libraries/ktx/src/ktx/Reader.cpp
Normal file
|
@ -0,0 +1,195 @@
|
|||
//
|
||||
// Reader.cpp
|
||||
// ktx/src/ktx
|
||||
//
|
||||
// Created by Zach Pomerantz on 2/08/2017.
|
||||
// Copyright 2017 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "KTX.h"
|
||||
|
||||
#include <list>
|
||||
#include <QtGlobal>
|
||||
#include <QtCore/QDebug>
|
||||
|
||||
#ifndef _MSC_VER
|
||||
#define NOEXCEPT noexcept
|
||||
#else
|
||||
#define NOEXCEPT
|
||||
#endif
|
||||
|
||||
namespace ktx {
|
||||
class ReaderException: public std::exception {
|
||||
public:
|
||||
ReaderException(const std::string& explanation) : _explanation("KTX deserialization error: " + explanation) {}
|
||||
const char* what() const NOEXCEPT override { return _explanation.c_str(); }
|
||||
private:
|
||||
const std::string _explanation;
|
||||
};
|
||||
|
||||
bool checkEndianness(uint32_t endianness, bool& matching) {
|
||||
switch (endianness) {
|
||||
case Header::ENDIAN_TEST: {
|
||||
matching = true;
|
||||
return true;
|
||||
}
|
||||
break;
|
||||
case Header::REVERSE_ENDIAN_TEST:
|
||||
{
|
||||
matching = false;
|
||||
return true;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
throw ReaderException("endianness field has invalid value");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool checkIdentifier(const Byte* identifier) {
|
||||
if (!(0 == memcmp(identifier, Header::IDENTIFIER.data(), Header::IDENTIFIER_LENGTH))) {
|
||||
throw ReaderException("identifier field invalid");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool KTX::checkHeaderFromStorage(size_t srcSize, const Byte* srcBytes) {
|
||||
try {
|
||||
// validation
|
||||
if (srcSize < sizeof(Header)) {
|
||||
throw ReaderException("length is too short for header");
|
||||
}
|
||||
const Header* header = reinterpret_cast<const Header*>(srcBytes);
|
||||
|
||||
checkIdentifier(header->identifier);
|
||||
|
||||
bool endianMatch { true };
|
||||
checkEndianness(header->endianness, endianMatch);
|
||||
|
||||
// TODO: endian conversion if !endianMatch - for now, this is for local use and is unnecessary
|
||||
|
||||
|
||||
// TODO: calculate bytesOfTexData
|
||||
if (srcSize < (sizeof(Header) + header->bytesOfKeyValueData)) {
|
||||
throw ReaderException("length is too short for metadata");
|
||||
}
|
||||
|
||||
size_t bytesOfTexData = 0;
|
||||
if (srcSize < (sizeof(Header) + header->bytesOfKeyValueData + bytesOfTexData)) {
|
||||
|
||||
throw ReaderException("length is too short for data");
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
catch (const ReaderException& e) {
|
||||
qWarning() << e.what();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
KeyValue KeyValue::parseSerializedKeyAndValue(uint32_t srcSize, const Byte* srcBytes) {
|
||||
uint32_t keyAndValueByteSize;
|
||||
memcpy(&keyAndValueByteSize, srcBytes, sizeof(uint32_t));
|
||||
if (keyAndValueByteSize + sizeof(uint32_t) > srcSize) {
|
||||
throw ReaderException("invalid key-value size");
|
||||
}
|
||||
auto keyValueBytes = srcBytes + sizeof(uint32_t);
|
||||
|
||||
// find the first null character \0 and extract the key
|
||||
uint32_t keyLength = 0;
|
||||
while (reinterpret_cast<const char*>(keyValueBytes)[++keyLength] != '\0') {
|
||||
if (keyLength == keyAndValueByteSize) {
|
||||
// key must be null-terminated, and there must be space for the value
|
||||
throw ReaderException("invalid key-value " + std::string(reinterpret_cast<const char*>(keyValueBytes), keyLength));
|
||||
}
|
||||
}
|
||||
uint32_t valueStartOffset = keyLength + 1;
|
||||
|
||||
// parse the key-value
|
||||
return KeyValue(std::string(reinterpret_cast<const char*>(keyValueBytes), keyLength),
|
||||
keyAndValueByteSize - valueStartOffset, keyValueBytes + valueStartOffset);
|
||||
}
|
||||
|
||||
KeyValues KTX::parseKeyValues(size_t srcSize, const Byte* srcBytes) {
|
||||
KeyValues keyValues;
|
||||
try {
|
||||
auto src = srcBytes;
|
||||
uint32_t length = (uint32_t) srcSize;
|
||||
uint32_t offset = 0;
|
||||
while (offset < length) {
|
||||
auto keyValue = KeyValue::parseSerializedKeyAndValue(length - offset, src);
|
||||
keyValues.emplace_back(keyValue);
|
||||
|
||||
// advance offset/src
|
||||
offset += keyValue.serializedByteSize();
|
||||
src += keyValue.serializedByteSize();
|
||||
}
|
||||
}
|
||||
catch (const ReaderException& e) {
|
||||
qWarning() << e.what();
|
||||
}
|
||||
return keyValues;
|
||||
}
|
||||
|
||||
Images KTX::parseImages(const Header& header, size_t srcSize, const Byte* srcBytes) {
|
||||
Images images;
|
||||
auto currentPtr = srcBytes;
|
||||
auto numFaces = header.numberOfFaces;
|
||||
|
||||
// Keep identifying new mips as long as we can at least query the next imageSize
|
||||
while ((currentPtr - srcBytes) + sizeof(uint32_t) <= (srcSize)) {
|
||||
|
||||
// Grab the imageSize coming up
|
||||
size_t imageSize = *reinterpret_cast<const uint32_t*>(currentPtr);
|
||||
currentPtr += sizeof(uint32_t);
|
||||
|
||||
// If enough data ahead then capture the pointer
|
||||
if ((currentPtr - srcBytes) + imageSize <= (srcSize)) {
|
||||
auto padding = Header::evalPadding(imageSize);
|
||||
|
||||
if (numFaces == NUM_CUBEMAPFACES) {
|
||||
size_t faceSize = imageSize / NUM_CUBEMAPFACES;
|
||||
Image::FaceBytes faces(NUM_CUBEMAPFACES);
|
||||
for (uint32_t face = 0; face < NUM_CUBEMAPFACES; face++) {
|
||||
faces[face] = currentPtr;
|
||||
currentPtr += faceSize;
|
||||
}
|
||||
images.emplace_back(Image((uint32_t) faceSize, padding, faces));
|
||||
currentPtr += padding;
|
||||
} else {
|
||||
images.emplace_back(Image((uint32_t) imageSize, padding, currentPtr));
|
||||
currentPtr += imageSize + padding;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return images;
|
||||
}
|
||||
|
||||
std::unique_ptr<KTX> KTX::create(const StoragePointer& src) {
|
||||
if (!src) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (!checkHeaderFromStorage(src->size(), src->data())) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::unique_ptr<KTX> result(new KTX());
|
||||
result->resetStorage(src);
|
||||
|
||||
// read metadata
|
||||
result->_keyValues = parseKeyValues(result->getHeader()->bytesOfKeyValueData, result->getKeyValueData());
|
||||
|
||||
// populate image table
|
||||
result->_images = parseImages(*result->getHeader(), result->getTexelsDataSize(), result->getTexelsData());
|
||||
|
||||
return result;
|
||||
}
|
||||
}
|
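A note on the key/value wire format implied by the parser above (a sketch, not normative): each entry begins with a 32-bit byte count, followed by a NUL-terminated key and then the raw value bytes.

    // Layout read by KeyValue::parseSerializedKeyAndValue (sketch):
    //   uint32_t keyAndValueByteSize;   // bytes of key + '\0' + value that follow
    //   char     key[];                 // NUL-terminated
    //   Byte     value[];               // keyAndValueByteSize - (keyLength + 1) bytes
    // Constructing an entry for later serialization (key and value chosen arbitrarily):
    ktx::KeyValue orientation("KTXorientation", std::string("S=r,T=d"));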
171
libraries/ktx/src/ktx/Writer.cpp
Normal file
|
@ -0,0 +1,171 @@
|
|||
//
|
||||
// Writer.cpp
|
||||
// ktx/src/ktx
|
||||
//
|
||||
// Created by Zach Pomerantz on 2/08/2017.
|
||||
// Copyright 2017 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "KTX.h"
|
||||
|
||||
|
||||
#include <QtGlobal>
|
||||
#include <QtCore/QDebug>
|
||||
#ifndef _MSC_VER
|
||||
#define NOEXCEPT noexcept
|
||||
#else
|
||||
#define NOEXCEPT
|
||||
#endif
|
||||
|
||||
namespace ktx {
|
||||
|
||||
class WriterException : public std::exception {
|
||||
public:
|
||||
WriterException(const std::string& explanation) : _explanation("KTX serialization error: " + explanation) {}
|
||||
const char* what() const NOEXCEPT override { return _explanation.c_str(); }
|
||||
private:
|
||||
const std::string _explanation;
|
||||
};
|
||||
|
||||
std::unique_ptr<KTX> KTX::create(const Header& header, const Images& images, const KeyValues& keyValues) {
|
||||
StoragePointer storagePointer;
|
||||
{
|
||||
auto storageSize = ktx::KTX::evalStorageSize(header, images, keyValues);
|
||||
auto memoryStorage = new storage::MemoryStorage(storageSize);
|
||||
ktx::KTX::write(memoryStorage->data(), memoryStorage->size(), header, images, keyValues);
|
||||
storagePointer.reset(memoryStorage);
|
||||
}
|
||||
return create(storagePointer);
|
||||
}
|
||||
|
||||
size_t KTX::evalStorageSize(const Header& header, const Images& images, const KeyValues& keyValues) {
|
||||
size_t storageSize = sizeof(Header);
|
||||
|
||||
if (!keyValues.empty()) {
|
||||
size_t keyValuesSize = KeyValue::serializedKeyValuesByteSize(keyValues);
|
||||
storageSize += keyValuesSize;
|
||||
}
|
||||
|
||||
auto numMips = header.getNumberOfLevels();
|
||||
for (uint32_t l = 0; l < numMips; l++) {
|
||||
if (images.size() > l) {
|
||||
storageSize += sizeof(uint32_t);
|
||||
storageSize += images[l]._imageSize;
|
||||
storageSize += Header::evalPadding(images[l]._imageSize);
|
||||
}
|
||||
}
|
||||
return storageSize;
|
||||
}
|
||||
|
||||
size_t KTX::write(Byte* destBytes, size_t destByteSize, const Header& header, const Images& srcImages, const KeyValues& keyValues) {
|
||||
// Check again that we have enough destination capacity
|
||||
if (!destBytes || (destByteSize < evalStorageSize(header, srcImages, keyValues))) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
auto currentDestPtr = destBytes;
|
||||
// Header
|
||||
auto destHeader = reinterpret_cast<Header*>(currentDestPtr);
|
||||
memcpy(currentDestPtr, &header, sizeof(Header));
|
||||
currentDestPtr += sizeof(Header);
|
||||
|
||||
// KeyValues
|
||||
if (!keyValues.empty()) {
|
||||
destHeader->bytesOfKeyValueData = (uint32_t) writeKeyValues(currentDestPtr, destByteSize - sizeof(Header), keyValues);
|
||||
} else {
|
||||
// Make sure the header contains the right bytesOfKeyValueData size
|
||||
destHeader->bytesOfKeyValueData = 0;
|
||||
}
|
||||
currentDestPtr += destHeader->bytesOfKeyValueData;
|
||||
|
||||
// Images
|
||||
auto destImages = writeImages(currentDestPtr, destByteSize - sizeof(Header) - destHeader->bytesOfKeyValueData, srcImages);
|
||||
// We could check here that the number of dest Images generated is the same as the source
|
||||
|
||||
return destByteSize;
|
||||
}
|
||||
|
||||
uint32_t KeyValue::writeSerializedKeyAndValue(Byte* destBytes, uint32_t destByteSize, const KeyValue& keyval) {
|
||||
uint32_t keyvalSize = keyval.serializedByteSize();
|
||||
if (keyvalSize > destByteSize) {
|
||||
throw WriterException("invalid key-value size");
|
||||
}
|
||||
|
||||
*((uint32_t*) destBytes) = keyval._byteSize;
|
||||
|
||||
auto dest = destBytes + sizeof(uint32_t);
|
||||
|
||||
auto keySize = keyval._key.size() + 1; // Add 1 for the '\0' character at the end of the string
|
||||
memcpy(dest, keyval._key.data(), keySize);
|
||||
dest += keySize;
|
||||
|
||||
memcpy(dest, keyval._value.data(), keyval._value.size());
|
||||
|
||||
return keyvalSize;
|
||||
}
|
||||
|
||||
size_t KTX::writeKeyValues(Byte* destBytes, size_t destByteSize, const KeyValues& keyValues) {
|
||||
size_t writtenByteSize = 0;
|
||||
try {
|
||||
auto dest = destBytes;
|
||||
for (auto& keyval : keyValues) {
|
||||
size_t keyvalSize = KeyValue::writeSerializedKeyAndValue(dest, (uint32_t) (destByteSize - writtenByteSize), keyval);
|
||||
writtenByteSize += keyvalSize;
|
||||
dest += keyvalSize;
|
||||
}
|
||||
}
|
||||
catch (const WriterException& e) {
|
||||
qWarning() << e.what();
|
||||
}
|
||||
return writtenByteSize;
|
||||
}
|
||||
|
||||
Images KTX::writeImages(Byte* destBytes, size_t destByteSize, const Images& srcImages) {
|
||||
Images destImages;
|
||||
auto imagesDataPtr = destBytes;
|
||||
if (!imagesDataPtr) {
|
||||
return destImages;
|
||||
}
|
||||
auto allocatedImagesDataSize = destByteSize;
|
||||
size_t currentDataSize = 0;
|
||||
auto currentPtr = imagesDataPtr;
|
||||
|
||||
for (uint32_t l = 0; l < srcImages.size(); l++) {
|
||||
if (currentDataSize + sizeof(uint32_t) < allocatedImagesDataSize) {
|
||||
size_t imageSize = srcImages[l]._imageSize;
|
||||
*(reinterpret_cast<uint32_t*> (currentPtr)) = (uint32_t) imageSize;
|
||||
currentPtr += sizeof(uint32_t);
|
||||
currentDataSize += sizeof(uint32_t);
|
||||
|
||||
// If enough data ahead then capture the copy source pointer
|
||||
if (currentDataSize + imageSize <= (allocatedImagesDataSize)) {
|
||||
auto padding = Header::evalPadding(imageSize);
|
||||
|
||||
// Single face vs cubes
|
||||
if (srcImages[l]._numFaces == 1) {
|
||||
memcpy(currentPtr, srcImages[l]._faceBytes[0], imageSize);
|
||||
destImages.emplace_back(Image((uint32_t) imageSize, padding, currentPtr));
|
||||
currentPtr += imageSize;
|
||||
} else {
|
||||
Image::FaceBytes faceBytes(NUM_CUBEMAPFACES);
|
||||
auto faceSize = srcImages[l]._faceSize;
|
||||
for (int face = 0; face < NUM_CUBEMAPFACES; face++) {
|
||||
memcpy(currentPtr, srcImages[l]._faceBytes[face], faceSize);
|
||||
faceBytes[face] = currentPtr;
|
||||
currentPtr += faceSize;
|
||||
}
|
||||
destImages.emplace_back(Image(faceSize, padding, faceBytes));
|
||||
}
|
||||
|
||||
currentPtr += padding;
|
||||
currentDataSize += imageSize + padding;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return destImages;
|
||||
}
|
||||
|
||||
}
|
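A minimal sketch of the two-step write path documented in KTX.h, assuming the caller already has a filled-in header and an Images table referencing source texel data:

    // Sketch only: size the destination, then serialize into it.
    size_t needed = ktx::KTX::evalStorageSize(header, images);
    std::vector<ktx::Byte> buffer(needed);
    ktx::KTX::write(buffer.data(), buffer.size(), header, images);
    // KTX::create(header, images) performs the same steps into a MemoryStorage it owns.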
|
@ -1,4 +1,4 @@
|
|||
set(TARGET_NAME model-networking)
|
||||
setup_hifi_library()
|
||||
link_hifi_libraries(shared networking model fbx)
|
||||
link_hifi_libraries(shared networking model fbx ktx)
|
||||
|
||||
|
|
47
libraries/model-networking/src/model-networking/KTXCache.cpp
Normal file
|
@ -0,0 +1,47 @@
|
|||
//
|
||||
// KTXCache.cpp
|
||||
// libraries/model-networking/src
|
||||
//
|
||||
// Created by Zach Pomerantz on 2/22/2017.
|
||||
// Copyright 2017 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include "KTXCache.h"
|
||||
|
||||
#include <ktx/KTX.h>
|
||||
|
||||
using File = cache::File;
|
||||
using FilePointer = cache::FilePointer;
|
||||
|
||||
KTXCache::KTXCache(const std::string& dir, const std::string& ext) :
|
||||
FileCache(dir, ext) {
|
||||
initialize();
|
||||
}
|
||||
|
||||
KTXFilePointer KTXCache::writeFile(const char* data, Metadata&& metadata) {
|
||||
FilePointer file = FileCache::writeFile(data, std::move(metadata));
|
||||
return std::static_pointer_cast<KTXFile>(file);
|
||||
}
|
||||
|
||||
KTXFilePointer KTXCache::getFile(const Key& key) {
|
||||
return std::static_pointer_cast<KTXFile>(FileCache::getFile(key));
|
||||
}
|
||||
|
||||
std::unique_ptr<File> KTXCache::createFile(Metadata&& metadata, const std::string& filepath) {
|
||||
qCInfo(file_cache) << "Wrote KTX" << metadata.key.c_str();
|
||||
return std::unique_ptr<File>(new KTXFile(std::move(metadata), filepath));
|
||||
}
|
||||
|
||||
KTXFile::KTXFile(Metadata&& metadata, const std::string& filepath) :
|
||||
cache::File(std::move(metadata), filepath) {}
|
||||
|
||||
std::unique_ptr<ktx::KTX> KTXFile::getKTX() const {
|
||||
ktx::StoragePointer storage = std::make_shared<storage::FileStorage>(getFilepath().c_str());
|
||||
if (*storage) {
|
||||
return ktx::KTX::create(storage);
|
||||
}
|
||||
return {};
|
||||
}
|
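A round-trip sketch for the cache above; ktxData, hash and length are placeholders, and Metadata is inherited from cache::FileCache:

    // Sketch only: persist a serialized KTX blob and read it back.
    KTXCache cache("ktx_cache", "ktx");
    KTXFilePointer file = cache.writeFile(ktxData, KTXCache::Metadata(hash, length));
    if (file) {
        std::unique_ptr<ktx::KTX> reloaded = file->getKTX();  // null if the on-disk file fails validation
    }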
51
libraries/model-networking/src/model-networking/KTXCache.h
Normal file
|
@ -0,0 +1,51 @@
|
|||
//
|
||||
// KTXCache.h
|
||||
// libraries/model-networking/src
|
||||
//
|
||||
// Created by Zach Pomerantz 2/22/2017.
|
||||
// Copyright 2017 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#ifndef hifi_KTXCache_h
|
||||
#define hifi_KTXCache_h
|
||||
|
||||
#include <QUrl>
|
||||
|
||||
#include <FileCache.h>
|
||||
|
||||
namespace ktx {
|
||||
class KTX;
|
||||
}
|
||||
|
||||
class KTXFile;
|
||||
using KTXFilePointer = std::shared_ptr<KTXFile>;
|
||||
|
||||
class KTXCache : public cache::FileCache {
|
||||
Q_OBJECT
|
||||
|
||||
public:
|
||||
KTXCache(const std::string& dir, const std::string& ext);
|
||||
|
||||
KTXFilePointer writeFile(const char* data, Metadata&& metadata);
|
||||
KTXFilePointer getFile(const Key& key);
|
||||
|
||||
protected:
|
||||
std::unique_ptr<cache::File> createFile(Metadata&& metadata, const std::string& filepath) override final;
|
||||
};
|
||||
|
||||
class KTXFile : public cache::File {
|
||||
Q_OBJECT
|
||||
|
||||
public:
|
||||
std::unique_ptr<ktx::KTX> getKTX() const;
|
||||
|
||||
protected:
|
||||
friend class KTXCache;
|
||||
|
||||
KTXFile(Metadata&& metadata, const std::string& filepath);
|
||||
};
|
||||
|
||||
#endif // hifi_KTXCache_h
|
|
@ -18,27 +18,37 @@
|
|||
#include <QRunnable>
|
||||
#include <QThreadPool>
|
||||
#include <QImageReader>
|
||||
|
||||
#if DEBUG_DUMP_TEXTURE_LOADS
|
||||
#include <QtCore/QFile>
|
||||
#include <QtCore/QFileInfo>
|
||||
#endif
|
||||
|
||||
#include <glm/glm.hpp>
|
||||
#include <glm/gtc/random.hpp>
|
||||
|
||||
#include <gpu/Batch.h>
|
||||
|
||||
#include <ktx/KTX.h>
|
||||
|
||||
#include <NumericalConstants.h>
|
||||
#include <shared/NsightHelpers.h>
|
||||
|
||||
#include <Finally.h>
|
||||
#include <PathUtils.h>
|
||||
|
||||
#include "ModelNetworkingLogging.h"
|
||||
#include <Trace.h>
|
||||
#include <StatTracker.h>
|
||||
|
||||
Q_LOGGING_CATEGORY(trace_resource_parse_image, "trace.resource.parse.image")
|
||||
Q_LOGGING_CATEGORY(trace_resource_parse_image_raw, "trace.resource.parse.image.raw")
|
||||
Q_LOGGING_CATEGORY(trace_resource_parse_image_ktx, "trace.resource.parse.image.ktx")
|
||||
|
||||
TextureCache::TextureCache() {
|
||||
const std::string TextureCache::KTX_DIRNAME { "ktx_cache" };
|
||||
const std::string TextureCache::KTX_EXT { "ktx" };
|
||||
|
||||
TextureCache::TextureCache() :
|
||||
_ktxCache(KTX_DIRNAME, KTX_EXT) {
|
||||
setUnusedResourceCacheSize(0);
|
||||
setObjectName("TextureCache");
|
||||
|
||||
|
@ -61,7 +71,7 @@ TextureCache::~TextureCache() {
|
|||
// this list taken from Ken Perlin's Improved Noise reference implementation (orig. in Java) at
|
||||
// http://mrl.nyu.edu/~perlin/noise/
|
||||
|
||||
const int permutation[256] =
|
||||
const int permutation[256] =
|
||||
{
|
||||
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225,
|
||||
140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6, 148,
|
||||
|
@ -108,7 +118,8 @@ const gpu::TexturePointer& TextureCache::getPermutationNormalTexture() {
|
|||
}
|
||||
|
||||
_permutationNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB), 256, 2));
|
||||
_permutationNormalTexture->assignStoredMip(0, _blueTexture->getTexelFormat(), sizeof(data), data);
|
||||
_permutationNormalTexture->setStoredMipFormat(_permutationNormalTexture->getTexelFormat());
|
||||
_permutationNormalTexture->assignStoredMip(0, sizeof(data), data);
|
||||
}
|
||||
return _permutationNormalTexture;
|
||||
}
|
||||
|
@ -120,36 +131,40 @@ const unsigned char OPAQUE_BLACK[] = { 0x00, 0x00, 0x00, 0xFF };
|
|||
|
||||
const gpu::TexturePointer& TextureCache::getWhiteTexture() {
|
||||
if (!_whiteTexture) {
|
||||
_whiteTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
|
||||
_whiteTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
|
||||
_whiteTexture->setSource("TextureCache::_whiteTexture");
|
||||
_whiteTexture->assignStoredMip(0, _whiteTexture->getTexelFormat(), sizeof(OPAQUE_WHITE), OPAQUE_WHITE);
|
||||
_whiteTexture->setStoredMipFormat(_whiteTexture->getTexelFormat());
|
||||
_whiteTexture->assignStoredMip(0, sizeof(OPAQUE_WHITE), OPAQUE_WHITE);
|
||||
}
|
||||
return _whiteTexture;
|
||||
}
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getGrayTexture() {
|
||||
if (!_grayTexture) {
|
||||
_grayTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
|
||||
_grayTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
|
||||
_grayTexture->setSource("TextureCache::_grayTexture");
|
||||
_grayTexture->assignStoredMip(0, _grayTexture->getTexelFormat(), sizeof(OPAQUE_GRAY), OPAQUE_GRAY);
|
||||
_grayTexture->setStoredMipFormat(_grayTexture->getTexelFormat());
|
||||
_grayTexture->assignStoredMip(0, sizeof(OPAQUE_GRAY), OPAQUE_GRAY);
|
||||
}
|
||||
return _grayTexture;
|
||||
}
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getBlueTexture() {
|
||||
if (!_blueTexture) {
|
||||
_blueTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
|
||||
_blueTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
|
||||
_blueTexture->setSource("TextureCache::_blueTexture");
|
||||
_blueTexture->assignStoredMip(0, _blueTexture->getTexelFormat(), sizeof(OPAQUE_BLUE), OPAQUE_BLUE);
|
||||
_blueTexture->setStoredMipFormat(_blueTexture->getTexelFormat());
|
||||
_blueTexture->assignStoredMip(0, sizeof(OPAQUE_BLUE), OPAQUE_BLUE);
|
||||
}
|
||||
return _blueTexture;
|
||||
}
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getBlackTexture() {
|
||||
if (!_blackTexture) {
|
||||
_blackTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, 1, 1));
|
||||
_blackTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
|
||||
_blackTexture->setSource("TextureCache::_blackTexture");
|
||||
_blackTexture->assignStoredMip(0, _blackTexture->getTexelFormat(), sizeof(OPAQUE_BLACK), OPAQUE_BLACK);
|
||||
_blackTexture->setStoredMipFormat(_blackTexture->getTexelFormat());
|
||||
_blackTexture->assignStoredMip(0, sizeof(OPAQUE_BLACK), OPAQUE_BLACK);
|
||||
}
|
||||
return _blackTexture;
|
||||
}
|
||||
|
@ -173,6 +188,72 @@ NetworkTexturePointer TextureCache::getTexture(const QUrl& url, Type type, const
|
|||
return ResourceCache::getResource(url, QUrl(), &extra).staticCast<NetworkTexture>();
|
||||
}
|
||||
|
||||
gpu::TexturePointer TextureCache::getTextureByHash(const std::string& hash) {
|
||||
std::weak_ptr<gpu::Texture> weakPointer;
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(_texturesByHashesMutex);
|
||||
weakPointer = _texturesByHashes[hash];
|
||||
}
|
||||
auto result = weakPointer.lock();
|
||||
if (result) {
|
||||
qCWarning(modelnetworking) << "QQQ Returning live texture for hash " << hash.c_str();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
gpu::TexturePointer TextureCache::cacheTextureByHash(const std::string& hash, const gpu::TexturePointer& texture) {
|
||||
gpu::TexturePointer result;
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(_texturesByHashesMutex);
|
||||
result = _texturesByHashes[hash].lock();
|
||||
if (!result) {
|
||||
_texturesByHashes[hash] = texture;
|
||||
result = texture;
|
||||
} else {
|
||||
qCWarning(modelnetworking) << "QQQ Swapping out texture with previous live texture in hash " << hash.c_str();
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
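Usage sketch for the two hash helpers above (contentHash and decodedTexture are illustrative): cacheTextureByHash returns whichever texture won the race for a given hash, so callers should adopt its return value rather than keep their own instance.

    // Sketch only: dedupe a freshly decoded texture by content hash.
    auto textureCache = DependencyManager::get<TextureCache>();
    gpu::TexturePointer shared = textureCache->cacheTextureByHash(contentHash, decodedTexture);
    // Later lookups by the same hash return the live instance, if one still exists.
    gpu::TexturePointer live = textureCache->getTextureByHash(contentHash);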
||||
|
||||
|
||||
gpu::TexturePointer getFallbackTextureForType(NetworkTexture::Type type) {
|
||||
gpu::TexturePointer result;
|
||||
auto textureCache = DependencyManager::get<TextureCache>();
|
||||
// Since this can be called on a background thread, there's a chance that the cache
|
||||
// will be destroyed by the time we request it
|
||||
if (!textureCache) {
|
||||
return result;
|
||||
}
|
||||
switch (type) {
|
||||
case NetworkTexture::DEFAULT_TEXTURE:
|
||||
case NetworkTexture::ALBEDO_TEXTURE:
|
||||
case NetworkTexture::ROUGHNESS_TEXTURE:
|
||||
case NetworkTexture::OCCLUSION_TEXTURE:
|
||||
result = textureCache->getWhiteTexture();
|
||||
break;
|
||||
|
||||
case NetworkTexture::NORMAL_TEXTURE:
|
||||
result = textureCache->getBlueTexture();
|
||||
break;
|
||||
|
||||
case NetworkTexture::EMISSIVE_TEXTURE:
|
||||
case NetworkTexture::LIGHTMAP_TEXTURE:
|
||||
result = textureCache->getBlackTexture();
|
||||
break;
|
||||
|
||||
case NetworkTexture::BUMP_TEXTURE:
|
||||
case NetworkTexture::SPECULAR_TEXTURE:
|
||||
case NetworkTexture::GLOSS_TEXTURE:
|
||||
case NetworkTexture::CUBE_TEXTURE:
|
||||
case NetworkTexture::CUSTOM_TEXTURE:
|
||||
case NetworkTexture::STRICT_TEXTURE:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
NetworkTexture::TextureLoaderFunc getTextureLoaderForType(NetworkTexture::Type type,
|
||||
const QVariantMap& options = QVariantMap()) {
|
||||
|
@ -219,11 +300,16 @@ NetworkTexture::TextureLoaderFunc getTextureLoaderForType(NetworkTexture::Type t
|
|||
return model::TextureUsage::createMetallicTextureFromImage;
|
||||
break;
|
||||
}
|
||||
case Type::STRICT_TEXTURE: {
|
||||
return model::TextureUsage::createStrict2DTextureFromImage;
|
||||
break;
|
||||
}
|
||||
case Type::CUSTOM_TEXTURE: {
|
||||
Q_ASSERT(false);
|
||||
return NetworkTexture::TextureLoaderFunc();
|
||||
break;
|
||||
}
|
||||
|
||||
case Type::DEFAULT_TEXTURE:
|
||||
default: {
|
||||
return model::TextureUsage::create2DTextureFromImage;
|
||||
|
@ -245,8 +331,8 @@ QSharedPointer<Resource> TextureCache::createResource(const QUrl& url, const QSh
|
|||
auto type = textureExtra ? textureExtra->type : Type::DEFAULT_TEXTURE;
|
||||
auto content = textureExtra ? textureExtra->content : QByteArray();
|
||||
auto maxNumPixels = textureExtra ? textureExtra->maxNumPixels : ABSOLUTE_MAX_TEXTURE_NUM_PIXELS;
|
||||
return QSharedPointer<Resource>(new NetworkTexture(url, type, content, maxNumPixels),
|
||||
&Resource::deleter);
|
||||
NetworkTexture* texture = new NetworkTexture(url, type, content, maxNumPixels);
|
||||
return QSharedPointer<Resource>(texture, &Resource::deleter);
|
||||
}
|
||||
|
||||
NetworkTexture::NetworkTexture(const QUrl& url, Type type, const QByteArray& content, int maxNumPixels) :
|
||||
|
@ -260,7 +346,6 @@ NetworkTexture::NetworkTexture(const QUrl& url, Type type, const QByteArray& con
|
|||
_loaded = true;
|
||||
}
|
||||
|
||||
std::string theName = url.toString().toStdString();
|
||||
// if we have content, load it after we have our self pointer
|
||||
if (!content.isEmpty()) {
|
||||
_startedLoading = true;
|
||||
|
@ -268,12 +353,6 @@ NetworkTexture::NetworkTexture(const QUrl& url, Type type, const QByteArray& con
|
|||
}
|
||||
}
|
||||
|
||||
NetworkTexture::NetworkTexture(const QUrl& url, const TextureLoaderFunc& textureLoader, const QByteArray& content) :
|
||||
NetworkTexture(url, CUSTOM_TEXTURE, content, ABSOLUTE_MAX_TEXTURE_NUM_PIXELS)
|
||||
{
|
||||
_textureLoader = textureLoader;
|
||||
}
|
||||
|
||||
NetworkTexture::TextureLoaderFunc NetworkTexture::getTextureLoader() const {
|
||||
if (_type == CUSTOM_TEXTURE) {
|
||||
return _textureLoader;
|
||||
|
@ -281,149 +360,6 @@ NetworkTexture::TextureLoaderFunc NetworkTexture::getTextureLoader() const {
|
|||
return getTextureLoaderForType(_type);
|
||||
}
|
||||
|
||||
|
||||
class ImageReader : public QRunnable {
|
||||
public:
|
||||
|
||||
ImageReader(const QWeakPointer<Resource>& resource, const QByteArray& data,
|
||||
const QUrl& url = QUrl(), int maxNumPixels = ABSOLUTE_MAX_TEXTURE_NUM_PIXELS);
|
||||
|
||||
virtual void run() override;
|
||||
|
||||
private:
|
||||
static void listSupportedImageFormats();
|
||||
|
||||
QWeakPointer<Resource> _resource;
|
||||
QUrl _url;
|
||||
QByteArray _content;
|
||||
int _maxNumPixels;
|
||||
};
|
||||
|
||||
void NetworkTexture::downloadFinished(const QByteArray& data) {
|
||||
// send the reader off to the thread pool
|
||||
QThreadPool::globalInstance()->start(new ImageReader(_self, data, _url));
|
||||
}
|
||||
|
||||
void NetworkTexture::loadContent(const QByteArray& content) {
|
||||
QThreadPool::globalInstance()->start(new ImageReader(_self, content, _url, _maxNumPixels));
|
||||
}
|
||||
|
||||
ImageReader::ImageReader(const QWeakPointer<Resource>& resource, const QByteArray& data,
|
||||
const QUrl& url, int maxNumPixels) :
|
||||
_resource(resource),
|
||||
_url(url),
|
||||
_content(data),
|
||||
_maxNumPixels(maxNumPixels)
|
||||
{
|
||||
#if DEBUG_DUMP_TEXTURE_LOADS
|
||||
static auto start = usecTimestampNow() / USECS_PER_MSEC;
|
||||
auto now = usecTimestampNow() / USECS_PER_MSEC - start;
|
||||
QString urlStr = _url.toString();
|
||||
auto dot = urlStr.lastIndexOf(".");
|
||||
QString outFileName = QString(QCryptographicHash::hash(urlStr.toLocal8Bit(), QCryptographicHash::Md5).toHex()) + urlStr.right(urlStr.length() - dot);
|
||||
QFile loadRecord("h:/textures/loads.txt");
|
||||
loadRecord.open(QFile::Text | QFile::Append | QFile::ReadWrite);
|
||||
loadRecord.write(QString("%1 %2\n").arg(now).arg(outFileName).toLocal8Bit());
|
||||
outFileName = "h:/textures/" + outFileName;
|
||||
QFileInfo outInfo(outFileName);
|
||||
if (!outInfo.exists()) {
|
||||
QFile outFile(outFileName);
|
||||
outFile.open(QFile::WriteOnly | QFile::Truncate);
|
||||
outFile.write(data);
|
||||
outFile.close();
|
||||
}
|
||||
#endif
|
||||
DependencyManager::get<StatTracker>()->incrementStat("PendingProcessing");
|
||||
}
|
||||
|
||||
void ImageReader::listSupportedImageFormats() {
|
||||
static std::once_flag once;
|
||||
std::call_once(once, []{
|
||||
auto supportedFormats = QImageReader::supportedImageFormats();
|
||||
qCDebug(modelnetworking) << "List of supported Image formats:" << supportedFormats.join(", ");
|
||||
});
|
||||
}
|
||||
|
||||
void ImageReader::run() {
|
||||
DependencyManager::get<StatTracker>()->decrementStat("PendingProcessing");
|
||||
|
||||
CounterStat counter("Processing");
|
||||
|
||||
PROFILE_RANGE_EX(resource_parse_image, __FUNCTION__, 0xffff0000, 0, { { "url", _url.toString() } });
|
||||
auto originalPriority = QThread::currentThread()->priority();
|
||||
if (originalPriority == QThread::InheritPriority) {
|
||||
originalPriority = QThread::NormalPriority;
|
||||
}
|
||||
QThread::currentThread()->setPriority(QThread::LowPriority);
|
||||
Finally restorePriority([originalPriority]{
|
||||
QThread::currentThread()->setPriority(originalPriority);
|
||||
});
|
||||
|
||||
if (!_resource.data()) {
|
||||
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
|
||||
return;
|
||||
}
|
||||
listSupportedImageFormats();
|
||||
|
||||
// Help the QImage loader by extracting the image file format from the url filename ext.
|
||||
// Some tga are not created properly without it.
|
||||
auto filename = _url.fileName().toStdString();
|
||||
auto filenameExtension = filename.substr(filename.find_last_of('.') + 1);
|
||||
QImage image = QImage::fromData(_content, filenameExtension.c_str());
|
||||
|
||||
// Note that QImage.format is the pixel format which is different from the "format" of the image file...
|
||||
auto imageFormat = image.format();
|
||||
int imageWidth = image.width();
|
||||
int imageHeight = image.height();
|
||||
|
||||
if (imageWidth == 0 || imageHeight == 0 || imageFormat == QImage::Format_Invalid) {
|
||||
if (filenameExtension.empty()) {
|
||||
qCDebug(modelnetworking) << "QImage failed to create from content, no file extension:" << _url;
|
||||
} else {
|
||||
qCDebug(modelnetworking) << "QImage failed to create from content" << _url;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (imageWidth * imageHeight > _maxNumPixels) {
|
||||
float scaleFactor = sqrtf(_maxNumPixels / (float)(imageWidth * imageHeight));
|
||||
int originalWidth = imageWidth;
|
||||
int originalHeight = imageHeight;
|
||||
imageWidth = (int)(scaleFactor * (float)imageWidth + 0.5f);
|
||||
imageHeight = (int)(scaleFactor * (float)imageHeight + 0.5f);
|
||||
QImage newImage = image.scaled(QSize(imageWidth, imageHeight), Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
|
||||
image.swap(newImage);
|
||||
qCDebug(modelnetworking) << "Downscale image" << _url
|
||||
<< "from" << originalWidth << "x" << originalHeight
|
||||
<< "to" << imageWidth << "x" << imageHeight;
|
||||
}
|
||||
|
||||
gpu::TexturePointer texture = nullptr;
|
||||
{
|
||||
// Double-check the resource still exists between long operations.
|
||||
auto resource = _resource.toStrongRef();
|
||||
if (!resource) {
|
||||
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
|
||||
return;
|
||||
}
|
||||
|
||||
auto url = _url.toString().toStdString();
|
||||
|
||||
PROFILE_RANGE_EX(resource_parse_image, __FUNCTION__, 0xffffff00, 0);
|
||||
texture.reset(resource.dynamicCast<NetworkTexture>()->getTextureLoader()(image, url));
|
||||
}
|
||||
|
||||
// Ensure the resource has not been deleted
|
||||
auto resource = _resource.toStrongRef();
|
||||
if (!resource) {
|
||||
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
|
||||
} else {
|
||||
QMetaObject::invokeMethod(resource.data(), "setImage",
|
||||
Q_ARG(gpu::TexturePointer, texture),
|
||||
Q_ARG(int, imageWidth), Q_ARG(int, imageHeight));
|
||||
}
|
||||
}
|
||||
|
||||
void NetworkTexture::setImage(gpu::TexturePointer texture, int originalWidth,
|
||||
int originalHeight) {
|
||||
_originalWidth = originalWidth;
|
||||
|
@ -446,3 +382,231 @@ void NetworkTexture::setImage(gpu::TexturePointer texture, int originalWidth,
|
|||
|
||||
emit networkTextureCreated(qWeakPointerCast<NetworkTexture, Resource> (_self));
|
||||
}
|
||||
|
||||
gpu::TexturePointer NetworkTexture::getFallbackTexture() const {
|
||||
if (_type == CUSTOM_TEXTURE) {
|
||||
return gpu::TexturePointer();
|
||||
}
|
||||
return getFallbackTextureForType(_type);
|
||||
}
|
||||
|
||||
class Reader : public QRunnable {
|
||||
public:
|
||||
Reader(const QWeakPointer<Resource>& resource, const QUrl& url);
|
||||
void run() override final;
|
||||
virtual void read() = 0;
|
||||
|
||||
protected:
|
||||
QWeakPointer<Resource> _resource;
|
||||
QUrl _url;
|
||||
};
|
||||
|
||||
class ImageReader : public Reader {
|
||||
public:
|
||||
ImageReader(const QWeakPointer<Resource>& resource, const QUrl& url,
|
||||
const QByteArray& data, const std::string& hash, int maxNumPixels);
|
||||
void read() override final;
|
||||
|
||||
private:
|
||||
static void listSupportedImageFormats();
|
||||
|
||||
QByteArray _content;
|
||||
std::string _hash;
|
||||
int _maxNumPixels;
|
||||
};
|
||||
|
||||
void NetworkTexture::downloadFinished(const QByteArray& data) {
|
||||
loadContent(data);
|
||||
}
|
||||
|
||||
void NetworkTexture::loadContent(const QByteArray& content) {
|
||||
// Hash the source image for KTX caching
|
||||
std::string hash;
|
||||
{
|
||||
QCryptographicHash hasher(QCryptographicHash::Md5);
|
||||
hasher.addData(content);
|
||||
hash = hasher.result().toHex().toStdString();
|
||||
}
|
||||
|
||||
auto textureCache = static_cast<TextureCache*>(_cache.data());
|
||||
|
||||
if (textureCache != nullptr) {
|
||||
// If we already have a live texture with the same hash, use it
|
||||
auto texture = textureCache->getTextureByHash(hash);
|
||||
|
||||
// If there is no live texture, check if there's an existing KTX file
|
||||
if (!texture) {
|
||||
KTXFilePointer ktxFile = textureCache->_ktxCache.getFile(hash);
|
||||
if (ktxFile) {
|
||||
// Ensure that the KTX deserialization worked
|
||||
auto ktx = ktxFile->getKTX();
|
||||
if (ktx) {
|
||||
texture.reset(gpu::Texture::unserialize(ktx));
|
||||
// Ensure that the texture population worked
|
||||
if (texture) {
|
||||
texture->setKtxBacking(ktx);
|
||||
texture = textureCache->cacheTextureByHash(hash, texture);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we found the texture either because it's in use or via KTX deserialization,
|
||||
// set the image and return immediately.
|
||||
if (texture) {
|
||||
setImage(texture, texture->getWidth(), texture->getHeight());
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// We failed to find an existing live or KTX texture, so trigger an image reader
|
||||
QThreadPool::globalInstance()->start(new ImageReader(_self, _url, content, hash, _maxNumPixels));
|
||||
}
|
||||
|
||||
Reader::Reader(const QWeakPointer<Resource>& resource, const QUrl& url) :
|
||||
_resource(resource), _url(url) {
|
||||
DependencyManager::get<StatTracker>()->incrementStat("PendingProcessing");
|
||||
}
|
||||
|
||||
void Reader::run() {
|
||||
PROFILE_RANGE_EX(resource_parse_image, __FUNCTION__, 0xffff0000, 0, { { "url", _url.toString() } });
|
||||
DependencyManager::get<StatTracker>()->decrementStat("PendingProcessing");
|
||||
CounterStat counter("Processing");
|
||||
|
||||
auto originalPriority = QThread::currentThread()->priority();
|
||||
if (originalPriority == QThread::InheritPriority) {
|
||||
originalPriority = QThread::NormalPriority;
|
||||
}
|
||||
QThread::currentThread()->setPriority(QThread::LowPriority);
|
||||
Finally restorePriority([originalPriority]{ QThread::currentThread()->setPriority(originalPriority); });
|
||||
|
||||
if (!_resource.data()) {
|
||||
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
|
||||
return;
|
||||
}
|
||||
|
||||
read();
|
||||
}
|
||||
|
||||
ImageReader::ImageReader(const QWeakPointer<Resource>& resource, const QUrl& url,
|
||||
const QByteArray& data, const std::string& hash, int maxNumPixels) :
|
||||
Reader(resource, url), _content(data), _hash(hash), _maxNumPixels(maxNumPixels) {
|
||||
listSupportedImageFormats();
|
||||
|
||||
#if DEBUG_DUMP_TEXTURE_LOADS
|
||||
static auto start = usecTimestampNow() / USECS_PER_MSEC;
|
||||
auto now = usecTimestampNow() / USECS_PER_MSEC - start;
|
||||
QString urlStr = _url.toString();
|
||||
auto dot = urlStr.lastIndexOf(".");
|
||||
QString outFileName = QString(QCryptographicHash::hash(urlStr.toLocal8Bit(), QCryptographicHash::Md5).toHex()) + urlStr.right(urlStr.length() - dot);
|
||||
QFile loadRecord("h:/textures/loads.txt");
|
||||
loadRecord.open(QFile::Text | QFile::Append | QFile::ReadWrite);
|
||||
loadRecord.write(QString("%1 %2\n").arg(now).arg(outFileName).toLocal8Bit());
|
||||
outFileName = "h:/textures/" + outFileName;
|
||||
QFileInfo outInfo(outFileName);
|
||||
if (!outInfo.exists()) {
|
||||
QFile outFile(outFileName);
|
||||
outFile.open(QFile::WriteOnly | QFile::Truncate);
|
||||
outFile.write(data);
|
||||
outFile.close();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void ImageReader::listSupportedImageFormats() {
|
||||
static std::once_flag once;
|
||||
std::call_once(once, []{
|
||||
auto supportedFormats = QImageReader::supportedImageFormats();
|
||||
qCDebug(modelnetworking) << "List of supported Image formats:" << supportedFormats.join(", ");
|
||||
});
|
||||
}
|
||||
|
||||
void ImageReader::read() {
|
||||
// Help the QImage loader by extracting the image file format from the url filename ext.
|
||||
// Some tga are not created properly without it.
|
||||
auto filename = _url.fileName().toStdString();
|
||||
auto filenameExtension = filename.substr(filename.find_last_of('.') + 1);
|
||||
QImage image = QImage::fromData(_content, filenameExtension.c_str());
|
||||
int imageWidth = image.width();
|
||||
int imageHeight = image.height();
|
||||
|
||||
// Validate that the image loaded
|
||||
if (imageWidth == 0 || imageHeight == 0 || image.format() == QImage::Format_Invalid) {
|
||||
QString reason(filenameExtension.empty() ? "(no file extension)" : "");
|
||||
qCWarning(modelnetworking) << "Failed to load" << _url << reason;
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate the image is less than _maxNumPixels, and downscale if necessary
|
||||
if (imageWidth * imageHeight > _maxNumPixels) {
|
||||
float scaleFactor = sqrtf(_maxNumPixels / (float)(imageWidth * imageHeight));
|
||||
int originalWidth = imageWidth;
|
||||
int originalHeight = imageHeight;
|
||||
imageWidth = (int)(scaleFactor * (float)imageWidth + 0.5f);
|
||||
imageHeight = (int)(scaleFactor * (float)imageHeight + 0.5f);
|
||||
QImage newImage = image.scaled(QSize(imageWidth, imageHeight), Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
|
||||
image.swap(newImage);
|
||||
qCDebug(modelnetworking).nospace() << "Downscaled " << _url << " (" <<
|
||||
QSize(originalWidth, originalHeight) << " to " <<
|
||||
QSize(imageWidth, imageHeight) << ")";
|
||||
}
|
||||
|
||||
gpu::TexturePointer texture = nullptr;
|
||||
{
|
||||
auto resource = _resource.lock(); // to ensure the resource is still needed
|
||||
if (!resource) {
|
||||
qCDebug(modelnetworking) << _url << "loading stopped; resource out of scope";
|
||||
return;
|
||||
}
|
||||
|
||||
auto url = _url.toString().toStdString();
|
||||
|
||||
PROFILE_RANGE_EX(resource_parse_image_raw, __FUNCTION__, 0xffff0000, 0);
|
||||
// Load the image into a gpu::Texture
|
||||
auto networkTexture = resource.staticCast<NetworkTexture>();
|
||||
texture.reset(networkTexture->getTextureLoader()(image, url));
|
||||
if (texture) {
|
||||
texture->setSource(url);
|
||||
texture->setFallbackTexture(networkTexture->getFallbackTexture());
|
||||
}
|
||||
|
||||
auto textureCache = DependencyManager::get<TextureCache>();
|
||||
// Save the image into a KTXFile
|
||||
auto memKtx = gpu::Texture::serialize(*texture);
|
||||
if (!memKtx) {
|
||||
qCWarning(modelnetworking) << "Unable to serialize texture to KTX " << _url;
|
||||
}
|
||||
|
||||
if (memKtx && textureCache) {
|
||||
const char* data = reinterpret_cast<const char*>(memKtx->_storage->data());
|
||||
size_t length = memKtx->_storage->size();
|
||||
KTXFilePointer file;
|
||||
auto& ktxCache = textureCache->_ktxCache;
|
||||
if (!memKtx || !(file = ktxCache.writeFile(data, KTXCache::Metadata(_hash, length)))) {
|
||||
qCWarning(modelnetworking) << _url << "file cache failed";
|
||||
} else {
|
||||
resource.staticCast<NetworkTexture>()->_file = file;
|
||||
auto fileKtx = file->getKTX();
|
||||
if (fileKtx) {
|
||||
texture->setKtxBacking(fileKtx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We replace the texture with the one stored in the cache. This deals with the possible race condition of two different
|
||||
// images with the same hash being loaded concurrently. Only one of them will make it into the cache by hash first and will
|
||||
// be the winner
|
||||
if (textureCache) {
|
||||
texture = textureCache->cacheTextureByHash(_hash, texture);
|
||||
}
|
||||
}
|
||||
|
||||
auto resource = _resource.lock(); // to ensure the resource is still needed
|
||||
if (resource) {
|
||||
QMetaObject::invokeMethod(resource.data(), "setImage",
|
||||
Q_ARG(gpu::TexturePointer, texture),
|
||||
Q_ARG(int, imageWidth), Q_ARG(int, imageHeight));
|
||||
} else {
|
||||
qCDebug(modelnetworking) << _url << "loading stopped; resource out of scope";
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,6 +23,8 @@
|
|||
#include <ResourceCache.h>
|
||||
#include <model/TextureMap.h>
|
||||
|
||||
#include "KTXCache.h"
|
||||
|
||||
const int ABSOLUTE_MAX_TEXTURE_NUM_PIXELS = 8192 * 8192;
|
||||
|
||||
namespace gpu {
|
||||
|
@ -43,6 +45,7 @@ class NetworkTexture : public Resource, public Texture {
|
|||
public:
|
||||
enum Type {
|
||||
DEFAULT_TEXTURE,
|
||||
STRICT_TEXTURE,
|
||||
ALBEDO_TEXTURE,
|
||||
NORMAL_TEXTURE,
|
||||
BUMP_TEXTURE,
|
||||
|
@ -63,7 +66,6 @@ public:
|
|||
using TextureLoaderFunc = std::function<TextureLoader>;
|
||||
|
||||
NetworkTexture(const QUrl& url, Type type, const QByteArray& content, int maxNumPixels);
|
||||
NetworkTexture(const QUrl& url, const TextureLoaderFunc& textureLoader, const QByteArray& content);
|
||||
|
||||
QString getType() const override { return "NetworkTexture"; }
|
||||
|
||||
|
@ -74,12 +76,12 @@ public:
|
|||
Type getTextureType() const { return _type; }
|
||||
|
||||
TextureLoaderFunc getTextureLoader() const;
|
||||
gpu::TexturePointer getFallbackTexture() const;
|
||||
|
||||
signals:
|
||||
void networkTextureCreated(const QWeakPointer<NetworkTexture>& self);
|
||||
|
||||
protected:
|
||||
|
||||
virtual bool isCacheable() const override { return _loaded; }
|
||||
|
||||
virtual void downloadFinished(const QByteArray& data) override;
|
||||
|
@ -88,8 +90,12 @@ protected:
|
|||
Q_INVOKABLE void setImage(gpu::TexturePointer texture, int originalWidth, int originalHeight);
|
||||
|
||||
private:
|
||||
friend class KTXReader;
|
||||
friend class ImageReader;
|
||||
|
||||
Type _type;
|
||||
TextureLoaderFunc _textureLoader { [](const QImage&, const std::string&){ return nullptr; } };
|
||||
KTXFilePointer _file;
|
||||
int _originalWidth { 0 };
|
||||
int _originalHeight { 0 };
|
||||
int _width { 0 };
|
||||
|
@ -131,6 +137,10 @@ public:
|
|||
NetworkTexturePointer getTexture(const QUrl& url, Type type = Type::DEFAULT_TEXTURE,
|
||||
const QByteArray& content = QByteArray(), int maxNumPixels = ABSOLUTE_MAX_TEXTURE_NUM_PIXELS);
|
||||
|
||||
|
||||
gpu::TexturePointer getTextureByHash(const std::string& hash);
|
||||
gpu::TexturePointer cacheTextureByHash(const std::string& hash, const gpu::TexturePointer& texture);
|
||||
|
||||
protected:
|
||||
// Overload ResourceCache::prefetch to allow specifying texture type for loads
|
||||
Q_INVOKABLE ScriptableResource* prefetch(const QUrl& url, int type, int maxNumPixels = ABSOLUTE_MAX_TEXTURE_NUM_PIXELS);
|
||||
|
@ -139,9 +149,19 @@ protected:
|
|||
const void* extra) override;
|
||||
|
||||
private:
|
||||
friend class ImageReader;
|
||||
friend class NetworkTexture;
|
||||
friend class DilatableNetworkTexture;
|
||||
|
||||
TextureCache();
|
||||
virtual ~TextureCache();
|
||||
friend class DilatableNetworkTexture;
|
||||
|
||||
static const std::string KTX_DIRNAME;
|
||||
static const std::string KTX_EXT;
|
||||
KTXCache _ktxCache;
|
||||
// Map from image hashes to texture weak pointers
|
||||
std::unordered_map<std::string, std::weak_ptr<gpu::Texture>> _texturesByHashes;
|
||||
std::mutex _texturesByHashesMutex;
|
||||
|
||||
gpu::TexturePointer _permutationNormalTexture;
|
||||
gpu::TexturePointer _whiteTexture;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
set(TARGET_NAME model)
|
||||
AUTOSCRIBE_SHADER_LIB(gpu model)
|
||||
setup_hifi_library()
|
||||
link_hifi_libraries(shared gpu)
|
||||
link_hifi_libraries(shared ktx gpu)
|
||||
|
||||
|
|
|
@@ -10,10 +10,15 @@
//
#include "TextureMap.h"

#include <ktx/KTX.h>

#include <QImage>
#include <QPainter>
#include <QDebug>

#include <QStandardPaths>
#include <QFileInfo>
#include <QDir>
#include <QCryptographicHash>
#include <Profile.h>

#include "ModelLogging.h"

@@ -149,7 +154,7 @@ TextureUsage::process2DImageColor
    return image;
}

void TextureUsage::defineColorTexelFormats(gpu::Element& formatGPU, gpu::Element& formatMip,
    const QImage& image, bool isLinear, bool doCompress) {

#ifdef COMPRESS_TEXTURES

@@ -202,7 +207,7 @@
#define CPU_MIPMAPS 1

-void generateMips(gpu::Texture* texture, QImage& image, gpu::Element formatMip, bool fastResize) {
+void generateMips(gpu::Texture* texture, QImage& image, bool fastResize) {
#if CPU_MIPMAPS
    PROFILE_RANGE(resource_parse, "generateMips");
    auto numMips = texture->evalNumMips();

@@ -210,32 +215,33 @@ generateMips
        QSize mipSize(texture->evalMipWidth(level), texture->evalMipHeight(level));
        if (fastResize) {
            image = image.scaled(mipSize);
-           texture->assignStoredMip(level, formatMip, image.byteCount(), image.constBits());
+           texture->assignStoredMip(level, image.byteCount(), image.constBits());
        } else {
            QImage mipImage = image.scaled(mipSize, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
-           texture->assignStoredMip(level, formatMip, mipImage.byteCount(), mipImage.constBits());
+           texture->assignStoredMip(level, mipImage.byteCount(), mipImage.constBits());
        }
    }

#else
    texture->autoGenerateMips(-1);
#endif
}

-void generateFaceMips(gpu::Texture* texture, QImage& image, gpu::Element formatMip, uint8 face) {
+void generateFaceMips(gpu::Texture* texture, QImage& image, uint8 face) {
#if CPU_MIPMAPS
    PROFILE_RANGE(resource_parse, "generateFaceMips");
    auto numMips = texture->evalNumMips();
    for (uint16 level = 1; level < numMips; ++level) {
        QSize mipSize(texture->evalMipWidth(level), texture->evalMipHeight(level));
        QImage mipImage = image.scaled(mipSize, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
-       texture->assignStoredMipFace(level, formatMip, mipImage.byteCount(), mipImage.constBits(), face);
+       texture->assignStoredMipFace(level, face, mipImage.byteCount(), mipImage.constBits());
    }
#else
    texture->autoGenerateMips(-1);
#endif
}

-gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips) {
+gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool isStrict) {
    PROFILE_RANGE(resource_parse, "process2DTextureColorFromImage");
    bool validAlpha = false;
    bool alphaAsMask = true;

@@ -248,7 +254,11 @@ TextureUsage::process2DTextureColorFromImage
    gpu::Element formatMip;
    defineColorTexelFormats(formatGPU, formatMip, image, isLinear, doCompress);

-   theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
+   if (isStrict) {
+       theTexture = (gpu::Texture::createStrict(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
+   } else {
+       theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
+   }
    theTexture->setSource(srcImageName);
    auto usage = gpu::Texture::Usage::Builder().withColor();
    if (validAlpha) {

@@ -258,22 +268,26 @@ TextureUsage::process2DTextureColorFromImage
        }
    }
    theTexture->setUsage(usage.build());

-   theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
+   theTexture->setStoredMipFormat(formatMip);
+   theTexture->assignStoredMip(0, image.byteCount(), image.constBits());

    if (generateMips) {
-       ::generateMips(theTexture, image, formatMip, false);
+       ::generateMips(theTexture, image, false);
    }
    theTexture->setSource(srcImageName);
    }

    return theTexture;
}

+gpu::Texture* TextureUsage::createStrict2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
+    return process2DTextureColorFromImage(srcImage, srcImageName, false, false, true, true);
+}

gpu::Texture* TextureUsage::create2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false, false, true);
}

gpu::Texture* TextureUsage::createAlbedoTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}

@@ -291,21 +305,25 @@ TextureUsage::createNormalTextureFromNormalImage
    PROFILE_RANGE(resource_parse, "createNormalTextureFromNormalImage");
    QImage image = processSourceImage(srcImage, false);

-   // Make sure the normal map source image is RGBA32
-   if (image.format() != QImage::Format_RGBA8888) {
-       image = image.convertToFormat(QImage::Format_RGBA8888);
+   // Make sure the normal map source image is ARGB32
+   if (image.format() != QImage::Format_ARGB32) {
+       image = image.convertToFormat(QImage::Format_ARGB32);
    }

    gpu::Texture* theTexture = nullptr;
    if ((image.width() > 0) && (image.height() > 0)) {

-       gpu::Element formatGPU = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA);
-       gpu::Element formatMip = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA);
+       gpu::Element formatMip = gpu::Element::COLOR_BGRA_32;
+       gpu::Element formatGPU = gpu::Element::COLOR_RGBA_32;

        theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        theTexture->setSource(srcImageName);
-       theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
-       generateMips(theTexture, image, formatMip, true);
+       theTexture->setStoredMipFormat(formatMip);
+       theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
+       generateMips(theTexture, image, true);

        theTexture->setSource(srcImageName);
    }

    return theTexture;

@@ -336,16 +354,17 @@ TextureUsage::createNormalTextureFromBumpImage
    const double pStrength = 2.0;
    int width = image.width();
    int height = image.height();
-   QImage result(width, height, QImage::Format_RGB888);
+   QImage result(width, height, QImage::Format_ARGB32);

    for (int i = 0; i < width; i++) {
        const int iNextClamped = clampPixelCoordinate(i + 1, width - 1);
        const int iPrevClamped = clampPixelCoordinate(i - 1, width - 1);

        for (int j = 0; j < height; j++) {
            const int jNextClamped = clampPixelCoordinate(j + 1, height - 1);
            const int jPrevClamped = clampPixelCoordinate(j - 1, height - 1);

            // surrounding pixels
            const QRgb topLeft = image.pixel(iPrevClamped, jPrevClamped);
            const QRgb top = image.pixel(iPrevClamped, j);

@@ -355,7 +374,7 @@ TextureUsage::createNormalTextureFromBumpImage
            const QRgb bottom = image.pixel(iNextClamped, j);
            const QRgb bottomLeft = image.pixel(iNextClamped, jPrevClamped);
            const QRgb left = image.pixel(i, jPrevClamped);

            // take their gray intensities
            // since it's a grayscale image, the value of each component RGB is the same
            const double tl = qRed(topLeft);

@@ -366,15 +385,15 @@ TextureUsage::createNormalTextureFromBumpImage
            const double b = qRed(bottom);
            const double bl = qRed(bottomLeft);
            const double l = qRed(left);

            // apply the sobel filter
            const double dX = (tr + pStrength * r + br) - (tl + pStrength * l + bl);
            const double dY = (bl + pStrength * b + br) - (tl + pStrength * t + tr);
            const double dZ = RGBA_MAX / pStrength;

            glm::vec3 v(dX, dY, dZ);
            glm::normalize(v);

            // convert to rgb from the value obtained computing the filter
            QRgb qRgbValue = qRgba(mapComponent(v.x), mapComponent(v.y), mapComponent(v.z), 1.0);
            result.setPixel(i, j, qRgbValue);

@@ -382,13 +401,19 @@ TextureUsage::createNormalTextureFromBumpImage
    }

    gpu::Texture* theTexture = nullptr;
-   if ((image.width() > 0) && (image.height() > 0)) {
-       gpu::Element formatGPU = gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB);
-       gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB);
+   if ((result.width() > 0) && (result.height() > 0)) {
+       gpu::Element formatMip = gpu::Element::COLOR_BGRA_32;
+       gpu::Element formatGPU = gpu::Element::COLOR_RGBA_32;

+       theTexture = (gpu::Texture::create2D(formatGPU, result.width(), result.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
+       theTexture->setSource(srcImageName);
+       theTexture->setStoredMipFormat(formatMip);
+       theTexture->assignStoredMip(0, result.byteCount(), result.constBits());
+       generateMips(theTexture, result, true);

-       theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
-       theTexture->setSource(srcImageName);
-       theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
    }

    return theTexture;

@@ -414,16 +439,17 @@ TextureUsage::createRoughnessTextureFromImage
#ifdef COMPRESS_TEXTURES
        gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::COMPRESSED_R);
#else
-       gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::RGB);
+       gpu::Element formatGPU = gpu::Element::COLOR_R_8;
#endif
-       gpu::Element formatMip = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::RGB);
+       gpu::Element formatMip = gpu::Element::COLOR_R_8;

        theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        theTexture->setSource(srcImageName);
-       theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
-       generateMips(theTexture, image, formatMip, true);
+       theTexture->setStoredMipFormat(formatMip);
+       theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
+       generateMips(theTexture, image, true);

        // FIXME queue for transfer to GPU and block on completion
        theTexture->setSource(srcImageName);
    }

    return theTexture;

@@ -444,27 +470,28 @@ TextureUsage::createRoughnessTextureFromGlossImage
    // Gloss turned into Rough
    image.invertPixels(QImage::InvertRgba);

    image = image.convertToFormat(QImage::Format_Grayscale8);

    gpu::Texture* theTexture = nullptr;
    if ((image.width() > 0) && (image.height() > 0)) {

#ifdef COMPRESS_TEXTURES
        gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::COMPRESSED_R);
#else
-       gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::RGB);
+       gpu::Element formatGPU = gpu::Element::COLOR_R_8;
#endif
-       gpu::Element formatMip = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::RGB);
+       gpu::Element formatMip = gpu::Element::COLOR_R_8;

        theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        theTexture->setSource(srcImageName);
-       theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
-       generateMips(theTexture, image, formatMip, true);

        // FIXME queue for transfer to GPU and block on completion
+       theTexture->setStoredMipFormat(formatMip);
+       theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
+       generateMips(theTexture, image, true);

        theTexture->setSource(srcImageName);
    }

    return theTexture;
}

@@ -489,16 +516,17 @@ TextureUsage::createMetallicTextureFromImage
#ifdef COMPRESS_TEXTURES
        gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::COMPRESSED_R);
#else
-       gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::RGB);
+       gpu::Element formatGPU = gpu::Element::COLOR_R_8;
#endif
-       gpu::Element formatMip = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::RGB);
+       gpu::Element formatMip = gpu::Element::COLOR_R_8;

        theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        theTexture->setSource(srcImageName);
-       theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
-       generateMips(theTexture, image, formatMip, true);
+       theTexture->setStoredMipFormat(formatMip);
+       theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
+       generateMips(theTexture, image, true);

        // FIXME queue for transfer to GPU and block on completion
        theTexture->setSource(srcImageName);
    }

    return theTexture;

@@ -521,18 +549,18 @@ public:
    int _y = 0;
    bool _horizontalMirror = false;
    bool _verticalMirror = false;

    Face() {}
    Face(int x, int y, bool horizontalMirror, bool verticalMirror) : _x(x), _y(y), _horizontalMirror(horizontalMirror), _verticalMirror(verticalMirror) {}
};

Face _faceXPos;
Face _faceXNeg;
Face _faceYPos;
Face _faceYNeg;
Face _faceZPos;
Face _faceZNeg;

CubeLayout(int wr, int hr, Face fXP, Face fXN, Face fYP, Face fYN, Face fZP, Face fZN) :
    _type(FLAT),
    _widthRatio(wr),

@@ -775,7 +803,7 @@ TextureUsage::processCubeTextureColorFromImage
    defineColorTexelFormats(formatGPU, formatMip, image, isLinear, doCompress);

    // Find the layout of the cubemap in the 2D image
    // Use the original image size since processSourceImage may have altered the size / aspect ratio
    int foundLayout = CubeLayout::findLayout(srcImage.width(), srcImage.height());

    std::vector<QImage> faces;

@@ -810,11 +838,12 @@ TextureUsage::processCubeTextureColorFromImage
    if (faces.size() == gpu::Texture::NUM_FACES_PER_TYPE[gpu::Texture::TEX_CUBE]) {
        theTexture = gpu::Texture::createCube(formatGPU, faces[0].width(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
        theTexture->setSource(srcImageName);
+       theTexture->setStoredMipFormat(formatMip);
        int f = 0;
        for (auto& face : faces) {
-           theTexture->assignStoredMipFace(0, formatMip, face.byteCount(), face.constBits(), f);
+           theTexture->assignStoredMipFace(0, f, face.byteCount(), face.constBits());
            if (generateMips) {
-               generateFaceMips(theTexture, face, formatMip, f);
+               generateFaceMips(theTexture, face, f);
            }
            f++;
        }

@@ -829,6 +858,8 @@ TextureUsage::processCubeTextureColorFromImage
            PROFILE_RANGE(resource_parse, "generateIrradiance");
            theTexture->generateIrradiance();
        }

        theTexture->setSource(srcImageName);
    }
}

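A recurring change in this file: the stored mip format is now set once on the texture with setStoredMipFormat(), and assignStoredMip()/assignStoredMipFace() no longer take a per-call gpu::Element. A condensed sketch of the new call pattern, using only calls that appear in the hunks above; the conversion to ARGB32 and the formats chosen mirror the normal-map path, and the image source is illustrative:

#include <QImage>

// Sketch only; gpu::Texture, gpu::Element, and generateMips() are the entities diffed above.
gpu::Texture* uploadWithMips(const QImage& srcImage) {
    QImage image = srcImage.convertToFormat(QImage::Format_ARGB32);

    gpu::Element formatGPU = gpu::Element::COLOR_RGBA_32;   // GPU-side texel format
    gpu::Element formatMip = gpu::Element::COLOR_BGRA_32;   // layout of the mips handed to the texture

    gpu::Texture* texture = gpu::Texture::create2D(formatGPU, image.width(), image.height(),
                                                   gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR));
    texture->setStoredMipFormat(formatMip);                             // format declared once...
    texture->assignStoredMip(0, image.byteCount(), image.constBits());  // ...then raw data per mip level
    generateMips(texture, image, false);                                // file-local helper shown above
    return texture;
}
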
@@ -32,6 +32,7 @@ public:
    int _environmentUsage = 0;

    static gpu::Texture* create2DTextureFromImage(const QImage& image, const std::string& srcImageName);
+   static gpu::Texture* createStrict2DTextureFromImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createAlbedoTextureFromImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createEmissiveTextureFromImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createNormalTextureFromNormalImage(const QImage& image, const std::string& srcImageName);

@@ -47,7 +48,7 @@ public:
    static const QImage process2DImageColor(const QImage& srcImage, bool& validAlpha, bool& alphaAsMask);
    static void defineColorTexelFormats(gpu::Element& formatGPU, gpu::Element& formatMip,
        const QImage& srcImage, bool isLinear, bool doCompress);
-   static gpu::Texture* process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips);
+   static gpu::Texture* process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool isStrict = false);
    static gpu::Texture* processCubeTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool generateIrradiance);

};

@@ -12,7 +12,6 @@
#include "udt/PacketHeaders.h"
#include "SharedUtil.h"
#include "UUID.h"
-#include "ServerPathUtils.h"

#include <QtCore/QDataStream>

libraries/networking/src/FileCache.cpp (new file)
@@ -0,0 +1,243 @@
//
//  FileCache.cpp
//  libraries/model-networking/src
//
//  Created by Zach Pomerantz on 2/21/2017.
//  Copyright 2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "FileCache.h"

#include <cstdio>
#include <cassert>
#include <fstream>
#include <unordered_set>

#include <QDir>

#include <PathUtils.h>

Q_LOGGING_CATEGORY(file_cache, "hifi.file_cache", QtWarningMsg)

using namespace cache;

static const std::string MANIFEST_NAME = "manifest";

static const size_t BYTES_PER_MEGABYTES = 1024 * 1024;
static const size_t BYTES_PER_GIGABYTES = 1024 * BYTES_PER_MEGABYTES;
const size_t FileCache::DEFAULT_UNUSED_MAX_SIZE = 5 * BYTES_PER_GIGABYTES; // 5GB
const size_t FileCache::MAX_UNUSED_MAX_SIZE = 100 * BYTES_PER_GIGABYTES; // 100GB
const size_t FileCache::DEFAULT_OFFLINE_MAX_SIZE = 2 * BYTES_PER_GIGABYTES; // 2GB

void FileCache::setUnusedFileCacheSize(size_t unusedFilesMaxSize) {
    _unusedFilesMaxSize = std::min(unusedFilesMaxSize, MAX_UNUSED_MAX_SIZE);
    reserve(0);
    emit dirty();
}

void FileCache::setOfflineFileCacheSize(size_t offlineFilesMaxSize) {
    _offlineFilesMaxSize = std::min(offlineFilesMaxSize, MAX_UNUSED_MAX_SIZE);
}

FileCache::FileCache(const std::string& dirname, const std::string& ext, QObject* parent) :
    QObject(parent),
    _ext(ext),
    _dirname(dirname),
    _dirpath(PathUtils::getAppLocalDataFilePath(dirname.c_str()).toStdString()) {}

FileCache::~FileCache() {
    clear();
}

void fileDeleter(File* file) {
    file->deleter();
}

void FileCache::initialize() {
    QDir dir(_dirpath.c_str());

    if (dir.exists()) {
        auto nameFilters = QStringList(("*." + _ext).c_str());
        auto filters = QDir::Filters(QDir::NoDotAndDotDot | QDir::Files);
        auto sort = QDir::SortFlags(QDir::Time);
        auto files = dir.entryList(nameFilters, filters, sort);

        // load persisted files
        foreach(QString filename, files) {
            const Key key = filename.section('.', 0, 1).toStdString();
            const std::string filepath = dir.filePath(filename).toStdString();
            const size_t length = std::ifstream(filepath, std::ios::binary | std::ios::ate).tellg();
            addFile(Metadata(key, length), filepath);
        }

        qCDebug(file_cache, "[%s] Initialized %s", _dirname.c_str(), _dirpath.c_str());
    } else {
        dir.mkpath(_dirpath.c_str());
        qCDebug(file_cache, "[%s] Created %s", _dirname.c_str(), _dirpath.c_str());
    }

    _initialized = true;
}

FilePointer FileCache::addFile(Metadata&& metadata, const std::string& filepath) {
    FilePointer file(createFile(std::move(metadata), filepath).release(), &fileDeleter);
    if (file) {
        _numTotalFiles += 1;
        _totalFilesSize += file->getLength();
        file->_cache = this;
        emit dirty();

        Lock lock(_filesMutex);
        _files[file->getKey()] = file;
    }
    return file;
}

FilePointer FileCache::writeFile(const char* data, File::Metadata&& metadata) {
    assert(_initialized);

    std::string filepath = getFilepath(metadata.key);

    Lock lock(_filesMutex);

    // if file already exists, return it
    FilePointer file = getFile(metadata.key);
    if (file) {
        qCWarning(file_cache, "[%s] Attempted to overwrite %s", _dirname.c_str(), metadata.key.c_str());
        return file;
    }

    // write the new file
    FILE* saveFile = fopen(filepath.c_str(), "wb");
    if (saveFile != nullptr && fwrite(data, metadata.length, 1, saveFile) && fclose(saveFile) == 0) {
        file = addFile(std::move(metadata), filepath);
    } else {
        qCWarning(file_cache, "[%s] Failed to write %s (%s)", _dirname.c_str(), metadata.key.c_str(), strerror(errno));
        errno = 0;
    }

    return file;
}

FilePointer FileCache::getFile(const Key& key) {
    assert(_initialized);

    FilePointer file;

    Lock lock(_filesMutex);

    // check if file exists
    const auto it = _files.find(key);
    if (it != _files.cend()) {
        file = it->second.lock();
        if (file) {
            // if it exists, it is active - remove it from the cache
            removeUnusedFile(file);
            qCDebug(file_cache, "[%s] Found %s", _dirname.c_str(), key.c_str());
            emit dirty();
        } else {
            // if not, remove the weak_ptr
            _files.erase(it);
        }
    }

    return file;
}

std::string FileCache::getFilepath(const Key& key) {
    return _dirpath + '/' + key + '.' + _ext;
}

void FileCache::addUnusedFile(const FilePointer file) {
    {
        Lock lock(_filesMutex);
        _files[file->getKey()] = file;
    }

    reserve(file->getLength());
    file->_LRUKey = ++_lastLRUKey;

    {
        Lock lock(_unusedFilesMutex);
        _unusedFiles.insert({ file->_LRUKey, file });
        _numUnusedFiles += 1;
        _unusedFilesSize += file->getLength();
    }

    emit dirty();
}

void FileCache::removeUnusedFile(const FilePointer file) {
    Lock lock(_unusedFilesMutex);
    const auto it = _unusedFiles.find(file->_LRUKey);
    if (it != _unusedFiles.cend()) {
        _unusedFiles.erase(it);
        _numUnusedFiles -= 1;
        _unusedFilesSize -= file->getLength();
    }
}

void FileCache::reserve(size_t length) {
    Lock unusedLock(_unusedFilesMutex);
    while (!_unusedFiles.empty() &&
            _unusedFilesSize + length > _unusedFilesMaxSize) {
        auto it = _unusedFiles.begin();
        auto file = it->second;
        auto length = file->getLength();

        unusedLock.unlock();
        {
            file->_cache = nullptr;
            Lock lock(_filesMutex);
            _files.erase(file->getKey());
        }
        unusedLock.lock();

        _unusedFiles.erase(it);
        _numTotalFiles -= 1;
        _numUnusedFiles -= 1;
        _totalFilesSize -= length;
        _unusedFilesSize -= length;
    }
}

void FileCache::clear() {
    Lock unusedFilesLock(_unusedFilesMutex);
    for (const auto& pair : _unusedFiles) {
        auto& file = pair.second;
        file->_cache = nullptr;

        if (_totalFilesSize > _offlineFilesMaxSize) {
            _totalFilesSize -= file->getLength();
        } else {
            file->_shouldPersist = true;
            qCDebug(file_cache, "[%s] Persisting %s", _dirname.c_str(), file->getKey().c_str());
        }
    }
    _unusedFiles.clear();
}

void File::deleter() {
    if (_cache) {
        FilePointer self(this, &fileDeleter);
        _cache->addUnusedFile(self);
    } else {
        deleteLater();
    }
}

File::File(Metadata&& metadata, const std::string& filepath) :
    _key(std::move(metadata.key)),
    _length(metadata.length),
    _filepath(filepath) {}

File::~File() {
    QFile file(getFilepath().c_str());
    if (file.exists() && !_shouldPersist) {
        qCInfo(file_cache, "Unlinked %s", getFilepath().c_str());
        file.remove();
    }
}

libraries/networking/src/FileCache.h (new file)
@@ -0,0 +1,158 @@
//
//  FileCache.h
//  libraries/networking/src
//
//  Created by Zach Pomerantz on 2/21/2017.
//  Copyright 2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_FileCache_h
#define hifi_FileCache_h

#include <atomic>
#include <memory>
#include <cstddef>
#include <map>
#include <mutex>
#include <string>
#include <unordered_map>

#include <QObject>
#include <QLoggingCategory>

Q_DECLARE_LOGGING_CATEGORY(file_cache)

namespace cache {

class File;
using FilePointer = std::shared_ptr<File>;

class FileCache : public QObject {
    Q_OBJECT
    Q_PROPERTY(size_t numTotal READ getNumTotalFiles NOTIFY dirty)
    Q_PROPERTY(size_t numCached READ getNumCachedFiles NOTIFY dirty)
    Q_PROPERTY(size_t sizeTotal READ getSizeTotalFiles NOTIFY dirty)
    Q_PROPERTY(size_t sizeCached READ getSizeCachedFiles NOTIFY dirty)

    static const size_t DEFAULT_UNUSED_MAX_SIZE;
    static const size_t MAX_UNUSED_MAX_SIZE;
    static const size_t DEFAULT_OFFLINE_MAX_SIZE;

public:
    size_t getNumTotalFiles() const { return _numTotalFiles; }
    size_t getNumCachedFiles() const { return _numUnusedFiles; }
    size_t getSizeTotalFiles() const { return _totalFilesSize; }
    size_t getSizeCachedFiles() const { return _unusedFilesSize; }

    void setUnusedFileCacheSize(size_t unusedFilesMaxSize);
    size_t getUnusedFileCacheSize() const { return _unusedFilesSize; }

    void setOfflineFileCacheSize(size_t offlineFilesMaxSize);

    // initialize FileCache with a directory name (not a path, ex.: "temp_jpgs") and an ext (ex.: "jpg")
    FileCache(const std::string& dirname, const std::string& ext, QObject* parent = nullptr);
    virtual ~FileCache();

    using Key = std::string;
    struct Metadata {
        Metadata(const Key& key, size_t length) :
            key(key), length(length) {}
        Key key;
        size_t length;
    };

    // derived classes should implement a setter/getter, for example, for a FileCache backing a network cache:
    //
    // DerivedFilePointer writeFile(const char* data, DerivedMetadata&& metadata) {
    //  return writeFile(data, std::forward(metadata));
    // }
    //
    // DerivedFilePointer getFile(const QUrl& url) {
    //  auto key = lookup_hash_for(url); // assuming hashing url in create/evictedFile overrides
    //  return getFile(key);
    // }

signals:
    void dirty();

protected:
    /// must be called after construction to create the cache on the fs and restore persisted files
    void initialize();

    FilePointer writeFile(const char* data, Metadata&& metadata);
    FilePointer getFile(const Key& key);

    /// create a file
    virtual std::unique_ptr<File> createFile(Metadata&& metadata, const std::string& filepath) = 0;

private:
    using Mutex = std::recursive_mutex;
    using Lock = std::unique_lock<Mutex>;

    friend class File;

    std::string getFilepath(const Key& key);

    FilePointer addFile(Metadata&& metadata, const std::string& filepath);
    void addUnusedFile(const FilePointer file);
    void removeUnusedFile(const FilePointer file);
    void reserve(size_t length);
    void clear();

    std::atomic<size_t> _numTotalFiles { 0 };
    std::atomic<size_t> _numUnusedFiles { 0 };
    std::atomic<size_t> _totalFilesSize { 0 };
    std::atomic<size_t> _unusedFilesSize { 0 };

    std::string _ext;
    std::string _dirname;
    std::string _dirpath;
    bool _initialized { false };

    std::unordered_map<Key, std::weak_ptr<File>> _files;
    Mutex _filesMutex;

    std::map<int, FilePointer> _unusedFiles;
    Mutex _unusedFilesMutex;
    size_t _unusedFilesMaxSize { DEFAULT_UNUSED_MAX_SIZE };
    int _lastLRUKey { 0 };

    size_t _offlineFilesMaxSize { DEFAULT_OFFLINE_MAX_SIZE };
};

class File : public QObject {
    Q_OBJECT

public:
    using Key = FileCache::Key;
    using Metadata = FileCache::Metadata;

    Key getKey() const { return _key; }
    size_t getLength() const { return _length; }
    std::string getFilepath() const { return _filepath; }

    virtual ~File();
    /// overrides should call File::deleter to maintain caching behavior
    virtual void deleter();

protected:
    /// when constructed, the file has already been created/written
    File(Metadata&& metadata, const std::string& filepath);

private:
    friend class FileCache;

    const Key _key;
    const size_t _length;
    const std::string _filepath;

    FileCache* _cache;
    int _LRUKey { 0 };

    bool _shouldPersist { false };
};

}

#endif // hifi_FileCache_h

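The comment block in FileCache.h sketches how a subclass is expected to wrap the protected writeFile()/getFile() with its own key scheme. A hedged, illustrative subclass along those lines; the class names, directory/extension strings, and the SHA-256 hashing choice are assumptions for the example and are not part of this commit:

#include <memory>
#include <string>
#include <QByteArray>
#include <QCryptographicHash>
#include <QUrl>

// Illustrative only; cache::FileCache and cache::File are the classes added above.
class HashFile : public cache::File {
public:
    HashFile(Metadata&& metadata, const std::string& filepath) :
        File(std::move(metadata), filepath) {}  // base ctor is protected, so a derived File type is needed
};

class HashFileCache : public cache::FileCache {
public:
    HashFileCache() : FileCache("example_cache", "bin") { initialize(); } // dirname/ext are examples

    cache::FilePointer writeFile(const QUrl& url, const QByteArray& data) {
        // derive the key from the URL, as the header comment suggests
        auto key = QCryptographicHash::hash(url.toEncoded(), QCryptographicHash::Sha256).toHex().toStdString();
        return FileCache::writeFile(data.constData(), Metadata(key, data.size()));
    }

protected:
    std::unique_ptr<cache::File> createFile(Metadata&& metadata, const std::string& filepath) override {
        return std::unique_ptr<cache::File>(new HashFile(std::move(metadata), filepath));
    }
};
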
@@ -3,7 +3,7 @@ AUTOSCRIBE_SHADER_LIB(gpu model render)
# pull in the resources.qrc file
qt5_add_resources(QT_RESOURCES_FILE "${CMAKE_CURRENT_SOURCE_DIR}/res/fonts/fonts.qrc")
setup_hifi_library(Widgets OpenGL Network Qml Quick Script)
-link_hifi_libraries(shared gpu model model-networking render animation fbx entities)
+link_hifi_libraries(shared ktx gpu model model-networking render animation fbx entities)

if (NOT ANDROID)
    target_nsight()

@@ -52,7 +52,7 @@ const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
    _antialiasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("antialiasing"));
    auto format = gpu::Element::COLOR_SRGBA_32; // DependencyManager::get<FramebufferCache>()->getLightingTexture()->getTexelFormat();
    auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
-   _antialiasingTexture = gpu::TexturePointer(gpu::Texture::create2D(format, width, height, defaultSampler));
+   _antialiasingTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(format, width, height, defaultSampler));
    _antialiasingBuffer->setRenderBuffer(0, _antialiasingTexture);
}

@@ -53,9 +53,9 @@ void DeferredFramebuffer::allocate() {
    auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);

-   _deferredColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
-   _deferredNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(linearFormat, width, height, defaultSampler));
-   _deferredSpecularTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
+   _deferredColorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(colorFormat, width, height, defaultSampler));
+   _deferredNormalTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(linearFormat, width, height, defaultSampler));
+   _deferredSpecularTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(colorFormat, width, height, defaultSampler));

    _deferredFramebuffer->setRenderBuffer(0, _deferredColorTexture);
    _deferredFramebuffer->setRenderBuffer(1, _deferredNormalTexture);

@@ -65,7 +65,7 @@ void DeferredFramebuffer::allocate() {
    auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
    if (!_primaryDepthTexture) {
-       _primaryDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, width, height, defaultSampler));
+       _primaryDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(depthFormat, width, height, defaultSampler));
    }

    _deferredFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);

@@ -75,7 +75,7 @@ void DeferredFramebuffer::allocate() {
    auto smoothSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR);

-   _lightingTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10), width, height, defaultSampler));
+   _lightingTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10), width, height, defaultSampler));
    _lightingFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("lighting"));
    _lightingFramebuffer->setRenderBuffer(0, _lightingTexture);
    _lightingFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);

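This and the following render-side hunks apply the same mechanical substitution: textures that back framebuffer attachments are now allocated with gpu::Texture::createRenderBuffer() instead of create2D(), with the same arguments. A minimal sketch of the new allocation, assembled only from calls that appear in these hunks; the framebuffer name is illustrative:

// Sketch of a color attachment allocated the new way.
gpu::FramebufferPointer makeColorTarget(int width, int height) {
    auto framebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("example")); // illustrative name
    auto sampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
    auto colorTexture = gpu::TexturePointer(
        gpu::Texture::createRenderBuffer(gpu::Element::COLOR_SRGBA_32, width, height, sampler));
    framebuffer->setRenderBuffer(0, colorTexture);
    return framebuffer;
}
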
@@ -496,14 +496,14 @@ PreparePrimaryFramebuffer::run
    auto colorFormat = gpu::Element::COLOR_SRGBA_32;

    auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
-   auto primaryColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, frameSize.x, frameSize.y, defaultSampler));
+   auto primaryColorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(colorFormat, frameSize.x, frameSize.y, defaultSampler));

    _primaryFramebuffer->setRenderBuffer(0, primaryColorTexture);

    auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
-   auto primaryDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, frameSize.x, frameSize.y, defaultSampler));
+   auto primaryDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(depthFormat, frameSize.x, frameSize.y, defaultSampler));

    _primaryFramebuffer->setDepthStencilBuffer(primaryDepthTexture, depthFormat);
}

@@ -64,7 +64,7 @@ float fetchRoughnessMap(vec2 uv) {
uniform sampler2D normalMap;
vec3 fetchNormalMap(vec2 uv) {
    // unpack normal, swizzle to get into hifi tangent space with Y axis pointing out
-   return normalize(texture(normalMap, uv).xzy -vec3(0.5, 0.5, 0.5));
+   return normalize(texture(normalMap, uv).rbg -vec3(0.5, 0.5, 0.5));
}
<@endif@>

@@ -194,7 +194,7 @@ RenderDeferredTask::RenderDeferredTask(RenderFetchCullSortTask::Output items) {
    {
        // Grab a texture map representing the different status icons and assign that to the drawStatsuJob
        auto iconMapPath = PathUtils::resourcesPath() + "icons/statusIconAtlas.svg";
-       auto statusIconMap = DependencyManager::get<TextureCache>()->getImageTexture(iconMapPath);
+       auto statusIconMap = DependencyManager::get<TextureCache>()->getImageTexture(iconMapPath, NetworkTexture::STRICT_TEXTURE);
        addJob<DrawStatus>("DrawStatus", opaques, DrawStatus(statusIconMap));
    }
}

@@ -414,7 +414,7 @@ SubsurfaceScatteringResource::generateScatteringProfile
    const int PROFILE_RESOLUTION = 512;
    // const auto pixelFormat = gpu::Element::COLOR_SRGBA_32;
    const auto pixelFormat = gpu::Element::COLOR_R11G11B10;
-   auto profileMap = gpu::TexturePointer(gpu::Texture::create2D(pixelFormat, PROFILE_RESOLUTION, 1, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
+   auto profileMap = gpu::TexturePointer(gpu::Texture::createRenderBuffer(pixelFormat, PROFILE_RESOLUTION, 1, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
    profileMap->setSource("Generated Scattering Profile");
    diffuseProfileGPU(profileMap, args);
    return profileMap;

@@ -425,7 +425,7 @@ SubsurfaceScatteringResource::generatePreIntegratedScattering
    const int TABLE_RESOLUTION = 512;
    // const auto pixelFormat = gpu::Element::COLOR_SRGBA_32;
    const auto pixelFormat = gpu::Element::COLOR_R11G11B10;
-   auto scatteringLUT = gpu::TexturePointer(gpu::Texture::create2D(pixelFormat, TABLE_RESOLUTION, TABLE_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
+   auto scatteringLUT = gpu::TexturePointer(gpu::Texture::createRenderBuffer(pixelFormat, TABLE_RESOLUTION, TABLE_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
    //diffuseScatter(scatteringLUT);
    scatteringLUT->setSource("Generated pre-integrated scattering");
    diffuseScatterGPU(profile, scatteringLUT, args);

@@ -434,7 +434,7 @@ SubsurfaceScatteringResource::generatePreIntegratedScattering
gpu::TexturePointer SubsurfaceScatteringResource::generateScatteringSpecularBeckmann(RenderArgs* args) {
    const int SPECULAR_RESOLUTION = 256;
-   auto beckmannMap = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32 /*gpu::Element(gpu::SCALAR, gpu::HALF, gpu::RGB)*/, SPECULAR_RESOLUTION, SPECULAR_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
+   auto beckmannMap = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32 /*gpu::Element(gpu::SCALAR, gpu::HALF, gpu::RGB)*/, SPECULAR_RESOLUTION, SPECULAR_RESOLUTION, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
    beckmannMap->setSource("Generated beckmannMap");
    computeSpecularBeckmannGPU(beckmannMap, args);
    return beckmannMap;

@@ -72,18 +72,18 @@ void LinearDepthFramebuffer::allocate() {
    auto height = _frameSize.y;

    // For Linear Depth:
-   _linearDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), width, height,
+   _linearDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), width, height,
        gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
    _linearDepthFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("linearDepth"));
    _linearDepthFramebuffer->setRenderBuffer(0, _linearDepthTexture);
    _linearDepthFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, _primaryDepthTexture->getTexelFormat());

    // For Downsampling:
-   _halfLinearDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
+   _halfLinearDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
        gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
    _halfLinearDepthTexture->autoGenerateMips(5);

-   _halfNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
+   _halfNormalTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB), _halfFrameSize.x, _halfFrameSize.y,
        gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));

    _downsampleFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("halfLinearDepth"));

@@ -304,15 +304,15 @@ void SurfaceGeometryFramebuffer::allocate() {
    auto width = _frameSize.x;
    auto height = _frameSize.y;

-   _curvatureTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _curvatureTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
    _curvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::curvature"));
    _curvatureFramebuffer->setRenderBuffer(0, _curvatureTexture);

-   _lowCurvatureTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _lowCurvatureTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
    _lowCurvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::lowCurvature"));
    _lowCurvatureFramebuffer->setRenderBuffer(0, _lowCurvatureTexture);

-   _blurringTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _blurringTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
    _blurringFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::blurring"));
    _blurringFramebuffer->setRenderBuffer(0, _blurringTexture);
}

@@ -209,7 +209,8 @@ void Font::read(QIODevice& in) {
    }
    _texture = gpu::TexturePointer(gpu::Texture::create2D(formatGPU, image.width(), image.height(),
        gpu::Sampler(gpu::Sampler::FILTER_MIN_POINT_MAG_LINEAR)));
-   _texture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
+   _texture->setStoredMipFormat(formatMip);
+   _texture->assignStoredMip(0, image.byteCount(), image.constBits());
}

void Font::setupGPU() {

@@ -3,6 +3,6 @@ AUTOSCRIBE_SHADER_LIB(gpu model)
setup_hifi_library()

# render needs octree only for getAccuracyAngle(float, int)
-link_hifi_libraries(shared gpu model octree)
+link_hifi_libraries(shared ktx gpu model octree)

target_nsight()

@@ -21,7 +21,7 @@
#include <QtCore/QStandardPaths>
#include <QtCore/QVariant>

-#include "ServerPathUtils.h"
+#include "PathUtils.h"
#include "SharedLogging.h"

QVariantMap HifiConfigVariantMap::mergeCLParametersWithJSONConfig(const QStringList& argumentList) {

@@ -127,7 +127,7 @@ void HifiConfigVariantMap::loadConfig(const QStringList& argumentList) {
        _userConfigFilename = argumentList[userConfigIndex + 1];
    } else {
        // we weren't passed a user config path
-       _userConfigFilename = ServerPathUtils::getDataFilePath(USER_CONFIG_FILE_NAME);
+       _userConfigFilename = PathUtils::getAppDataFilePath(USER_CONFIG_FILE_NAME);

        // as of 1/19/2016 this path was moved so we attempt a migration for first run post migration here

@@ -153,7 +153,7 @@ void HifiConfigVariantMap::loadConfig(const QStringList& argumentList) {
        // we have the old file and not the new file - time to copy the file

        // make the destination directory if it doesn't exist
-       auto dataDirectory = ServerPathUtils::getDataDirectory();
+       auto dataDirectory = PathUtils::getAppDataPath();
        if (QDir().mkpath(dataDirectory)) {
            if (oldConfigFile.copy(_userConfigFilename)) {
                qCDebug(shared) << "Migrated config file from" << oldConfigFilename << "to" << _userConfigFilename;

@@ -30,18 +30,20 @@ const QString& PathUtils::resourcesPath() {
    return staticResourcePath;
}

-QString PathUtils::getRootDataDirectory() {
-    auto dataPath = QStandardPaths::writableLocation(QStandardPaths::HomeLocation);
-
-#ifdef Q_OS_WIN
-    dataPath += "/AppData/Roaming/";
-#elif defined(Q_OS_OSX)
-    dataPath += "/Library/Application Support/";
-#else
-    dataPath += "/.local/share/";
-#endif
-
-    return dataPath;
+QString PathUtils::getAppDataPath() {
+    return QStandardPaths::writableLocation(QStandardPaths::AppDataLocation) + "/";
+}
+
+QString PathUtils::getAppLocalDataPath() {
+    return QStandardPaths::writableLocation(QStandardPaths::AppLocalDataLocation) + "/";
+}
+
+QString PathUtils::getAppDataFilePath(const QString& filename) {
+    return QDir(getAppDataPath()).absoluteFilePath(filename);
+}
+
+QString PathUtils::getAppLocalDataFilePath(const QString& filename) {
+    return QDir(getAppLocalDataPath()).absoluteFilePath(filename);
}

QString fileNameWithoutExtension(const QString& fileName, const QVector<QString> possibleExtensions) {

@@ -27,7 +27,12 @@ class PathUtils : public QObject, public Dependency {
    Q_PROPERTY(QString resources READ resourcesPath)
public:
    static const QString& resourcesPath();
-   static QString getRootDataDirectory();
+
+   static QString getAppDataPath();
+   static QString getAppLocalDataPath();
+
+   static QString getAppDataFilePath(const QString& filename);
+   static QString getAppLocalDataFilePath(const QString& filename);

    static Qt::CaseSensitivity getFSCaseSensitivity();
    static QString stripFilename(const QUrl& url);

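The net effect of the PathUtils changes: the hand-rolled per-OS path assembly is replaced by QStandardPaths, and callers compose file paths through two helpers, as the server code above now does. A small hedged sketch of the composition, using only the functions declared above; the filenames are illustrative:

#include <QString>

void exampleConfigPaths() {
    // AppDataLocation already includes the organization and application name
    // (e.g. ~/.local/share/<Org>/<App>/ on Linux).
    QString configPath = PathUtils::getAppDataFilePath("config.json");     // illustrative filename
    QString cachePath  = PathUtils::getAppLocalDataFilePath("ktx_cache");  // illustrative directory name
}
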
libraries/shared/src/ServerPathUtils.cpp (deleted)
@@ -1,31 +0,0 @@
//
//  ServerPathUtils.cpp
//  libraries/shared/src
//
//  Created by Ryan Huffman on 01/12/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "ServerPathUtils.h"

#include <QStandardPaths>
#include <QtCore/QDir>
#include <QtWidgets/qapplication.h>
#include <QDebug>

#include "PathUtils.h"

QString ServerPathUtils::getDataDirectory() {
    auto dataPath = PathUtils::getRootDataDirectory();

    dataPath += qApp->organizationName() + "/" + qApp->applicationName();

    return QDir::cleanPath(dataPath);
}

QString ServerPathUtils::getDataFilePath(QString filename) {
    return QDir(getDataDirectory()).absoluteFilePath(filename);
}

libraries/shared/src/ServerPathUtils.h (deleted)
@@ -1,22 +0,0 @@
//
//  ServerPathUtils.h
//  libraries/shared/src
//
//  Created by Ryan Huffman on 01/12/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_ServerPathUtils_h
#define hifi_ServerPathUtils_h

#include <QString>

namespace ServerPathUtils {
    QString getDataDirectory();
    QString getDataFilePath(QString filename);
}

#endif // hifi_ServerPathUtils_h

libraries/shared/src/shared/Storage.cpp (new file)
@@ -0,0 +1,92 @@
//
//  Created by Bradley Austin Davis on 2016/02/17
//  Copyright 2013-2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "Storage.h"

#include <QtCore/QFileInfo>
#include <QtCore/QDebug>
#include <QtCore/QLoggingCategory>

Q_LOGGING_CATEGORY(storagelogging, "hifi.core.storage")

using namespace storage;

ViewStorage::ViewStorage(const storage::StoragePointer& owner, size_t size, const uint8_t* data)
    : _owner(owner), _size(size), _data(data) {}

StoragePointer Storage::createView(size_t viewSize, size_t offset) const {
    auto selfSize = size();
    if (0 == viewSize) {
        viewSize = selfSize;
    }
    if ((viewSize + offset) > selfSize) {
        throw std::runtime_error("Invalid mapping range");
    }
    return std::make_shared<ViewStorage>(shared_from_this(), viewSize, data() + offset);
}

StoragePointer Storage::toMemoryStorage() const {
    return std::make_shared<MemoryStorage>(size(), data());
}

StoragePointer Storage::toFileStorage(const QString& filename) const {
    return FileStorage::create(filename, size(), data());
}

MemoryStorage::MemoryStorage(size_t size, const uint8_t* data) {
    _data.resize(size);
    if (data) {
        memcpy(_data.data(), data, size);
    }
}

StoragePointer FileStorage::create(const QString& filename, size_t size, const uint8_t* data) {
    QFile file(filename);
    if (!file.open(QFile::ReadWrite | QIODevice::Truncate)) {
        throw std::runtime_error("Unable to open file for writing");
    }
    if (!file.resize(size)) {
        throw std::runtime_error("Unable to resize file");
    }
    {
        auto mapped = file.map(0, size);
        if (!mapped) {
            throw std::runtime_error("Unable to map file");
        }
        memcpy(mapped, data, size);
        if (!file.unmap(mapped)) {
            throw std::runtime_error("Unable to unmap file");
        }
    }
    file.close();
    return std::make_shared<FileStorage>(filename);
}

FileStorage::FileStorage(const QString& filename) : _file(filename) {
    if (_file.open(QFile::ReadOnly)) {
        _mapped = _file.map(0, _file.size());
        if (_mapped) {
            _valid = true;
        } else {
            qCWarning(storagelogging) << "Failed to map file " << filename;
        }
    } else {
        qCWarning(storagelogging) << "Failed to open file " << filename;
    }
}

FileStorage::~FileStorage() {
    if (_mapped) {
        if (!_file.unmap(_mapped)) {
            throw std::runtime_error("Unable to unmap file");
        }
    }
    if (_file.isOpen()) {
        _file.close();
    }
}

libraries/shared/src/shared/Storage.h (new file)
@@ -0,0 +1,82 @@
//
//  Created by Bradley Austin Davis on 2016/02/17
//  Copyright 2013-2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#pragma once
#ifndef hifi_Storage_h
#define hifi_Storage_h

#include <stdint.h>
#include <vector>
#include <memory>
#include <QFile>
#include <QString>

namespace storage {
    class Storage;
    using StoragePointer = std::shared_ptr<const Storage>;

    class Storage : public std::enable_shared_from_this<Storage> {
    public:
        virtual ~Storage() {}
        virtual const uint8_t* data() const = 0;
        virtual size_t size() const = 0;
        virtual operator bool() const { return true; }

        StoragePointer createView(size_t size = 0, size_t offset = 0) const;
        StoragePointer toFileStorage(const QString& filename) const;
        StoragePointer toMemoryStorage() const;

        // Aliases to prevent having to re-write a ton of code
        inline size_t getSize() const { return size(); }
        inline const uint8_t* readData() const { return data(); }
    };

    class MemoryStorage : public Storage {
    public:
        MemoryStorage(size_t size, const uint8_t* data = nullptr);
        const uint8_t* data() const override { return _data.data(); }
        uint8_t* data() { return _data.data(); }
        size_t size() const override { return _data.size(); }
        operator bool() const override { return true; }
    private:
        std::vector<uint8_t> _data;
    };

    class FileStorage : public Storage {
    public:
        static StoragePointer create(const QString& filename, size_t size, const uint8_t* data);
        FileStorage(const QString& filename);
        ~FileStorage();
        // Prevent copying
        FileStorage(const FileStorage& other) = delete;
        FileStorage& operator=(const FileStorage& other) = delete;

        const uint8_t* data() const override { return _mapped; }
        size_t size() const override { return _file.size(); }
        operator bool() const override { return _valid; }
    private:
        bool _valid { false };
        QFile _file;
        uint8_t* _mapped { nullptr };
    };

    class ViewStorage : public Storage {
    public:
        ViewStorage(const storage::StoragePointer& owner, size_t size, const uint8_t* data);
        const uint8_t* data() const override { return _data; }
        size_t size() const override { return _size; }
        operator bool() const override { return *_owner; }
    private:
        const storage::StoragePointer _owner;
        const size_t _size;
        const uint8_t* _data;
    };

}

#endif // hifi_Storage_h

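A short usage sketch for the storage abstraction added above: back a blob with a memory-mapped file, hand out a zero-copy view of a sub-range, then detach it into memory. Everything here uses only the API declared in Storage.h; the filename and sizes are illustrative:

#include <vector>

// storage::* comes from Storage.h above.
void storageExample() {
    std::vector<uint8_t> bytes(1024, 0x42);  // illustrative payload

    // Persist to disk via a memory-mapped file; create() maps the file, copies the bytes in,
    // and returns a read-only FileStorage over the mapping.
    storage::StoragePointer file = storage::FileStorage::create("example.bin", bytes.size(), bytes.data());

    // Zero-copy view of a sub-range; the view holds its owner pointer, keeping the mapping alive.
    storage::StoragePointer view = file->createView(256, 128);

    // Detach from the file by copying the view into a MemoryStorage.
    storage::StoragePointer inMemory = view->toMemoryStorage();
    (void)inMemory;
}
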
@@ -255,7 +255,7 @@ void OculusLegacyDisplayPlugin::hmdPresent() {
    memset(eyePoses, 0, sizeof(ovrPosef) * 2);
    eyePoses[0].Orientation = eyePoses[1].Orientation = ovrRotation;

-   GLint texture = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0), false);
+   GLint texture = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0));
    auto sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glFlush();
    if (_hmdWindow->makeCurrent()) {

@@ -494,9 +494,9 @@ void OpenVrDisplayPlugin::customizeContext() {
        _compositeInfos[0].texture = _compositeFramebuffer->getRenderBuffer(0);
        for (size_t i = 0; i < COMPOSITING_BUFFER_SIZE; ++i) {
            if (0 != i) {
                _compositeInfos[i].texture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element::COLOR_RGBA_32, _renderTargetSize.x, _renderTargetSize.y, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT)));
                _compositeInfos[i].texture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, _renderTargetSize.x, _renderTargetSize.y, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT)));
            }
            _compositeInfos[i].textureID = getGLBackend()->getTextureID(_compositeInfos[i].texture, false);
            _compositeInfos[i].textureID = getGLBackend()->getTextureID(_compositeInfos[i].texture);
        }
        _submitThread->_canvas = _submitCanvas;
        _submitThread->start(QThread::HighPriority);
@@ -624,7 +624,7 @@ void OpenVrDisplayPlugin::compositeLayers() {
        glFlush();

        if (!newComposite.textureID) {
            newComposite.textureID = getGLBackend()->getTextureID(newComposite.texture, false);
            newComposite.textureID = getGLBackend()->getTextureID(newComposite.texture);
        }
        withPresentThreadLock([&] {
            _submitThread->update(newComposite);
@@ -638,7 +638,7 @@ void OpenVrDisplayPlugin::hmdPresent() {
    if (_threadedSubmit) {
        _submitThread->waitForPresent();
    } else {
        GLuint glTexId = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0), false);
        GLuint glTexId = getGLBackend()->getTextureID(_compositeFramebuffer->getRenderBuffer(0));
        vr::Texture_t vrTexture { (void*)glTexId, vr::API_OpenGL, vr::ColorSpace_Auto };
        vr::VRCompositor()->Submit(vr::Eye_Left, &vrTexture, &OPENVR_TEXTURE_BOUNDS_LEFT);
        vr::VRCompositor()->Submit(vr::Eye_Right, &vrTexture, &OPENVR_TEXTURE_BOUNDS_RIGHT);
1  scripts/developer/tests/.gitignore  vendored  Normal file

@@ -0,0 +1 @@
cube_texture.ktx
BIN  scripts/developer/tests/scaling.png  Normal file
Binary file not shown (new image, 3.1 KiB).
15  tests/ktx/CMakeLists.txt  Normal file

@@ -0,0 +1,15 @@
set(TARGET_NAME ktx-test)

if (WIN32)
    SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /ignore:4049 /ignore:4217")
endif()

# This is not a testcase -- just set it up as a regular hifi project
setup_hifi_project(Quick Gui OpenGL)
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/")

# link in the shared libraries
link_hifi_libraries(shared octree ktx gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics)

package_libraries_for_deployment()
150  tests/ktx/src/main.cpp  Normal file

@@ -0,0 +1,150 @@
//
//  Created by Bradley Austin Davis on 2016/07/01
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <iostream>
#include <string>
#include <vector>
#include <sstream>

#include <QProcessEnvironment>

#include <QtCore/QDir>
#include <QtCore/QElapsedTimer>
#include <QtCore/QLoggingCategory>
#include <QtCore/QRegularExpression>
#include <QtCore/QSettings>
#include <QtCore/QTimer>
#include <QtCore/QThread>
#include <QtCore/QThreadPool>

#include <QtGui/QGuiApplication>
#include <QtGui/QResizeEvent>
#include <QtGui/QWindow>

#include <QtWidgets/QFileDialog>
#include <QtWidgets/QInputDialog>
#include <QtWidgets/QMessageBox>
#include <QtWidgets/QApplication>

#include <shared/RateCounter.h>
#include <shared/NetworkUtils.h>
#include <shared/FileLogger.h>
#include <shared/FileUtils.h>
#include <StatTracker.h>
#include <LogHandler.h>

#include <gpu/Texture.h>
#include <gl/Config.h>
#include <model/TextureMap.h>
#include <ktx/KTX.h>

QSharedPointer<FileLogger> logger;

gpu::Texture* cacheTexture(const std::string& name, gpu::Texture* srcTexture, bool write = true, bool read = true);

void messageHandler(QtMsgType type, const QMessageLogContext& context, const QString& message) {
    QString logMessage = LogHandler::getInstance().printMessage((LogMsgType)type, context, message);

    if (!logMessage.isEmpty()) {
#ifdef Q_OS_WIN
        OutputDebugStringA(logMessage.toLocal8Bit().constData());
        OutputDebugStringA("\n");
#endif
        logger->addMessage(qPrintable(logMessage + "\n"));
    }
}

const char * LOG_FILTER_RULES = R"V0G0N(
hifi.gpu=true
)V0G0N";

QString getRootPath() {
    static std::once_flag once;
    static QString result;
    std::call_once(once, [&] {
        QFileInfo file(__FILE__);
        QDir parent = file.absolutePath();
        result = QDir::cleanPath(parent.currentPath() + "/../../..");
    });
    return result;
}

const QString TEST_IMAGE = getRootPath() + "/scripts/developer/tests/cube_texture.png";
const QString TEST_IMAGE_KTX = getRootPath() + "/scripts/developer/tests/cube_texture.ktx";

int main(int argc, char** argv) {
    QApplication app(argc, argv);
    QCoreApplication::setApplicationName("KTX");
    QCoreApplication::setOrganizationName("High Fidelity");
    QCoreApplication::setOrganizationDomain("highfidelity.com");
    logger.reset(new FileLogger());

    Q_ASSERT(sizeof(ktx::Header) == 12 + (sizeof(uint32_t) * 13));

    DependencyManager::set<tracing::Tracer>();
    qInstallMessageHandler(messageHandler);
    QLoggingCategory::setFilterRules(LOG_FILTER_RULES);

    QImage image(TEST_IMAGE);
    gpu::Texture* testTexture = model::TextureUsage::process2DTextureColorFromImage(image, TEST_IMAGE.toStdString(), true, false, true);

    auto ktxMemory = gpu::Texture::serialize(*testTexture);
    {
        const auto& ktxStorage = ktxMemory->getStorage();
        QFile outFile(TEST_IMAGE_KTX);
        if (!outFile.open(QFile::Truncate | QFile::ReadWrite)) {
            throw std::runtime_error("Unable to open file");
        }
        auto ktxSize = ktxStorage->size();
        outFile.resize(ktxSize);
        auto dest = outFile.map(0, ktxSize);
        memcpy(dest, ktxStorage->data(), ktxSize);
        outFile.unmap(dest);
        outFile.close();
    }

    auto ktxFile = ktx::KTX::create(std::shared_ptr<storage::Storage>(new storage::FileStorage(TEST_IMAGE_KTX)));
    {
        const auto& memStorage = ktxMemory->getStorage();
        const auto& fileStorage = ktxFile->getStorage();
        Q_ASSERT(memStorage->size() == fileStorage->size());
        Q_ASSERT(memStorage->data() != fileStorage->data());
        Q_ASSERT(0 == memcmp(memStorage->data(), fileStorage->data(), memStorage->size()));
        Q_ASSERT(ktxFile->_images.size() == ktxMemory->_images.size());
        auto imageCount = ktxFile->_images.size();
        auto startMemory = ktxMemory->_storage->data();
        auto startFile = ktxFile->_storage->data();
        for (size_t i = 0; i < imageCount; ++i) {
            auto memImages = ktxMemory->_images[i];
            auto fileImages = ktxFile->_images[i];
            Q_ASSERT(memImages._padding == fileImages._padding);
            Q_ASSERT(memImages._numFaces == fileImages._numFaces);
            Q_ASSERT(memImages._imageSize == fileImages._imageSize);
            Q_ASSERT(memImages._faceSize == fileImages._faceSize);
            Q_ASSERT(memImages._faceBytes.size() == memImages._numFaces);
            Q_ASSERT(fileImages._faceBytes.size() == fileImages._numFaces);
            auto faceCount = fileImages._numFaces;
            for (uint32_t face = 0; face < faceCount; ++face) {
                auto memFace = memImages._faceBytes[face];
                auto memOffset = memFace - startMemory;
                auto fileFace = fileImages._faceBytes[face];
                auto fileOffset = fileFace - startFile;
                Q_ASSERT(memOffset % 4 == 0);
                Q_ASSERT(memOffset == fileOffset);
            }
        }
    }
    testTexture->setKtxBacking(ktxFile);
    return 0;
}

#include "main.moc"
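The write block in main() above maps the destination file and copies the serialized KTX storage into it without checking whether resize() or map() succeeded. A hedged variant of the same pattern with those checks added (illustrative only; the helper name is made up, and only the QFile calls already used above appear):

// Illustrative helper, not part of this commit: same QFile resize/map/copy pattern as the
// test above, with error handling on resize() and map().
#include <QtCore/QFile>
#include <cstring>
#include <stdexcept>

static void writeBlobToFile(const QString& path, const uint8_t* data, qint64 size) {
    QFile outFile(path);
    if (!outFile.open(QFile::Truncate | QFile::ReadWrite)) {
        throw std::runtime_error("Unable to open file");
    }
    if (!outFile.resize(size)) {
        throw std::runtime_error("Unable to resize file");
    }
    uchar* dest = outFile.map(0, size);
    if (!dest) {
        throw std::runtime_error("Unable to map file");
    }
    memcpy(dest, data, (size_t)size);
    outFile.unmap(dest);
    outFile.close();
}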
@@ -10,7 +10,7 @@ setup_hifi_project(Quick Gui OpenGL)
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/")

# link in the shared libraries
link_hifi_libraries(shared octree gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics)
link_hifi_libraries(shared octree ktx gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics)

package_libraries_for_deployment()
@@ -642,7 +642,6 @@ protected:
            gpu::Texture::setAllowedGPUMemoryUsage(MB_TO_BYTES(64));
            return;

        default:
            break;
    }
@@ -48,6 +48,7 @@
#include <gpu/gl/GLTexture.h>
#include <gpu/StandardShaderLib.h>

#include <GenericThread.h>
#include <AddressManager.h>
#include <NodeList.h>
#include <TextureCache.h>
75  tests/shared/src/StorageTests.cpp  Normal file

@@ -0,0 +1,75 @@
//
//  Created by Bradley Austin Davis on 2016/02/17
//  Copyright 2013-2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "StorageTests.h"

QTEST_MAIN(StorageTests)

using namespace storage;

StorageTests::StorageTests() {
    for (size_t i = 0; i < _testData.size(); ++i) {
        _testData[i] = (uint8_t)rand();
    }
    _testFile = QDir::tempPath() + "/" + QUuid::createUuid().toString();
}

StorageTests::~StorageTests() {
    QFileInfo fileInfo(_testFile);
    if (fileInfo.exists()) {
        QFile(_testFile).remove();
    }
}

void StorageTests::testConversion() {
    {
        QFileInfo fileInfo(_testFile);
        QCOMPARE(fileInfo.exists(), false);
    }
    StoragePointer storagePointer = std::make_unique<MemoryStorage>(_testData.size(), _testData.data());
    QCOMPARE(storagePointer->size(), (quint64)_testData.size());
    QCOMPARE(memcmp(_testData.data(), storagePointer->data(), _testData.size()), 0);
    // Convert to a file
    storagePointer = storagePointer->toFileStorage(_testFile);
    {
        QFileInfo fileInfo(_testFile);
        QCOMPARE(fileInfo.exists(), true);
        QCOMPARE(fileInfo.size(), (qint64)_testData.size());
    }
    QCOMPARE(storagePointer->size(), (quint64)_testData.size());
    QCOMPARE(memcmp(_testData.data(), storagePointer->data(), _testData.size()), 0);

    // Convert to memory
    storagePointer = storagePointer->toMemoryStorage();
    QCOMPARE(storagePointer->size(), (quint64)_testData.size());
    QCOMPARE(memcmp(_testData.data(), storagePointer->data(), _testData.size()), 0);
    {
        // ensure the file is unaffected
        QFileInfo fileInfo(_testFile);
        QCOMPARE(fileInfo.exists(), true);
        QCOMPARE(fileInfo.size(), (qint64)_testData.size());
    }

    // truncate the data as a new memory object
    auto newSize = _testData.size() / 2;
    storagePointer = std::make_unique<MemoryStorage>(newSize, storagePointer->data());
    QCOMPARE(storagePointer->size(), (quint64)newSize);
    QCOMPARE(memcmp(_testData.data(), storagePointer->data(), newSize), 0);

    // Convert back to file
    storagePointer = storagePointer->toFileStorage(_testFile);
    QCOMPARE(storagePointer->size(), (quint64)newSize);
    QCOMPARE(memcmp(_testData.data(), storagePointer->data(), newSize), 0);
    {
        // ensure the file is truncated
        QFileInfo fileInfo(_testFile);
        QCOMPARE(fileInfo.exists(), true);
        QCOMPARE(fileInfo.size(), (qint64)newSize);
    }
}
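The conversion test above covers the memory/file round trip but not Storage::createView. A sketch of what a view check could look like, in the same QCOMPARE/Q_ASSERT style (illustrative only, not part of the commit; it assumes a view of size N at offset O aliases the owner's bytes starting at O, which is the natural reading of the declaration but is not spelled out in this diff):

// Illustrative only, not part of this commit. Assumes createView(size, offset) returns a
// storage object that aliases the owner's data at the given offset without copying.
#include <shared/Storage.h>
#include <array>
#include <cstring>

void viewCheckSketch() {
    std::array<uint8_t, 1024> data;
    for (size_t i = 0; i < data.size(); ++i) {
        data[i] = (uint8_t)i;
    }
    storage::StoragePointer owner = std::make_unique<storage::MemoryStorage>(data.size(), data.data());
    auto half = data.size() / 2;
    storage::StoragePointer view = owner->createView(half, half);
    Q_ASSERT(view->size() == half);
    Q_ASSERT(0 == memcmp(view->data(), data.data() + half, half));
}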
32  tests/shared/src/StorageTests.h  Normal file

@@ -0,0 +1,32 @@
//
//  Created by Bradley Austin Davis on 2016/02/17
//  Copyright 2013-2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_StorageTests_h
#define hifi_StorageTests_h

#include <QtTest/QtTest>

#include <shared/Storage.h>
#include <array>

class StorageTests : public QObject {
    Q_OBJECT

public:
    StorageTests();
    ~StorageTests();

private slots:
    void testConversion();

private:
    std::array<uint8_t, 1025> _testData;
    QString _testFile;
};

#endif // hifi_StorageTests_h