Merge pull request #10215 from Atlante45/nvtt
Add textures BC compression
Commit: c20a4da96d
60 changed files with 1592 additions and 1370 deletions
cmake/externals/nvtt/CMakeLists.txt (new file, vendored, 87 lines)
@@ -0,0 +1,87 @@
include(ExternalProject)
include(SelectLibraryConfigurations)

set(EXTERNAL_NAME nvtt)

string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER)

if (WIN32)
  ExternalProject_Add(
    ${EXTERNAL_NAME}
    URL http://s3.amazonaws.com/hifi-public/dependencies/nvtt-win-2.1.0.zip
    URL_MD5 3ea6eeadbcc69071acf9c49ba565760e
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    INSTALL_COMMAND ""
    LOG_DOWNLOAD 1
  )

  ExternalProject_Get_Property(${EXTERNAL_NAME} SOURCE_DIR)

  set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${SOURCE_DIR}/include CACHE PATH "Location of NVTT include directory")
  set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${SOURCE_DIR}/Release/x64/nvtt.lib CACHE FILEPATH "Path to NVTT release library")
  set(${EXTERNAL_NAME_UPPER}_DLL_PATH "${SOURCE_DIR}/Release/x64" CACHE PATH "Location of NVTT release DLL")
else ()
  if (ANDROID)
    set(ANDROID_CMAKE_ARGS "-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}" "-DANDROID_NATIVE_API_LEVEL=19")
  endif ()

  ExternalProject_Add(
    ${EXTERNAL_NAME}
    URL http://hifi-public.s3.amazonaws.com/dependencies/nvidia-texture-tools-2.1.0.zip
    URL_MD5 81b8fa6a9ee3f986088eb6e2215d6a57
    CMAKE_ARGS ${ANDROID_CMAKE_ARGS} -DNVTT_SHARED=1 -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
    LOG_DOWNLOAD 1
    LOG_CONFIGURE 1
    LOG_BUILD 1
  )

  ExternalProject_Get_Property(${EXTERNAL_NAME} INSTALL_DIR)

  set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${INSTALL_DIR}/include CACHE PATH "Location of NVTT include directory")

  if (APPLE)
    set(_LIB_EXT "dylib")
  else ()
    set(_LIB_EXT "so")
  endif ()

  set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${INSTALL_DIR}/lib/libnvtt.${_LIB_EXT} CACHE FILEPATH "Path to NVTT library")

  if (APPLE)
    # on OS X we have to use install_name_tool to fix the paths found in the NVTT shared libraries
    # so that they can be found and linked during the linking phase
    set(_NVTT_LIB_DIR "${INSTALL_DIR}/lib")

    # first fix the install names of all present libraries
    ExternalProject_Add_Step(
      ${EXTERNAL_NAME}
      change-install-name
      COMMENT "Calling install_name_tool on NVTT libraries to fix install name for dylib linking"
      COMMAND ${CMAKE_COMMAND} -DINSTALL_NAME_LIBRARY_DIR=${_NVTT_LIB_DIR} -P ${EXTERNAL_PROJECT_DIR}/OSXInstallNameChange.cmake
      DEPENDEES install
      WORKING_DIRECTORY <INSTALL_DIR>
      LOG 1
    )

    # then, for the main library (libnvtt) fix the paths to the dependency libraries (core, image, math)
    ExternalProject_Add_Step(
      ${EXTERNAL_NAME}
      change-dependency-paths
      COMMENT "Calling install_name_tool on NVTT libraries to fix paths for dependency libraries"
      COMMAND install_name_tool -change libnvimage.dylib ${INSTALL_DIR}/lib/libnvimage.dylib libnvtt.dylib
      COMMAND install_name_tool -change libnvcore.dylib ${INSTALL_DIR}/lib/libnvcore.dylib libnvtt.dylib
      COMMAND install_name_tool -change libnvmath.dylib ${INSTALL_DIR}/lib/libnvmath.dylib libnvtt.dylib
      COMMAND install_name_tool -change libnvcore.dylib ${INSTALL_DIR}/lib/libnvcore.dylib libnvimage.dylib
      COMMAND install_name_tool -change libnvmath.dylib ${INSTALL_DIR}/lib/libnvmath.dylib libnvimage.dylib
      COMMAND install_name_tool -change libnvcore.dylib ${INSTALL_DIR}/lib/libnvcore.dylib libnvmath.dylib
      DEPENDEES install
      WORKING_DIRECTORY <INSTALL_DIR>/lib
      LOG 1
    )
  endif ()
endif ()

# Hide this external target (for IDE users)
set_target_properties(${EXTERNAL_NAME} PROPERTIES FOLDER "hidden/externals")
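Note: the nvtt external above is what gives the engine a BC (DXT/RGTC) compressor to use at texture load time. As a point of reference only, a minimal sketch of driving the NVTT 2.1 API is shown below; the format choice, the output-handler wiring, and the function name are assumptions for illustration, not code from this pull request.

// Hypothetical sketch: compress one RGBA8 image to BC3 with NVTT 2.1.
// It only illustrates the library API, not the engine's actual call sites.
#include <nvtt/nvtt.h>
#include <vector>
#include <cstdint>

std::vector<uint8_t> compressToBC3(const uint8_t* bgra, int width, int height) {
    struct Collector : public nvtt::OutputHandler {
        std::vector<uint8_t> bytes;
        void beginImage(int, int, int, int, int, int) override {}
        bool writeData(const void* data, int size) override {
            const uint8_t* p = static_cast<const uint8_t*>(data);
            bytes.insert(bytes.end(), p, p + size);
            return true;
        }
        void endImage() override {}
    } collector;

    nvtt::InputOptions input;
    input.setTextureLayout(nvtt::TextureType_2D, width, height);
    input.setMipmapData(bgra, width, height);   // NVTT expects BGRA8 input by default
    input.setMipmapGeneration(false);           // compress only the level we were given

    nvtt::CompressionOptions compression;
    compression.setFormat(nvtt::Format_BC3);    // DXT5: 16 bytes per 4x4 block

    nvtt::OutputOptions output;
    output.setOutputHeader(false);              // raw blocks only, no DDS header
    output.setOutputHandler(&collector);

    nvtt::Compressor compressor;
    compressor.process(input, compression, output);
    return collector.bytes;
}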
cmake/modules/FindNVTT.cmake (new file, 37 lines)
@@ -0,0 +1,37 @@
#
#  FindNVTT.cmake
#
#  Try to find NVIDIA texture tools library and include path.
#  Once done this will define
#
#  NVTT_FOUND
#  NVTT_INCLUDE_DIRS
#  NVTT_LIBRARIES
#  NVTT_DLL_PATH
#
#  Created on 4/14/2017 by Stephen Birarda
#  Copyright 2017 High Fidelity, Inc.
#
#  Distributed under the Apache License, Version 2.0.
#  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
#

include("${MACRO_DIR}/HifiLibrarySearchHints.cmake")
hifi_library_search_hints("nvtt")

find_path(NVTT_INCLUDE_DIRS nvtt/nvtt.h PATH_SUFFIXES include HINTS ${NVTT_SEARCH_DIRS})

include(FindPackageHandleStandardArgs)

find_library(NVTT_LIBRARY_RELEASE nvtt PATH_SUFFIXES "lib" "Release.x64/lib" HINTS ${NVTT_SEARCH_DIRS})
find_library(NVTT_LIBRARY_DEBUG nvtt PATH_SUFFIXES "lib" "Debug.x64/lib" HINTS ${NVTT_SEARCH_DIRS})

include(SelectLibraryConfigurations)
select_library_configurations(NVTT)

if (WIN32)
  find_path(NVTT_DLL_PATH nvtt.dll PATH_SUFFIXES "Release.x64/bin" HINTS ${NVTT_SEARCH_DIRS})
  find_package_handle_standard_args(NVTT DEFAULT_MSG NVTT_INCLUDE_DIRS NVTT_LIBRARIES NVTT_DLL_PATH)
else ()
  find_package_handle_standard_args(NVTT DEFAULT_MSG NVTT_INCLUDE_DIRS NVTT_LIBRARIES)
endif ()
@@ -194,7 +194,7 @@ link_hifi_libraries(
     recording fbx networking model-networking entities avatars
     audio audio-client animation script-engine physics
     render-utils entities-renderer avatars-renderer ui auto-updater
-    controllers plugins
+    controllers plugins image
     ui-plugins display-plugins input-plugins
     ${NON_ANDROID_LIBRARIES}
 )
@@ -211,6 +211,11 @@ Item {
             text: "Downloads: " + root.downloads + "/" + root.downloadLimit +
                   ", Pending: " + root.downloadsPending;
         }
+        StatText {
+            visible: root.expanded;
+            text: "Processing: " + root.processing +
+                  ", Pending: " + root.processingPending;
+        }
         StatText {
             visible: root.expanded && root.downloadUrls.length > 0;
             text: "Download URLs:"
@@ -1445,8 +1445,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
     QString skyboxUrl { PathUtils::resourcesPath() + "images/Default-Sky-9-cubemap.jpg" };
     QString skyboxAmbientUrl { PathUtils::resourcesPath() + "images/Default-Sky-9-ambient.jpg" };

-    _defaultSkyboxTexture = textureCache->getImageTexture(skyboxUrl, NetworkTexture::CUBE_TEXTURE, { { "generateIrradiance", false } });
-    _defaultSkyboxAmbientTexture = textureCache->getImageTexture(skyboxAmbientUrl, NetworkTexture::CUBE_TEXTURE, { { "generateIrradiance", true } });
+    _defaultSkyboxTexture = textureCache->getImageTexture(skyboxUrl, image::TextureUsage::CUBE_TEXTURE, { { "generateIrradiance", false } });
+    _defaultSkyboxAmbientTexture = textureCache->getImageTexture(skyboxAmbientUrl, image::TextureUsage::CUBE_TEXTURE, { { "generateIrradiance", true } });

     _defaultSkybox->setCubemap(_defaultSkyboxTexture);
@@ -94,7 +94,7 @@ void ApplicationOverlay::renderQmlUi(RenderArgs* renderArgs) {
     PROFILE_RANGE(app, __FUNCTION__);

     if (!_uiTexture) {
-        _uiTexture = gpu::TexturePointer(gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda()));
+        _uiTexture = gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda());
         _uiTexture->setSource(__FUNCTION__);
     }
     // Once we move UI rendering and screen rendering to different
@@ -207,13 +207,13 @@ void ApplicationOverlay::buildFramebufferObject() {
     auto width = uiSize.x;
     auto height = uiSize.y;
     if (!_overlayFramebuffer->getDepthStencilBuffer()) {
-        auto overlayDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(DEPTH_FORMAT, width, height, gpu::Texture::SINGLE_MIP, DEFAULT_SAMPLER));
+        auto overlayDepthTexture = gpu::Texture::createRenderBuffer(DEPTH_FORMAT, width, height, gpu::Texture::SINGLE_MIP, DEFAULT_SAMPLER);
         _overlayFramebuffer->setDepthStencilBuffer(overlayDepthTexture, DEPTH_FORMAT);
     }

     if (!_overlayFramebuffer->getRenderBuffer(0)) {
         const gpu::Sampler OVERLAY_SAMPLER(gpu::Sampler::FILTER_MIN_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP);
-        auto colorBuffer = gpu::TexturePointer(gpu::Texture::createRenderBuffer(COLOR_FORMAT, width, height, gpu::Texture::SINGLE_MIP, OVERLAY_SAMPLER));
+        auto colorBuffer = gpu::Texture::createRenderBuffer(COLOR_FORMAT, width, height, gpu::Texture::SINGLE_MIP, OVERLAY_SAMPLER);
         _overlayFramebuffer->setRenderBuffer(0, colorBuffer);
     }
 }
@@ -31,6 +31,7 @@
 #include "Menu.h"
 #include "Util.h"
 #include "SequenceNumberStats.h"
+#include "StatTracker.h"

 HIFI_QML_DEF(Stats)

@@ -250,6 +251,9 @@ void Stats::updateStats(bool force) {
     STAT_UPDATE(downloads, loadingRequests.size());
     STAT_UPDATE(downloadLimit, ResourceCache::getRequestLimit())
     STAT_UPDATE(downloadsPending, ResourceCache::getPendingRequestCount());
+    STAT_UPDATE(processing, DependencyManager::get<StatTracker>()->getStat("Processing").toInt());
+    STAT_UPDATE(processingPending, DependencyManager::get<StatTracker>()->getStat("PendingProcessing").toInt());
+

     // See if the active download urls have changed
     bool shouldUpdateUrls = _downloads != _downloadUrls.size();
@@ -89,6 +89,8 @@ class Stats : public QQuickItem {
     STATS_PROPERTY(int, downloadLimit, 0)
     STATS_PROPERTY(int, downloadsPending, 0)
     Q_PROPERTY(QStringList downloadUrls READ downloadUrls NOTIFY downloadUrlsChanged)
+    STATS_PROPERTY(int, processing, 0)
+    STATS_PROPERTY(int, processingPending, 0)
     STATS_PROPERTY(int, triangles, 0)
     STATS_PROPERTY(int, quads, 0)
     STATS_PROPERTY(int, materialSwitches, 0)
@@ -214,6 +216,8 @@ signals:
     void downloadLimitChanged();
     void downloadsPendingChanged();
     void downloadUrlsChanged();
+    void processingChanged();
+    void processingPendingChanged();
     void trianglesChanged();
     void quadsChanged();
     void materialSwitchesChanged();
@@ -298,7 +298,7 @@ void Web3DOverlay::render(RenderArgs* args) {

     if (!_texture) {
         auto webSurface = _webSurface;
-        _texture = gpu::TexturePointer(gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda()));
+        _texture = gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda());
         _texture->setSource(__FUNCTION__);
     }
     OffscreenQmlSurface::TextureAndFence newTextureAndFence;
@@ -354,12 +354,11 @@ void OpenGLDisplayPlugin::customizeContext() {
         }
         if ((image.width() > 0) && (image.height() > 0)) {

-            cursorData.texture.reset(
-                gpu::Texture::createStrict(
+            cursorData.texture = gpu::Texture::createStrict(
                 gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA),
                 image.width(), image.height(),
                 gpu::Texture::MAX_NUM_MIPS,
-                gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
+                gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR));
             cursorData.texture->setSource("cursor texture");
             auto usage = gpu::Texture::Usage::Builder().withColor().withAlpha();
             cursorData.texture->setUsage(usage.build());
@@ -295,12 +295,11 @@ void HmdDisplayPlugin::internalPresent() {
         image = image.mirrored();
         image = image.convertToFormat(QImage::Format_RGBA8888);
         if (!_previewTexture) {
-            _previewTexture.reset(
-                gpu::Texture::createStrict(
+            _previewTexture = gpu::Texture::createStrict(
                 gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA),
                 image.width(), image.height(),
                 gpu::Texture::MAX_NUM_MIPS,
-                gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
+                gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR));
             _previewTexture->setSource("HMD Preview Texture");
             _previewTexture->setUsage(gpu::Texture::Usage::Builder().withColor().build());
             _previewTexture->setStoredMipFormat(gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
@@ -1,7 +1,7 @@
 set(TARGET_NAME entities-renderer)
 AUTOSCRIBE_SHADER_LIB(gpu model procedural render render-utils)
 setup_hifi_library(Widgets Network Script)
-link_hifi_libraries(shared gpu procedural model model-networking script-engine render render-utils)
+link_hifi_libraries(shared gpu procedural model model-networking script-engine render render-utils image)

 target_bullet()

@@ -495,7 +495,7 @@ bool EntityTreeRenderer::applySkyboxAndHasAmbient() {

     bool isAmbientSet = false;
     if (_pendingAmbientTexture && !_ambientTexture) {
-        _ambientTexture = textureCache->getTexture(_ambientTextureURL, NetworkTexture::CUBE_TEXTURE);
+        _ambientTexture = textureCache->getTexture(_ambientTextureURL, image::TextureUsage::CUBE_TEXTURE);
     }
     if (_ambientTexture && _ambientTexture->isLoaded()) {
         _pendingAmbientTexture = false;
@@ -512,7 +512,7 @@ bool EntityTreeRenderer::applySkyboxAndHasAmbient() {

     if (_pendingSkyboxTexture &&
         (!_skyboxTexture || (_skyboxTexture->getURL() != _skyboxTextureURL))) {
-        _skyboxTexture = textureCache->getTexture(_skyboxTextureURL, NetworkTexture::CUBE_TEXTURE);
+        _skyboxTexture = textureCache->getTexture(_skyboxTextureURL, image::TextureUsage::CUBE_TEXTURE);
     }
     if (_skyboxTexture && _skyboxTexture->isLoaded()) {
         _pendingSkyboxTexture = false;
@@ -216,7 +216,7 @@ void RenderableWebEntityItem::render(RenderArgs* args) {

     if (!_texture) {
         auto webSurface = _webSurface;
-        _texture = gpu::TexturePointer(gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda()));
+        _texture = gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda());
         _texture->setSource(__FUNCTION__);
     }
     OffscreenQmlSurface::TextureAndFence newTextureAndFence;
@@ -76,10 +76,6 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
         }
         break;

-    case gpu::COMPRESSED_R:
-        result = GL_COMPRESSED_RED_RGTC1;
-        break;
-
     case gpu::R11G11B10:
         // the type should be float
         result = GL_R11F_G11F_B10F;
@@ -149,12 +145,6 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
     case gpu::SRGBA:
         result = GL_SRGB8; // standard 2.2 gamma correction color
         break;
-    case gpu::COMPRESSED_RGB:
-        result = GL_COMPRESSED_RGB;
-        break;
-    case gpu::COMPRESSED_SRGB:
-        result = GL_COMPRESSED_SRGB;
-        break;
     default:
         qCWarning(gpugllogging) << "Unknown combination of texel format";
     }
@@ -217,29 +207,22 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
     case gpu::SRGBA:
         result = GL_SRGB8_ALPHA8; // standard 2.2 gamma correction color
         break;
-    case gpu::COMPRESSED_RGBA:
-        result = GL_COMPRESSED_RGBA;
-        break;
-    case gpu::COMPRESSED_SRGBA:
-        result = GL_COMPRESSED_SRGB_ALPHA;
-        break;
-
-    // FIXME: WE will want to support this later
-    /*
-    case gpu::COMPRESSED_BC3_RGBA:
-        result = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
-        break;
-    case gpu::COMPRESSED_BC3_SRGBA:
-        result = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
-        break;
-    case gpu::COMPRESSED_BC7_RGBA:
-        result = GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
-        break;
-    case gpu::COMPRESSED_BC7_SRGBA:
-        result = GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM;
-        break;
-    */
+    case gpu::COMPRESSED_BC4_RED:
+        result = GL_COMPRESSED_RED_RGTC1;
+        break;
+    case gpu::COMPRESSED_BC1_SRGB:
+        result = GL_COMPRESSED_SRGB_S3TC_DXT1_EXT;
+        break;
+    case gpu::COMPRESSED_BC1_SRGBA:
+        result = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT;
+        break;
+    case gpu::COMPRESSED_BC3_SRGBA:
+        result = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
+        break;
+    case gpu::COMPRESSED_BC5_XY:
+        result = GL_COMPRESSED_RG_RGTC2;
+        break;

     default:
         qCWarning(gpugllogging) << "Unknown combination of texel format";
@@ -269,10 +252,6 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
         texel.internalFormat = GL_R8;
         break;

-    case gpu::COMPRESSED_R:
-        texel.internalFormat = GL_COMPRESSED_RED_RGTC1;
-        break;
-
     case gpu::DEPTH:
         texel.format = GL_DEPTH_COMPONENT;
         texel.internalFormat = GL_DEPTH_COMPONENT32;
@@ -315,12 +294,6 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
     case gpu::RGBA:
         texel.internalFormat = GL_RGB8;
         break;
-    case gpu::COMPRESSED_RGB:
-        texel.internalFormat = GL_COMPRESSED_RGB;
-        break;
-    case gpu::COMPRESSED_SRGB:
-        texel.internalFormat = GL_COMPRESSED_SRGB;
-        break;
     default:
         qCWarning(gpugllogging) << "Unknown combination of texel format";
     }
@@ -359,30 +332,22 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
         texel.internalFormat = GL_SRGB8_ALPHA8;
         break;

-    case gpu::COMPRESSED_RGBA:
-        texel.internalFormat = GL_COMPRESSED_RGBA;
-        break;
-    case gpu::COMPRESSED_SRGBA:
-        texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA;
-        break;
-
-    // FIXME: WE will want to support this later
-    /*
-    case gpu::COMPRESSED_BC3_RGBA:
-        texel.internalFormat = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
-        break;
-    case gpu::COMPRESSED_BC3_SRGBA:
-        texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
-        break;
-    case gpu::COMPRESSED_BC7_RGBA:
-        texel.internalFormat = GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
-        break;
-    case gpu::COMPRESSED_BC7_SRGBA:
-        texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM;
-        break;
-    */
+    case gpu::COMPRESSED_BC4_RED:
+        texel.internalFormat = GL_COMPRESSED_RED_RGTC1;
+        break;
+    case gpu::COMPRESSED_BC1_SRGB:
+        texel.internalFormat = GL_COMPRESSED_SRGB_S3TC_DXT1_EXT;
+        break;
+    case gpu::COMPRESSED_BC1_SRGBA:
+        texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT;
+        break;
+    case gpu::COMPRESSED_BC3_SRGBA:
+        texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
+        break;
+    case gpu::COMPRESSED_BC5_XY:
+        texel.internalFormat = GL_COMPRESSED_RG_RGTC2;
+        break;

     default:
         qCWarning(gpugllogging) << "Unknown combination of texel format";
@@ -403,10 +368,6 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
     texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];

     switch (dstFormat.getSemantic()) {
-    case gpu::COMPRESSED_R: {
-        texel.internalFormat = GL_COMPRESSED_RED_RGTC1;
-        break;
-    }
     case gpu::RED:
     case gpu::RGB:
     case gpu::RGBA:
@@ -564,12 +525,6 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
     case gpu::SRGBA:
         texel.internalFormat = GL_SRGB8; // standard 2.2 gamma correction color
         break;
-    case gpu::COMPRESSED_RGB:
-        texel.internalFormat = GL_COMPRESSED_RGB;
-        break;
-    case gpu::COMPRESSED_SRGB:
-        texel.internalFormat = GL_COMPRESSED_SRGB;
-        break;
     default:
         qCWarning(gpugllogging) << "Unknown combination of texel format";
     }
@@ -646,11 +601,21 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
     case gpu::SRGBA:
         texel.internalFormat = GL_SRGB8_ALPHA8; // standard 2.2 gamma correction color
         break;
-    case gpu::COMPRESSED_RGBA:
-        texel.internalFormat = GL_COMPRESSED_RGBA;
-        break;
-    case gpu::COMPRESSED_SRGBA:
-        texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA;
-        break;
+    case gpu::COMPRESSED_BC4_RED:
+        texel.internalFormat = GL_COMPRESSED_RED_RGTC1;
+        break;
+    case gpu::COMPRESSED_BC1_SRGB:
+        texel.internalFormat = GL_COMPRESSED_SRGB_S3TC_DXT1_EXT;
+        break;
+    case gpu::COMPRESSED_BC1_SRGBA:
+        texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT;
+        break;
+    case gpu::COMPRESSED_BC3_SRGBA:
+        texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
+        break;
+    case gpu::COMPRESSED_BC5_XY:
+        texel.internalFormat = GL_COMPRESSED_RG_RGTC2;
+        break;
     default:
         qCWarning(gpugllogging) << "Unknown combination of texel format";
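Note: across the GLTexelFormat hunks above, the old generic COMPRESSED_* semantics are replaced by explicit BC semantics, each tied to one sized GL internal format. The standalone program below summarizes what each of those BC formats stores and its fixed per-block cost (standard S3TC/RGTC facts; the table itself is only an illustration, not engine code).

// Sketch: meaning and cost of the BC semantics introduced above.
#include <cstdio>

struct BCFormatInfo {
    const char* semantic;    // name used by the new gpu::Semantic values
    const char* payload;     // what the 4x4 blocks encode
    unsigned blockBytes;     // bytes per 4x4 texel block
};

static const BCFormatInfo kBCFormats[] = {
    { "COMPRESSED_BC1_SRGB",  "sRGB color, no alpha (DXT1)",               8 },
    { "COMPRESSED_BC1_SRGBA", "sRGB color + 1-bit alpha mask (DXT1)",      8 },
    { "COMPRESSED_BC3_SRGBA", "sRGB color + smooth alpha (DXT5)",         16 },
    { "COMPRESSED_BC4_RED",   "single channel (RGTC1)",                    8 },
    { "COMPRESSED_BC5_XY",    "two channels, e.g. normal map XY (RGTC2)", 16 },
};

int main() {
    // A 1024x1024 mip has 256*256 = 65536 blocks.
    for (const auto& f : kBCFormats) {
        std::printf("%-22s %-44s %u KiB per 1024x1024 mip\n",
                    f.semantic, f.payload, 65536u * f.blockBytes / 1024u);
    }
}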
@@ -120,11 +120,12 @@ void GLTexture::copyMipFaceFromTexture(uint16_t sourceMip, uint16_t targetMip, u
     }
     auto size = _gpuObject.evalMipDimensions(sourceMip);
     auto mipData = _gpuObject.accessStoredMipFace(sourceMip, face);
+    auto mipSize = _gpuObject.getStoredMipFaceSize(sourceMip, face);
     if (mipData) {
         GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), _gpuObject.getStoredMipFormat());
-        copyMipFaceLinesFromTexture(targetMip, face, size, 0, texelFormat.format, texelFormat.type, mipData->readData());
+        copyMipFaceLinesFromTexture(targetMip, face, size, 0, texelFormat.internalFormat, texelFormat.format, texelFormat.type, mipSize, mipData->readData());
     } else {
         qCDebug(gpugllogging) << "Missing mipData level=" << sourceMip << " face=" << (int)face << " for texture " << _gpuObject.source().c_str();
     }
 }
@@ -203,9 +204,11 @@ TransferJob::TransferJob(const GLTexture& parent, uint16_t sourceMip, uint16_t t

     auto transferDimensions = _parent._gpuObject.evalMipDimensions(sourceMip);
     GLenum format;
+    GLenum internalFormat;
     GLenum type;
     GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_parent._gpuObject.getTexelFormat(), _parent._gpuObject.getStoredMipFormat());
     format = texelFormat.format;
+    internalFormat = texelFormat.internalFormat;
     type = texelFormat.type;
     auto mipSize = _parent._gpuObject.getStoredMipFaceSize(sourceMip, face);

@@ -236,7 +239,7 @@ TransferJob::TransferJob(const GLTexture& parent, uint16_t sourceMip, uint16_t t
     Backend::updateTextureTransferPendingSize(0, _transferSize);

     _transferLambda = [=] {
-        _parent.copyMipFaceLinesFromTexture(targetMip, face, transferDimensions, lineOffset, format, type, _buffer.data());
+        _parent.copyMipFaceLinesFromTexture(targetMip, face, transferDimensions, lineOffset, internalFormat, format, type, _buffer.size(), _buffer.data());
         std::vector<uint8_t> emptyVector;
         _buffer.swap(emptyVector);
     };
@@ -163,7 +163,7 @@ public:
 protected:
     virtual Size size() const = 0;
     virtual void generateMips() const = 0;
-    virtual void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum format, GLenum type, const void* sourcePointer) const = 0;
+    virtual void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum internalFormat, GLenum format, GLenum type, Size sourceSize, const void* sourcePointer) const = 0;
     virtual void copyMipFaceFromTexture(uint16_t sourceMip, uint16_t targetMip, uint8_t face) const final;

     GLTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id);
@@ -177,7 +177,7 @@ public:
 protected:
     GLExternalTexture(const std::weak_ptr<gl::GLBackend>& backend, const Texture& texture, GLuint id);
     void generateMips() const override {}
-    void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum format, GLenum type, const void* sourcePointer) const override {}
+    void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum internalFormat, GLenum format, GLenum type, Size sourceSize, const void* sourcePointer) const override {}

     Size size() const override { return 0; }
 };
@@ -50,7 +50,7 @@ public:
 protected:
     GL41Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
     void generateMips() const override;
-    void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum format, GLenum type, const void* sourcePointer) const override;
+    void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum internalFormat, GLenum format, GLenum type, Size sourceSize, const void* sourcePointer) const override;
     virtual void syncSampler() const;

     void withPreservedTexture(std::function<void()> f) const;
@@ -105,7 +105,7 @@ public:
     void promote() override;
     void demote() override;
     void populateTransferQueue() override;
-    void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum format, GLenum type, const void* sourcePointer) const override;
+    void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum internalFormat, GLenum format, GLenum type, Size sourceSize, const void* sourcePointer) const override;

     Size size() const override { return _size; }
     Size _size { 0 };
@@ -92,12 +92,37 @@ void GL41Texture::generateMips() const {
     (void)CHECK_GL_ERROR();
 }

-void GL41Texture::copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum format, GLenum type, const void* sourcePointer) const {
+void GL41Texture::copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum internalFormat, GLenum format, GLenum type, Size sourceSize, const void* sourcePointer) const {
     if (GL_TEXTURE_2D == _target) {
-        glTexSubImage2D(_target, mip, 0, yOffset, size.x, size.y, format, type, sourcePointer);
+        switch (internalFormat) {
+            case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+            case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+            case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+            case GL_COMPRESSED_RED_RGTC1:
+            case GL_COMPRESSED_RG_RGTC2:
+                glCompressedTexSubImage2D(_target, mip, 0, yOffset, size.x, size.y, internalFormat,
+                                          static_cast<GLsizei>(sourceSize), sourcePointer);
+                break;
+            default:
+                glTexSubImage2D(_target, mip, 0, yOffset, size.x, size.y, format, type, sourcePointer);
+                break;
+        }
     } else if (GL_TEXTURE_CUBE_MAP == _target) {
         auto target = GLTexture::CUBE_FACE_LAYOUT[face];
-        glTexSubImage2D(target, mip, 0, yOffset, size.x, size.y, format, type, sourcePointer);
+
+        switch (internalFormat) {
+            case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+            case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+            case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+            case GL_COMPRESSED_RED_RGTC1:
+            case GL_COMPRESSED_RG_RGTC2:
+                glCompressedTexSubImage2D(target, mip, 0, yOffset, size.x, size.y, internalFormat,
+                                          static_cast<GLsizei>(sourceSize), sourcePointer);
+                break;
+            default:
+                glTexSubImage2D(target, mip, 0, yOffset, size.x, size.y, format, type, sourcePointer);
+                break;
+        }
     } else {
         assert(false);
     }
@@ -251,9 +276,9 @@ void GL41VariableAllocationTexture::allocateStorage(uint16 allocatedMip) {
 }


-void GL41VariableAllocationTexture::copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum format, GLenum type, const void* sourcePointer) const {
+void GL41VariableAllocationTexture::copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum internalFormat, GLenum format, GLenum type, Size sourceSize, const void* sourcePointer) const {
     withPreservedTexture([&] {
-        Parent::copyMipFaceLinesFromTexture(mip, face, size, yOffset, format, type, sourcePointer);
+        Parent::copyMipFaceLinesFromTexture(mip, face, size, yOffset, internalFormat, format, type, sourceSize, sourcePointer);
     });
 }
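Note: the new sourceSize parameter threaded through copyMipFaceLinesFromTexture is the byte count that glCompressedTexSubImage2D needs, and for BC data that is a whole-block count times a fixed block size rather than width * height * bytes per pixel. A standalone sketch of that arithmetic follows; the helper name is illustrative, not engine code.

// Sketch: byte size of one BC-compressed mip level, i.e. the kind of value passed
// as sourceSize above. Standard 4x4-block math; the function name is made up.
#include <cstddef>
#include <cstdint>

std::size_t compressedMipSizeBytes(uint32_t width, uint32_t height, bool smallBlock /* BC1/BC4 */) {
    // Dimensions round up to whole 4x4 blocks; even a 1x1 mip occupies one block.
    const uint32_t blocksWide = (width + 3) / 4;
    const uint32_t blocksHigh = (height + 3) / 4;
    const std::size_t blockBytes = smallBlock ? 8 : 16;   // BC1/BC4: 8 bytes, BC3/BC5: 16 bytes
    return std::size_t(blocksWide) * blocksHigh * blockBytes;
}

// Example: a 10x6 BC1 mip spans 3x2 blocks, so 6 * 8 = 48 bytes,
// which is what glCompressedTexSubImage2D expects for that level.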
@@ -48,7 +48,7 @@ public:
 protected:
     GL45Texture(const std::weak_ptr<GLBackend>& backend, const Texture& texture);
     void generateMips() const override;
-    void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum format, GLenum type, const void* sourcePointer) const override;
+    void copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum internalFormat, GLenum format, GLenum type, Size sourceSize, const void* sourcePointer) const override;
     virtual void syncSampler() const;
 };

@@ -117,17 +117,47 @@ void GL45Texture::generateMips() const {
     (void)CHECK_GL_ERROR();
 }

-void GL45Texture::copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum format, GLenum type, const void* sourcePointer) const {
+void GL45Texture::copyMipFaceLinesFromTexture(uint16_t mip, uint8_t face, const uvec3& size, uint32_t yOffset, GLenum internalFormat, GLenum format, GLenum type, Size sourceSize, const void* sourcePointer) const {
     if (GL_TEXTURE_2D == _target) {
-        glTextureSubImage2D(_id, mip, 0, yOffset, size.x, size.y, format, type, sourcePointer);
+        switch (internalFormat) {
+            case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+            case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+            case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+            case GL_COMPRESSED_RED_RGTC1:
+            case GL_COMPRESSED_RG_RGTC2:
+                glCompressedTextureSubImage2D(_id, mip, 0, yOffset, size.x, size.y, internalFormat,
+                                              static_cast<GLsizei>(sourceSize), sourcePointer);
+                break;
+            default:
+                glTextureSubImage2D(_id, mip, 0, yOffset, size.x, size.y, format, type, sourcePointer);
+                break;
+        }
     } else if (GL_TEXTURE_CUBE_MAP == _target) {
-        // DSA ARB does not work on AMD, so use EXT
-        // unless EXT is not available on the driver
-        if (glTextureSubImage2DEXT) {
-            auto target = GLTexture::CUBE_FACE_LAYOUT[face];
-            glTextureSubImage2DEXT(_id, target, mip, 0, yOffset, size.x, size.y, format, type, sourcePointer);
-        } else {
-            glTextureSubImage3D(_id, mip, 0, yOffset, face, size.x, size.y, 1, format, type, sourcePointer);
-        }
+        switch (internalFormat) {
+            case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+            case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+            case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+            case GL_COMPRESSED_RED_RGTC1:
+            case GL_COMPRESSED_RG_RGTC2:
+                if (glCompressedTextureSubImage2DEXT) {
+                    auto target = GLTexture::CUBE_FACE_LAYOUT[face];
+                    glCompressedTextureSubImage2DEXT(_id, target, mip, 0, yOffset, size.x, size.y, internalFormat,
+                                                     static_cast<GLsizei>(sourceSize), sourcePointer);
+                } else {
+                    glCompressedTextureSubImage3D(_id, mip, 0, yOffset, face, size.x, size.y, 1, internalFormat,
+                                                  static_cast<GLsizei>(sourceSize), sourcePointer);
+                }
+                break;
+            default:
+                // DSA ARB does not work on AMD, so use EXT
+                // unless EXT is not available on the driver
+                if (glTextureSubImage2DEXT) {
+                    auto target = GLTexture::CUBE_FACE_LAYOUT[face];
+                    glTextureSubImage2DEXT(_id, target, mip, 0, yOffset, size.x, size.y, format, type, sourcePointer);
+                } else {
+                    glTextureSubImage3D(_id, mip, 0, yOffset, face, size.x, size.y, 1, format, type, sourcePointer);
+                }
+                break;
+        }
     } else {
         Q_ASSERT(false);
@@ -19,6 +19,12 @@ const Element Element::COLOR_SRGBA_32{ VEC4, NUINT8, SRGBA };
 const Element Element::COLOR_BGRA_32{ VEC4, NUINT8, BGRA };
 const Element Element::COLOR_SBGRA_32{ VEC4, NUINT8, SBGRA };

+const Element Element::COLOR_COMPRESSED_RED{ VEC4, NUINT8, COMPRESSED_BC4_RED };
+const Element Element::COLOR_COMPRESSED_SRGB{ VEC4, NUINT8, COMPRESSED_BC1_SRGB };
+const Element Element::COLOR_COMPRESSED_SRGBA_MASK{ VEC4, NUINT8, COMPRESSED_BC1_SRGBA };
+const Element Element::COLOR_COMPRESSED_SRGBA{ VEC4, NUINT8, COMPRESSED_BC3_SRGBA };
+const Element Element::COLOR_COMPRESSED_XY{ VEC4, NUINT8, COMPRESSED_BC5_XY };
+
 const Element Element::COLOR_R11G11B10{ SCALAR, FLOAT, R11G11B10 };
 const Element Element::VEC4F_COLOR_RGBA{ VEC4, FLOAT, RGBA };
 const Element Element::VEC2F_UV{ VEC2, FLOAT, UV };
@@ -157,20 +157,12 @@ enum Semantic {

     // These are generic compression format smeantic for images
     _FIRST_COMPRESSED,
-    COMPRESSED_R,
-
-    COMPRESSED_RGB,
-    COMPRESSED_RGBA,
-
-    COMPRESSED_SRGB,
-    COMPRESSED_SRGBA,
-
-    // FIXME: Will have to be supported later:
-    /*COMPRESSED_BC3_RGBA, // RGBA_S3TC_DXT5_EXT,
-    COMPRESSED_BC3_SRGBA, // SRGB_ALPHA_S3TC_DXT5_EXT
-
-    COMPRESSED_BC7_RGBA,
-    COMPRESSED_BC7_SRGBA, */
+
+    COMPRESSED_BC1_SRGB,
+    COMPRESSED_BC1_SRGBA,
+    COMPRESSED_BC3_SRGBA,
+    COMPRESSED_BC4_RED,
+    COMPRESSED_BC5_XY,

     _LAST_COMPRESSED,
@@ -237,6 +229,11 @@ public:
     static const Element COLOR_BGRA_32;
     static const Element COLOR_SBGRA_32;
     static const Element COLOR_R11G11B10;
+    static const Element COLOR_COMPRESSED_RED;
+    static const Element COLOR_COMPRESSED_SRGB;
+    static const Element COLOR_COMPRESSED_SRGBA_MASK;
+    static const Element COLOR_COMPRESSED_SRGBA;
+    static const Element COLOR_COMPRESSED_XY;
     static const Element VEC4F_COLOR_RGBA;
     static const Element VEC2F_UV;
     static const Element VEC2F_XY;
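Note: these Element constants are how the rest of the engine names the new BC layouts (for example COLOR_COMPRESSED_SRGBA maps to the BC3_SRGBA semantic added above). The sketch below shows how a texture loader might describe a BC3 color map with them; it assumes the gpu library headers from this repository, and the function and its flow are illustrative rather than code from this pull request.

// Illustrative only: describing a BC3 (sRGB + alpha) texture with the new elements.
// create2D returning TexturePointer matches the factory change later in this diff.
#include <gpu/Texture.h>

gpu::TexturePointer describeCompressedColorMap(uint16_t width, uint16_t height, uint16_t numMips) {
    // Texel format: what the GPU samples (BC3_SRGBA -> GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT).
    const gpu::Element texelFormat = gpu::Element::COLOR_COMPRESSED_SRGBA;

    auto texture = gpu::Texture::create2D(texelFormat, width, height, numMips);
    // Stored format: what the CPU-side mip payload contains; here the same BC3 blocks.
    texture->setStoredMipFormat(texelFormat);
    texture->setSource("compressed color map (example)");

    // Each mip's compressed block data would then be attached via
    // assignStoredMip()/assignStoredMipFace(), whose size check this PR relaxes to '<='.
    return texture;
}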
@@ -212,8 +212,8 @@ void Texture::MemoryStorage::assignMipFaceData(uint16 level, uint8 face, const s
     }
 }

-Texture* Texture::createExternal(const ExternalRecycler& recycler, const Sampler& sampler) {
-    Texture* tex = new Texture(TextureUsageType::EXTERNAL);
+TexturePointer Texture::createExternal(const ExternalRecycler& recycler, const Sampler& sampler) {
+    TexturePointer tex = std::make_shared<Texture>(TextureUsageType::EXTERNAL);
     tex->_type = TEX_2D;
     tex->_maxMipLevel = 0;
     tex->_sampler = sampler;
@@ -221,36 +221,36 @@ Texture* Texture::createExternal(const ExternalRecycler& recycler, const Sampler
     return tex;
 }

-Texture* Texture::createRenderBuffer(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips, const Sampler& sampler) {
+TexturePointer Texture::createRenderBuffer(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips, const Sampler& sampler) {
     return create(TextureUsageType::RENDERBUFFER, TEX_2D, texelFormat, width, height, 1, 1, 0, numMips, sampler);
 }

-Texture* Texture::create1D(const Element& texelFormat, uint16 width, uint16 numMips, const Sampler& sampler) {
+TexturePointer Texture::create1D(const Element& texelFormat, uint16 width, uint16 numMips, const Sampler& sampler) {
     return create(TextureUsageType::RESOURCE, TEX_1D, texelFormat, width, 1, 1, 1, 0, numMips, sampler);
 }

-Texture* Texture::create2D(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips, const Sampler& sampler) {
+TexturePointer Texture::create2D(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips, const Sampler& sampler) {
     return create(TextureUsageType::RESOURCE, TEX_2D, texelFormat, width, height, 1, 1, 0, numMips, sampler);
 }

-Texture* Texture::createStrict(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips, const Sampler& sampler) {
+TexturePointer Texture::createStrict(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips, const Sampler& sampler) {
     return create(TextureUsageType::STRICT_RESOURCE, TEX_2D, texelFormat, width, height, 1, 1, 0, numMips, sampler);
 }

-Texture* Texture::create3D(const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numMips, const Sampler& sampler) {
+TexturePointer Texture::create3D(const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numMips, const Sampler& sampler) {
     return create(TextureUsageType::RESOURCE, TEX_3D, texelFormat, width, height, depth, 1, 0, numMips, sampler);
 }

-Texture* Texture::createCube(const Element& texelFormat, uint16 width, uint16 numMips, const Sampler& sampler) {
+TexturePointer Texture::createCube(const Element& texelFormat, uint16 width, uint16 numMips, const Sampler& sampler) {
     return create(TextureUsageType::RESOURCE, TEX_CUBE, texelFormat, width, width, 1, 1, 0, numMips, sampler);
 }

-Texture* Texture::create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips, const Sampler& sampler)
+TexturePointer Texture::create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips, const Sampler& sampler)
 {
-    Texture* tex = new Texture(usageType);
+    TexturePointer tex = std::make_shared<Texture>(usageType);
     tex->_storage.reset(new MemoryStorage());
     tex->_type = type;
-    tex->_storage->assignTexture(tex);
+    tex->_storage->assignTexture(tex.get());
     tex->resize(type, texelFormat, width, height, depth, numSamples, numSlices, numMips);

     tex->_sampler = sampler;
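Note: every gpu::Texture factory above now returns TexturePointer, which Texture.h defines as a shared pointer, instead of a raw Texture*. The call-site hunks earlier in this diff (ApplicationOverlay, the display plugins, the web overlays) are the mechanical consequence: the explicit gpu::TexturePointer(...) wraps and the .reset(...) calls disappear. A toy model of the ownership change, with made-up names, is below.

// Toy model only: why returning shared_ptr from the factory simplifies call sites.
#include <memory>

struct Texture { int stamp = 0; };
using TexturePointer = std::shared_ptr<Texture>;

TexturePointer createRenderBuffer() {
    // std::make_shared replaces 'new Texture' plus a manual wrap at every caller.
    return std::make_shared<Texture>();
}

int main() {
    TexturePointer color = createRenderBuffer();   // a framebuffer keeps one reference
    TexturePointer cached = color;                 // a cache or backend can hold another
    return static_cast<int>(cached.use_count());   // storage is freed when the last owner releases it
}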
@@ -434,7 +434,7 @@ void Texture::assignStoredMip(uint16 level, storage::StoragePointer& storage) {
     // THen check that the mem texture passed make sense with its format
     Size expectedSize = evalStoredMipSize(level, getStoredMipFormat());
     auto size = storage->size();
-    if (storage->size() == expectedSize) {
+    if (storage->size() <= expectedSize) {
         _storage->assignMipData(level, storage);
         _stamp++;
     } else if (size > expectedSize) {
@@ -461,7 +461,7 @@ void Texture::assignStoredMipFace(uint16 level, uint8 face, storage::StoragePoin
     // THen check that the mem texture passed make sense with its format
     Size expectedSize = evalStoredMipFaceSize(level, getStoredMipFormat());
     auto size = storage->size();
-    if (size == expectedSize) {
+    if (size <= expectedSize) {
         _storage->assignMipFaceData(level, face, storage);
         _stamp++;
     } else if (size > expectedSize) {
@@ -752,7 +752,7 @@ bool sphericalHarmonicsFromTexture(const gpu::Texture& cubeTexture, std::vector<
         boffset = 0;
     }

-    auto data = cubeTexture.accessStoredMipFace(0,face)->readData();
+    auto data = cubeTexture.accessStoredMipFace(0, face)->readData();
     if (data == nullptr) {
         continue;
     }
@@ -328,13 +328,13 @@ public:

     static const uint16 MAX_NUM_MIPS = 0;
     static const uint16 SINGLE_MIP = 1;
-    static Texture* create1D(const Element& texelFormat, uint16 width, uint16 numMips = SINGLE_MIP, const Sampler& sampler = Sampler());
-    static Texture* create2D(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips = SINGLE_MIP, const Sampler& sampler = Sampler());
-    static Texture* create3D(const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numMips = SINGLE_MIP, const Sampler& sampler = Sampler());
-    static Texture* createCube(const Element& texelFormat, uint16 width, uint16 numMips = 1, const Sampler& sampler = Sampler());
-    static Texture* createRenderBuffer(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips = SINGLE_MIP, const Sampler& sampler = Sampler());
-    static Texture* createStrict(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips = SINGLE_MIP, const Sampler& sampler = Sampler());
-    static Texture* createExternal(const ExternalRecycler& recycler, const Sampler& sampler = Sampler());
+    static TexturePointer create1D(const Element& texelFormat, uint16 width, uint16 numMips = SINGLE_MIP, const Sampler& sampler = Sampler());
+    static TexturePointer create2D(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips = SINGLE_MIP, const Sampler& sampler = Sampler());
+    static TexturePointer create3D(const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numMips = SINGLE_MIP, const Sampler& sampler = Sampler());
+    static TexturePointer createCube(const Element& texelFormat, uint16 width, uint16 numMips = 1, const Sampler& sampler = Sampler());
+    static TexturePointer createRenderBuffer(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips = SINGLE_MIP, const Sampler& sampler = Sampler());
+    static TexturePointer createStrict(const Element& texelFormat, uint16 width, uint16 height, uint16 numMips = SINGLE_MIP, const Sampler& sampler = Sampler());
+    static TexturePointer createExternal(const ExternalRecycler& recycler, const Sampler& sampler = Sampler());

     // After the texture has been created, it should be defined
     bool isDefined() const { return _defined; }
@@ -435,6 +435,8 @@ public:
     // For convenience assign a source name
     const std::string& source() const { return _source; }
     void setSource(const std::string& source) { _source = source; }
+    const std::string& sourceHash() const { return _sourceHash; }
+    void setSourceHash(const std::string& sourceHash) { _sourceHash = sourceHash; }

     // Potentially change the minimum mip (mostly for debugging purpose)
     bool setMinMip(uint16 newMinMip);
@@ -482,6 +484,7 @@ public:
     // For Cube Texture, it's possible to generate the irradiance spherical harmonics and make them availalbe with the texture
     bool generateIrradiance();
     const SHPointer& getIrradiance(uint16 slice = 0) const { return _irradiance; }
+    void overrideIrradiance(SHPointer irradiance) { _irradiance = irradiance; }
     bool isIrradianceValid() const { return _isIrradianceValid; }

     // Own sampler
|
||||||
|
|
||||||
// Textures can be serialized directly to ktx data file, here is how
|
// Textures can be serialized directly to ktx data file, here is how
|
||||||
static ktx::KTXUniquePointer serialize(const Texture& texture);
|
static ktx::KTXUniquePointer serialize(const Texture& texture);
|
||||||
static Texture* unserialize(const std::string& ktxFile, TextureUsageType usageType = TextureUsageType::RESOURCE, Usage usage = Usage(), const Sampler::Desc& sampler = Sampler::Desc());
|
static TexturePointer unserialize(const std::string& ktxFile, TextureUsageType usageType = TextureUsageType::RESOURCE, Usage usage = Usage(), const Sampler::Desc& sampler = Sampler::Desc());
|
||||||
static bool evalKTXFormat(const Element& mipFormat, const Element& texelFormat, ktx::Header& header);
|
static bool evalKTXFormat(const Element& mipFormat, const Element& texelFormat, ktx::Header& header);
|
||||||
static bool evalTextureFormat(const ktx::Header& header, Element& mipFormat, Element& texelFormat);
|
static bool evalTextureFormat(const ktx::Header& header, Element& mipFormat, Element& texelFormat);
|
||||||
|
|
||||||
|
@ -518,6 +521,7 @@ protected:
|
||||||
std::weak_ptr<Texture> _fallback;
|
std::weak_ptr<Texture> _fallback;
|
||||||
// Not strictly necessary, but incredibly useful for debugging
|
// Not strictly necessary, but incredibly useful for debugging
|
||||||
std::string _source;
|
std::string _source;
|
||||||
|
std::string _sourceHash;
|
||||||
std::unique_ptr< Storage > _storage;
|
std::unique_ptr< Storage > _storage;
|
||||||
|
|
||||||
Stamp _stamp { 0 };
|
Stamp _stamp { 0 };
|
||||||
|
@ -552,7 +556,7 @@ protected:
|
||||||
bool _isIrradianceValid = false;
|
bool _isIrradianceValid = false;
|
||||||
bool _defined = false;
|
bool _defined = false;
|
||||||
|
|
||||||
static Texture* create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips, const Sampler& sampler);
|
static TexturePointer create(TextureUsageType usageType, Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips, const Sampler& sampler);
|
||||||
|
|
||||||
Size resize(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips);
|
Size resize(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, uint16 numMips);
|
||||||
};
|
};
|
||||||
|
|
|
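The serialize/unserialize pair declared above is the new KTX round-trip surface. A minimal sketch of how a caller might use it, assuming a texture that has already been defined; the function name, the path, and the elided file-writing step are placeholders for illustration, not part of this change:

// Sketch only: cache a defined texture as KTX data, then rebuild a texture from the file.
#include <string>
#include <gpu/Texture.h>

gpu::TexturePointer cacheRoundTrip(const gpu::TexturePointer& texture, const std::string& ktxPath) {
    ktx::KTXUniquePointer ktxData = gpu::Texture::serialize(*texture);
    if (!ktxData) {
        // evalKTXFormat() rejected this texel/mip format combination.
        return nullptr;
    }

    // ... write ktxData's backing storage to ktxPath with the caller's preferred file API ...

    // Rebuild a resource texture from the file; usage type, usage and sampler fall back to
    // the defaults in the declaration above.
    return gpu::Texture::unserialize(ktxPath);
}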
@ -23,6 +23,7 @@ struct GPUKTXPayload {
     Texture::Usage _usage;
     TextureUsageType _usageType;


     static std::string KEY;
     static bool isGPUKTX(const ktx::KeyValue& val) {
         return (val._key.compare(KEY) == 0);

@ -161,7 +162,13 @@ ktx::KTXUniquePointer Texture::serialize(const Texture& texture) {
     keyval._usage = texture.getUsage();
     keyval._usageType = texture.getUsageType();
     ktx::KeyValues keyValues;
     keyValues.emplace_back(ktx::KeyValue(GPUKTXPayload::KEY, sizeof(GPUKTXPayload), (ktx::Byte*) &keyval));

+    static const std::string SOURCE_HASH_KEY = "hifi.sourceHash";
+    auto hash = texture.sourceHash();
+    if (!hash.empty()) {
+        keyValues.emplace_back(ktx::KeyValue(SOURCE_HASH_KEY, static_cast<uint32>(hash.size()), (ktx::Byte*) hash.c_str()));
+    }
+
     auto ktxBuffer = ktx::KTX::create(header, images, keyValues);
 #if 0

@ -193,7 +200,7 @@ ktx::KTXUniquePointer Texture::serialize(const Texture& texture) {
     return ktxBuffer;
 }

-Texture* Texture::unserialize(const std::string& ktxfile, TextureUsageType usageType, Usage usage, const Sampler::Desc& sampler) {
+TexturePointer Texture::unserialize(const std::string& ktxfile, TextureUsageType usageType, Usage usage, const Sampler::Desc& sampler) {
     std::unique_ptr<ktx::KTX> ktxPointer = ktx::KTX::create(ktx::StoragePointer { new storage::FileStorage(ktxfile.c_str()) });
     if (!ktxPointer) {
         return nullptr;

@ -260,6 +267,16 @@ bool Texture::evalKTXFormat(const Element& mipFormat, const Element& texelFormat
         header.setUncompressed(ktx::GLType::UNSIGNED_BYTE, 1, ktx::GLFormat::RGBA, ktx::GLInternalFormat_Uncompressed::SRGB8_ALPHA8, ktx::GLBaseInternalFormat::RGBA);
     } else if (texelFormat == Format::COLOR_R_8 && mipFormat == Format::COLOR_R_8) {
         header.setUncompressed(ktx::GLType::UNSIGNED_BYTE, 1, ktx::GLFormat::RED, ktx::GLInternalFormat_Uncompressed::R8, ktx::GLBaseInternalFormat::RED);
+    } else if (texelFormat == Format::COLOR_COMPRESSED_SRGB && mipFormat == Format::COLOR_COMPRESSED_SRGB) {
+        header.setCompressed(ktx::GLInternalFormat_Compressed::COMPRESSED_SRGB_S3TC_DXT1_EXT, ktx::GLBaseInternalFormat::RGB);
+    } else if (texelFormat == Format::COLOR_COMPRESSED_SRGBA_MASK && mipFormat == Format::COLOR_COMPRESSED_SRGBA_MASK) {
+        header.setCompressed(ktx::GLInternalFormat_Compressed::COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, ktx::GLBaseInternalFormat::RGBA);
+    } else if (texelFormat == Format::COLOR_COMPRESSED_SRGBA && mipFormat == Format::COLOR_COMPRESSED_SRGBA) {
+        header.setCompressed(ktx::GLInternalFormat_Compressed::COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, ktx::GLBaseInternalFormat::RGBA);
+    } else if (texelFormat == Format::COLOR_COMPRESSED_RED && mipFormat == Format::COLOR_COMPRESSED_RED) {
+        header.setCompressed(ktx::GLInternalFormat_Compressed::COMPRESSED_RED_RGTC1, ktx::GLBaseInternalFormat::RED);
+    } else if (texelFormat == Format::COLOR_COMPRESSED_XY && mipFormat == Format::COLOR_COMPRESSED_XY) {
+        header.setCompressed(ktx::GLInternalFormat_Compressed::COMPRESSED_RG_RGTC2, ktx::GLBaseInternalFormat::RG);
     } else {
         return false;
     }

@ -295,6 +312,25 @@ bool Texture::evalTextureFormat(const ktx::Header& header, Element& mipFormat, E
         } else {
             return false;
         }
+    } else if (header.getGLFormat() == ktx::GLFormat::COMPRESSED_FORMAT && header.getGLType() == ktx::GLType::COMPRESSED_TYPE) {
+        if (header.getGLInternaFormat_Compressed() == ktx::GLInternalFormat_Compressed::COMPRESSED_SRGB_S3TC_DXT1_EXT) {
+            mipFormat = Format::COLOR_COMPRESSED_SRGB;
+            texelFormat = Format::COLOR_COMPRESSED_SRGB;
+        } else if (header.getGLInternaFormat_Compressed() == ktx::GLInternalFormat_Compressed::COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT) {
+            mipFormat = Format::COLOR_COMPRESSED_SRGBA_MASK;
+            texelFormat = Format::COLOR_COMPRESSED_SRGBA_MASK;
+        } else if (header.getGLInternaFormat_Compressed() == ktx::GLInternalFormat_Compressed::COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT) {
+            mipFormat = Format::COLOR_COMPRESSED_SRGBA;
+            texelFormat = Format::COLOR_COMPRESSED_SRGBA;
+        } else if (header.getGLInternaFormat_Compressed() == ktx::GLInternalFormat_Compressed::COMPRESSED_RED_RGTC1) {
+            mipFormat = Format::COLOR_COMPRESSED_RED;
+            texelFormat = Format::COLOR_COMPRESSED_RED;
+        } else if (header.getGLInternaFormat_Compressed() == ktx::GLInternalFormat_Compressed::COMPRESSED_RG_RGTC2) {
+            mipFormat = Format::COLOR_COMPRESSED_XY;
+            texelFormat = Format::COLOR_COMPRESSED_XY;
+        } else {
+            return false;
+        }
     } else {
         return false;
     }
11
libraries/image/CMakeLists.txt
Normal file

@ -0,0 +1,11 @@
set(TARGET_NAME image)
setup_hifi_library()
link_hifi_libraries(shared gpu)

target_glm()

add_dependency_external_projects(nvtt)
find_package(NVTT REQUIRED)
target_include_directories(${TARGET_NAME} PRIVATE ${NVTT_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${NVTT_LIBRARIES})
add_paths_to_fixup_libs(${NVTT_DLL_PATH})
934
libraries/image/src/image/Image.cpp
Normal file

@ -0,0 +1,934 @@
//
// Image.cpp
// image/src/image
//
// Created by Clement Brisset on 4/5/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "Image.h"

#include <nvtt/nvtt.h>

#include <QUrl>
#include <QImage>
#include <QBuffer>
#include <QImageReader>

#include <Finally.h>
#include <Profile.h>
#include <StatTracker.h>
#include <GLMHelpers.h>

#include "ImageLogging.h"

using namespace gpu;

#define CPU_MIPMAPS 1
#define COMPRESS_COLOR_TEXTURES 0
#define COMPRESS_NORMALMAP_TEXTURES 0 // Disable Normalmap compression for now
#define COMPRESS_GRAYSCALE_TEXTURES 0
#define COMPRESS_CUBEMAP_TEXTURES 0 // Disable Cubemap compression for now

static const glm::uvec2 SPARSE_PAGE_SIZE(128);
static const glm::uvec2 MAX_TEXTURE_SIZE(4096);
bool DEV_DECIMATE_TEXTURES = false;
std::atomic<size_t> DECIMATED_TEXTURE_COUNT{ 0 };
std::atomic<size_t> RECTIFIED_TEXTURE_COUNT{ 0 };

bool needsSparseRectification(const glm::uvec2& size) {
    // Don't attempt to rectify small textures (textures less than the sparse page size in any dimension)
    if (glm::any(glm::lessThan(size, SPARSE_PAGE_SIZE))) {
        return false;
    }

    // Don't rectify textures that are already an exact multiple of sparse page size
    if (glm::uvec2(0) == (size % SPARSE_PAGE_SIZE)) {
        return false;
    }

    // Texture is not sparse compatible, but is bigger than the sparse page size in both dimensions, rectify!
    return true;
}

glm::uvec2 rectifyToSparseSize(const glm::uvec2& size) {
    glm::uvec2 pages = ((size / SPARSE_PAGE_SIZE) + glm::clamp(size % SPARSE_PAGE_SIZE, glm::uvec2(0), glm::uvec2(1)));
    glm::uvec2 result = pages * SPARSE_PAGE_SIZE;
    return result;
}


namespace image {

TextureUsage::TextureLoader TextureUsage::getTextureLoaderForType(Type type, const QVariantMap& options) {
    switch (type) {
        case ALBEDO_TEXTURE:
            return image::TextureUsage::createAlbedoTextureFromImage;
        case EMISSIVE_TEXTURE:
            return image::TextureUsage::createEmissiveTextureFromImage;
        case LIGHTMAP_TEXTURE:
            return image::TextureUsage::createLightmapTextureFromImage;
        case CUBE_TEXTURE:
            if (options.value("generateIrradiance", true).toBool()) {
                return image::TextureUsage::createCubeTextureFromImage;
            } else {
                return image::TextureUsage::createCubeTextureFromImageWithoutIrradiance;
            }
        case BUMP_TEXTURE:
            return image::TextureUsage::createNormalTextureFromBumpImage;
        case NORMAL_TEXTURE:
            return image::TextureUsage::createNormalTextureFromNormalImage;
        case ROUGHNESS_TEXTURE:
            return image::TextureUsage::createRoughnessTextureFromImage;
        case GLOSS_TEXTURE:
            return image::TextureUsage::createRoughnessTextureFromGlossImage;
        case SPECULAR_TEXTURE:
            return image::TextureUsage::createMetallicTextureFromImage;
        case STRICT_TEXTURE:
            return image::TextureUsage::createStrict2DTextureFromImage;

        case DEFAULT_TEXTURE:
        default:
            return image::TextureUsage::create2DTextureFromImage;
    }
}

gpu::TexturePointer TextureUsage::createStrict2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, true);
}

gpu::TexturePointer TextureUsage::create2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false);
}

gpu::TexturePointer TextureUsage::createAlbedoTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false);
}

gpu::TexturePointer TextureUsage::createEmissiveTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false);
}

gpu::TexturePointer TextureUsage::createLightmapTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false);
}

gpu::TexturePointer TextureUsage::createNormalTextureFromNormalImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureNormalMapFromImage(srcImage, srcImageName, false);
}

gpu::TexturePointer TextureUsage::createNormalTextureFromBumpImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureNormalMapFromImage(srcImage, srcImageName, true);
}

gpu::TexturePointer TextureUsage::createRoughnessTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureGrayscaleFromImage(srcImage, srcImageName, false);
}

gpu::TexturePointer TextureUsage::createRoughnessTextureFromGlossImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureGrayscaleFromImage(srcImage, srcImageName, true);
}

gpu::TexturePointer TextureUsage::createMetallicTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureGrayscaleFromImage(srcImage, srcImageName, false);
}

gpu::TexturePointer TextureUsage::createCubeTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return processCubeTextureColorFromImage(srcImage, srcImageName, true);
}

gpu::TexturePointer TextureUsage::createCubeTextureFromImageWithoutIrradiance(const QImage& srcImage, const std::string& srcImageName) {
    return processCubeTextureColorFromImage(srcImage, srcImageName, false);
}

gpu::TexturePointer processImage(const QByteArray& content, const std::string& filename, int maxNumPixels, TextureUsage::Type textureType) {
    // Help the QImage loader by extracting the image file format from the url filename ext.
    // Some tga are not created properly without it.
    auto filenameExtension = filename.substr(filename.find_last_of('.') + 1);
    QBuffer buffer;
    buffer.setData(content);
    QImageReader imageReader(&buffer, filenameExtension.c_str());
    QImage image;

    if (imageReader.canRead()) {
        image = imageReader.read();
    } else {
        // Extension could be incorrect, try to detect the format from the content
        QImageReader newImageReader;
        newImageReader.setDecideFormatFromContent(true);
        buffer.setData(content);
        newImageReader.setDevice(&buffer);

        if (newImageReader.canRead()) {
            qCWarning(imagelogging) << "Image file" << filename.c_str() << "has extension" << filenameExtension.c_str()
                                    << "but is actually a" << qPrintable(newImageReader.format()) << "(recovering)";
            image = newImageReader.read();
        }
    }

    int imageWidth = image.width();
    int imageHeight = image.height();

    // Validate that the image loaded
    if (imageWidth == 0 || imageHeight == 0 || image.format() == QImage::Format_Invalid) {
        QString reason(image.format() == QImage::Format_Invalid ? "(Invalid Format)" : "(Size is invalid)");
        qCWarning(imagelogging) << "Failed to load" << filename.c_str() << qPrintable(reason);
        return nullptr;
    }

    // Validate the image is less than _maxNumPixels, and downscale if necessary
    if (imageWidth * imageHeight > maxNumPixels) {
        float scaleFactor = sqrtf(maxNumPixels / (float)(imageWidth * imageHeight));
        int originalWidth = imageWidth;
        int originalHeight = imageHeight;
        imageWidth = (int)(scaleFactor * (float)imageWidth + 0.5f);
        imageHeight = (int)(scaleFactor * (float)imageHeight + 0.5f);
        QImage newImage = image.scaled(QSize(imageWidth, imageHeight), Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
        image.swap(newImage);
        qCDebug(imagelogging).nospace() << "Downscaled " << filename.c_str() << " (" <<
            QSize(originalWidth, originalHeight) << " to " <<
            QSize(imageWidth, imageHeight) << ")";
    }

    auto loader = TextureUsage::getTextureLoaderForType(textureType);
    auto texture = loader(image, filename);

    return texture;
}

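processImage() above is the single entry point that turns raw file bytes into a gpu texture. A minimal sketch of calling it directly; the include path, the file handling, and the pixel budget are illustrative assumptions, not values defined by this change:

// Sketch only: read a file with Qt and hand the bytes to image::processImage.
#include <QFile>
#include <QString>
#include <image/Image.h> // include path assumed from the new library layout

gpu::TexturePointer loadAlbedoFromDisk(const QString& path) {
    QFile file(path);
    if (!file.open(QIODevice::ReadOnly)) {
        return nullptr;
    }
    QByteArray content = file.readAll();

    const int MAX_NUM_PIXELS = 16 * 1024 * 1024; // placeholder budget, not a project default
    return image::processImage(content, path.toStdString(), MAX_NUM_PIXELS,
                               image::TextureUsage::ALBEDO_TEXTURE);
}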
QImage processSourceImage(const QImage& srcImage, bool cubemap) {
    PROFILE_RANGE(resource_parse, "processSourceImage");
    const glm::uvec2 srcImageSize = toGlm(srcImage.size());
    glm::uvec2 targetSize = srcImageSize;

    while (glm::any(glm::greaterThan(targetSize, MAX_TEXTURE_SIZE))) {
        targetSize /= 2;
    }
    if (targetSize != srcImageSize) {
        ++DECIMATED_TEXTURE_COUNT;
    }

    if (!cubemap && needsSparseRectification(targetSize)) {
        ++RECTIFIED_TEXTURE_COUNT;
        targetSize = rectifyToSparseSize(targetSize);
    }

    if (DEV_DECIMATE_TEXTURES && glm::all(glm::greaterThanEqual(targetSize / SPARSE_PAGE_SIZE, glm::uvec2(2)))) {
        targetSize /= 2;
    }

    if (targetSize != srcImageSize) {
        PROFILE_RANGE(resource_parse, "processSourceImage Rectify");
        qCDebug(imagelogging) << "Resizing texture from " << srcImageSize.x << "x" << srcImageSize.y << " to " << targetSize.x << "x" << targetSize.y;
        return srcImage.scaled(fromGlm(targetSize), Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
    }

    return srcImage;
}

struct MyOutputHandler : public nvtt::OutputHandler {
    MyOutputHandler(gpu::Texture* texture, int face) : _texture(texture), _face(face) {}

    virtual void beginImage(int size, int width, int height, int depth, int face, int miplevel) override {
        _size = size;
        _miplevel = miplevel;

        _data = static_cast<gpu::Byte*>(malloc(size));
        _current = _data;
    }
    virtual bool writeData(const void* data, int size) override {
        assert(_current + size <= _data + _size);
        memcpy(_current, data, size);
        _current += size;
        return true;
    }
    virtual void endImage() override {
        if (_face >= 0) {
            _texture->assignStoredMipFace(_miplevel, _face, _size, static_cast<const gpu::Byte*>(_data));
        } else {
            _texture->assignStoredMip(_miplevel, _size, static_cast<const gpu::Byte*>(_data));
        }
        free(_data);
        _data = nullptr;
    }

    gpu::Byte* _data{ nullptr };
    gpu::Byte* _current{ nullptr };
    gpu::Texture* _texture{ nullptr };
    int _miplevel = 0;
    int _size = 0;
    int _face = -1;
};
struct MyErrorHandler : public nvtt::ErrorHandler {
    virtual void error(nvtt::Error e) override {
        qCWarning(imagelogging) << "Texture compression error:" << nvtt::errorString(e);
    }
};

void generateMips(gpu::Texture* texture, QImage& image, int face = -1) {
#if CPU_MIPMAPS
    PROFILE_RANGE(resource_parse, "generateMips");

    if (image.format() != QImage::Format_ARGB32) {
        image = image.convertToFormat(QImage::Format_ARGB32);
    }

    const int width = image.width(), height = image.height();
    const void* data = static_cast<const void*>(image.constBits());

    nvtt::TextureType textureType = nvtt::TextureType_2D;
    nvtt::InputFormat inputFormat = nvtt::InputFormat_BGRA_8UB;
    nvtt::WrapMode wrapMode = nvtt::WrapMode_Repeat;
    nvtt::RoundMode roundMode = nvtt::RoundMode_None;
    nvtt::AlphaMode alphaMode = nvtt::AlphaMode_None;

    float inputGamma = 2.2f;
    float outputGamma = 2.2f;

    nvtt::CompressionOptions compressionOptions;
    compressionOptions.setQuality(nvtt::Quality_Production);

    auto mipFormat = texture->getStoredMipFormat();
    if (mipFormat == gpu::Element::COLOR_COMPRESSED_SRGB) {
        compressionOptions.setFormat(nvtt::Format_BC1);
    } else if (mipFormat == gpu::Element::COLOR_COMPRESSED_SRGBA_MASK) {
        alphaMode = nvtt::AlphaMode_Transparency;
        compressionOptions.setFormat(nvtt::Format_BC1a);
    } else if (mipFormat == gpu::Element::COLOR_COMPRESSED_SRGBA) {
        alphaMode = nvtt::AlphaMode_Transparency;
        compressionOptions.setFormat(nvtt::Format_BC3);
    } else if (mipFormat == gpu::Element::COLOR_COMPRESSED_RED) {
        compressionOptions.setFormat(nvtt::Format_BC4);
    } else if (mipFormat == gpu::Element::COLOR_COMPRESSED_XY) {
        compressionOptions.setFormat(nvtt::Format_BC5);
    } else if (mipFormat == gpu::Element::COLOR_RGBA_32) {
        compressionOptions.setFormat(nvtt::Format_RGBA);
        compressionOptions.setPixelType(nvtt::PixelType_UnsignedNorm);
        compressionOptions.setPixelFormat(32,
                                          0x000000FF,
                                          0x0000FF00,
                                          0x00FF0000,
                                          0xFF000000);
        inputGamma = 1.0f;
        outputGamma = 1.0f;
    } else if (mipFormat == gpu::Element::COLOR_BGRA_32) {
        compressionOptions.setFormat(nvtt::Format_RGBA);
        compressionOptions.setPixelType(nvtt::PixelType_UnsignedNorm);
        compressionOptions.setPixelFormat(32,
                                          0x00FF0000,
                                          0x0000FF00,
                                          0x000000FF,
                                          0xFF000000);
        inputGamma = 1.0f;
        outputGamma = 1.0f;
    } else if (mipFormat == gpu::Element::COLOR_SRGBA_32) {
        compressionOptions.setFormat(nvtt::Format_RGBA);
        compressionOptions.setPixelType(nvtt::PixelType_UnsignedNorm);
        compressionOptions.setPixelFormat(32,
                                          0x000000FF,
                                          0x0000FF00,
                                          0x00FF0000,
                                          0xFF000000);
    } else if (mipFormat == gpu::Element::COLOR_SBGRA_32) {
        compressionOptions.setFormat(nvtt::Format_RGBA);
        compressionOptions.setPixelType(nvtt::PixelType_UnsignedNorm);
        compressionOptions.setPixelFormat(32,
                                          0x00FF0000,
                                          0x0000FF00,
                                          0x000000FF,
                                          0xFF000000);
    } else if (mipFormat == gpu::Element::COLOR_R_8) {
        compressionOptions.setFormat(nvtt::Format_RGB);
        compressionOptions.setPixelType(nvtt::PixelType_UnsignedNorm);
        compressionOptions.setPixelFormat(8, 0, 0, 0);
    } else {
        qCWarning(imagelogging) << "Unknown mip format";
        Q_UNREACHABLE();
        return;
    }


    nvtt::InputOptions inputOptions;
    inputOptions.setTextureLayout(textureType, width, height);
    inputOptions.setMipmapData(data, width, height);

    inputOptions.setFormat(inputFormat);
    inputOptions.setGamma(inputGamma, outputGamma);
    inputOptions.setAlphaMode(alphaMode);
    inputOptions.setWrapMode(wrapMode);
    inputOptions.setRoundMode(roundMode);

    inputOptions.setMipmapGeneration(true);
    inputOptions.setMipmapFilter(nvtt::MipmapFilter_Box);

    nvtt::OutputOptions outputOptions;
    outputOptions.setOutputHeader(false);
    MyOutputHandler outputHandler(texture, face);
    outputOptions.setOutputHandler(&outputHandler);
    MyErrorHandler errorHandler;
    outputOptions.setErrorHandler(&errorHandler);

    nvtt::Compressor compressor;
    compressor.process(inputOptions, compressionOptions, outputOptions);
#else
    texture->autoGenerateMips(-1);
#endif
}

void processTextureAlpha(const QImage& srcImage, bool& validAlpha, bool& alphaAsMask) {
    PROFILE_RANGE(resource_parse, "processTextureAlpha");
    validAlpha = false;
    alphaAsMask = true;
    const uint8 OPAQUE_ALPHA = 255;
    const uint8 TRANSPARENT_ALPHA = 0;

    // Figure out if we can use a mask for alpha or not
    int numOpaques = 0;
    int numTranslucents = 0;
    const int NUM_PIXELS = srcImage.width() * srcImage.height();
    const int MAX_TRANSLUCENT_PIXELS_FOR_ALPHAMASK = (int)(0.05f * (float)(NUM_PIXELS));
    const QRgb* data = reinterpret_cast<const QRgb*>(srcImage.constBits());
    for (int i = 0; i < NUM_PIXELS; ++i) {
        auto alpha = qAlpha(data[i]);
        if (alpha == OPAQUE_ALPHA) {
            numOpaques++;
        } else if (alpha != TRANSPARENT_ALPHA) {
            if (++numTranslucents > MAX_TRANSLUCENT_PIXELS_FOR_ALPHAMASK) {
                alphaAsMask = false;
                break;
            }
        }
    }
    validAlpha = (numOpaques != NUM_PIXELS);
}

gpu::TexturePointer TextureUsage::process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isStrict) {
    PROFILE_RANGE(resource_parse, "process2DTextureColorFromImage");
    QImage image = processSourceImage(srcImage, false);
    bool validAlpha = image.hasAlphaChannel();
    bool alphaAsMask = false;

    if (image.format() != QImage::Format_ARGB32) {
        image = image.convertToFormat(QImage::Format_ARGB32);
    }

    if (validAlpha) {
        processTextureAlpha(image, validAlpha, alphaAsMask);
    }

    gpu::TexturePointer theTexture = nullptr;

    if ((image.width() > 0) && (image.height() > 0)) {
#if CPU_MIPMAPS && COMPRESS_COLOR_TEXTURES
        gpu::Element formatGPU;
        if (validAlpha) {
            formatGPU = alphaAsMask ? gpu::Element::COLOR_COMPRESSED_SRGBA_MASK : gpu::Element::COLOR_COMPRESSED_SRGBA;
        } else {
            formatGPU = gpu::Element::COLOR_COMPRESSED_SRGB;
        }
        gpu::Element formatMip = formatGPU;
#else
        gpu::Element formatMip = gpu::Element::COLOR_SBGRA_32;
        gpu::Element formatGPU = gpu::Element::COLOR_SRGBA_32;
#endif

        if (isStrict) {
            theTexture = gpu::Texture::createStrict(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR));
        } else {
            theTexture = gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR));
        }
        theTexture->setSource(srcImageName);
        auto usage = gpu::Texture::Usage::Builder().withColor();
        if (validAlpha) {
            usage.withAlpha();
            if (alphaAsMask) {
                usage.withAlphaMask();
            }
        }
        theTexture->setUsage(usage.build());
        theTexture->setStoredMipFormat(formatMip);
        generateMips(theTexture.get(), image);
    }

    return theTexture;
}

int clampPixelCoordinate(int coordinate, int maxCoordinate) {
    return coordinate - ((int)(coordinate < 0) * coordinate) + ((int)(coordinate > maxCoordinate) * (maxCoordinate - coordinate));
}

const int RGBA_MAX = 255;

// transform -1 - 1 to 0 - 255 (from sobel value to rgb)
double mapComponent(double sobelValue) {
    const double factor = RGBA_MAX / 2.0;
    return (sobelValue + 1.0) * factor;
}

QImage processBumpMap(QImage& image) {
    if (image.format() != QImage::Format_Grayscale8) {
        image = image.convertToFormat(QImage::Format_Grayscale8);
    }

    // PR 5540 by AlessandroSigna integrated here as a specialized TextureLoader for bumpmaps
    // The conversion is done using the Sobel Filter to calculate the derivatives from the grayscale image
    const double pStrength = 2.0;
    int width = image.width();
    int height = image.height();

    QImage result(width, height, QImage::Format_ARGB32);

    for (int i = 0; i < width; i++) {
        const int iNextClamped = clampPixelCoordinate(i + 1, width - 1);
        const int iPrevClamped = clampPixelCoordinate(i - 1, width - 1);

        for (int j = 0; j < height; j++) {
            const int jNextClamped = clampPixelCoordinate(j + 1, height - 1);
            const int jPrevClamped = clampPixelCoordinate(j - 1, height - 1);

            // surrounding pixels
            const QRgb topLeft = image.pixel(iPrevClamped, jPrevClamped);
            const QRgb top = image.pixel(iPrevClamped, j);
            const QRgb topRight = image.pixel(iPrevClamped, jNextClamped);
            const QRgb right = image.pixel(i, jNextClamped);
            const QRgb bottomRight = image.pixel(iNextClamped, jNextClamped);
            const QRgb bottom = image.pixel(iNextClamped, j);
            const QRgb bottomLeft = image.pixel(iNextClamped, jPrevClamped);
            const QRgb left = image.pixel(i, jPrevClamped);

            // take their gray intensities
            // since it's a grayscale image, the value of each component RGB is the same
            const double tl = qRed(topLeft);
            const double t = qRed(top);
            const double tr = qRed(topRight);
            const double r = qRed(right);
            const double br = qRed(bottomRight);
            const double b = qRed(bottom);
            const double bl = qRed(bottomLeft);
            const double l = qRed(left);

            // apply the sobel filter
            const double dX = (tr + pStrength * r + br) - (tl + pStrength * l + bl);
            const double dY = (bl + pStrength * b + br) - (tl + pStrength * t + tr);
            const double dZ = RGBA_MAX / pStrength;

            glm::vec3 v(dX, dY, dZ);
            glm::normalize(v);

            // convert to rgb from the value obtained computing the filter
            QRgb qRgbValue = qRgba(mapComponent(v.z), mapComponent(v.y), mapComponent(v.x), 1.0);
            result.setPixel(i, j, qRgbValue);
        }
    }

    return result;
}
gpu::TexturePointer TextureUsage::process2DTextureNormalMapFromImage(const QImage& srcImage, const std::string& srcImageName, bool isBumpMap) {
    PROFILE_RANGE(resource_parse, "process2DTextureNormalMapFromImage");
    QImage image = processSourceImage(srcImage, false);

    if (isBumpMap) {
        image = processBumpMap(image);
    }

    // Make sure the normal map source image is ARGB32
    if (image.format() != QImage::Format_ARGB32) {
        image = image.convertToFormat(QImage::Format_ARGB32);
    }

    gpu::TexturePointer theTexture = nullptr;
    if ((image.width() > 0) && (image.height() > 0)) {

#if CPU_MIPMAPS && COMPRESS_NORMALMAP_TEXTURES
        gpu::Element formatMip = gpu::Element::COLOR_COMPRESSED_XY;
        gpu::Element formatGPU = gpu::Element::COLOR_COMPRESSED_XY;
#else
        gpu::Element formatMip = gpu::Element::COLOR_RGBA_32;
        gpu::Element formatGPU = gpu::Element::COLOR_RGBA_32;
#endif

        theTexture = gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR));
        theTexture->setSource(srcImageName);
        theTexture->setStoredMipFormat(formatMip);
        generateMips(theTexture.get(), image);
    }

    return theTexture;
}

gpu::TexturePointer TextureUsage::process2DTextureGrayscaleFromImage(const QImage& srcImage, const std::string& srcImageName, bool isInvertedPixels) {
    PROFILE_RANGE(resource_parse, "process2DTextureGrayscaleFromImage");
    QImage image = processSourceImage(srcImage, false);

    if (image.format() != QImage::Format_ARGB32) {
        image = image.convertToFormat(QImage::Format_ARGB32);
    }

    if (isInvertedPixels) {
        // Gloss turned into Rough
        image.invertPixels(QImage::InvertRgba);
    }

    gpu::TexturePointer theTexture = nullptr;
    if ((image.width() > 0) && (image.height() > 0)) {

#if CPU_MIPMAPS && COMPRESS_GRAYSCALE_TEXTURES
        gpu::Element formatMip = gpu::Element::COLOR_COMPRESSED_RED;
        gpu::Element formatGPU = gpu::Element::COLOR_COMPRESSED_RED;
#else
        gpu::Element formatMip = gpu::Element::COLOR_R_8;
        gpu::Element formatGPU = gpu::Element::COLOR_R_8;
#endif

        theTexture = gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR));
        theTexture->setSource(srcImageName);
        theTexture->setStoredMipFormat(formatMip);
        generateMips(theTexture.get(), image);
    }

    return theTexture;
}

class CubeLayout {
public:

    enum SourceProjection {
        FLAT = 0,
        EQUIRECTANGULAR,
    };
    int _type = FLAT;
    int _widthRatio = 1;
    int _heightRatio = 1;

    class Face {
    public:
        int _x = 0;
        int _y = 0;
        bool _horizontalMirror = false;
        bool _verticalMirror = false;

        Face() {}
        Face(int x, int y, bool horizontalMirror, bool verticalMirror) : _x(x), _y(y), _horizontalMirror(horizontalMirror), _verticalMirror(verticalMirror) {}
    };

    Face _faceXPos;
    Face _faceXNeg;
    Face _faceYPos;
    Face _faceYNeg;
    Face _faceZPos;
    Face _faceZNeg;

    CubeLayout(int wr, int hr, Face fXP, Face fXN, Face fYP, Face fYN, Face fZP, Face fZN) :
        _type(FLAT),
        _widthRatio(wr),
        _heightRatio(hr),
        _faceXPos(fXP),
        _faceXNeg(fXN),
        _faceYPos(fYP),
        _faceYNeg(fYN),
        _faceZPos(fZP),
        _faceZNeg(fZN) {}

    CubeLayout(int wr, int hr) :
        _type(EQUIRECTANGULAR),
        _widthRatio(wr),
        _heightRatio(hr) {}


    static const CubeLayout CUBEMAP_LAYOUTS[];
    static const int NUM_CUBEMAP_LAYOUTS;

    static int findLayout(int width, int height) {
        // Find the layout of the cubemap in the 2D image
        int foundLayout = -1;
        for (int i = 0; i < NUM_CUBEMAP_LAYOUTS; i++) {
            if ((height * CUBEMAP_LAYOUTS[i]._widthRatio) == (width * CUBEMAP_LAYOUTS[i]._heightRatio)) {
                foundLayout = i;
                break;
            }
        }
        return foundLayout;
    }

    static QImage extractEquirectangularFace(const QImage& source, gpu::Texture::CubeFace face, int faceWidth) {
        QImage image(faceWidth, faceWidth, source.format());

        glm::vec2 dstInvSize(1.0f / (float)image.width(), 1.0f / (float)image.height());

        struct CubeToXYZ {
            gpu::Texture::CubeFace _face;
            CubeToXYZ(gpu::Texture::CubeFace face) : _face(face) {}

            glm::vec3 xyzFrom(const glm::vec2& uv) {
                auto faceDir = glm::normalize(glm::vec3(-1.0f + 2.0f * uv.x, -1.0f + 2.0f * uv.y, 1.0f));

                switch (_face) {
                    case gpu::Texture::CubeFace::CUBE_FACE_BACK_POS_Z:
                        return glm::vec3(-faceDir.x, faceDir.y, faceDir.z);
                    case gpu::Texture::CubeFace::CUBE_FACE_FRONT_NEG_Z:
                        return glm::vec3(faceDir.x, faceDir.y, -faceDir.z);
                    case gpu::Texture::CubeFace::CUBE_FACE_LEFT_NEG_X:
                        return glm::vec3(faceDir.z, faceDir.y, faceDir.x);
                    case gpu::Texture::CubeFace::CUBE_FACE_RIGHT_POS_X:
                        return glm::vec3(-faceDir.z, faceDir.y, -faceDir.x);
                    case gpu::Texture::CubeFace::CUBE_FACE_BOTTOM_NEG_Y:
                        return glm::vec3(-faceDir.x, -faceDir.z, faceDir.y);
                    case gpu::Texture::CubeFace::CUBE_FACE_TOP_POS_Y:
                    default:
                        return glm::vec3(-faceDir.x, faceDir.z, -faceDir.y);
                }
            }
        };
        CubeToXYZ cubeToXYZ(face);

        struct RectToXYZ {
            RectToXYZ() {}

            glm::vec2 uvFrom(const glm::vec3& xyz) {
                auto flatDir = glm::normalize(glm::vec2(xyz.x, xyz.z));
                auto uvRad = glm::vec2(atan2(flatDir.x, flatDir.y), asin(xyz.y));

                const float LON_TO_RECT_U = 1.0f / (glm::pi<float>());
                const float LAT_TO_RECT_V = 2.0f / glm::pi<float>();
                return glm::vec2(0.5f * uvRad.x * LON_TO_RECT_U + 0.5f, 0.5f * uvRad.y * LAT_TO_RECT_V + 0.5f);
            }
        };
        RectToXYZ rectToXYZ;

        int srcFaceHeight = source.height();
        int srcFaceWidth = source.width();

        glm::vec2 dstCoord;
        glm::ivec2 srcPixel;
        for (int y = 0; y < faceWidth; ++y) {
            dstCoord.y = 1.0f - (y + 0.5f) * dstInvSize.y; // Fill cube face images from top to bottom
            for (int x = 0; x < faceWidth; ++x) {
                dstCoord.x = (x + 0.5f) * dstInvSize.x;

                auto xyzDir = cubeToXYZ.xyzFrom(dstCoord);
                auto srcCoord = rectToXYZ.uvFrom(xyzDir);

                srcPixel.x = floor(srcCoord.x * srcFaceWidth);
                // Flip the vertical axis to QImage going top to bottom
                srcPixel.y = floor((1.0f - srcCoord.y) * srcFaceHeight);

                if (((uint32)srcPixel.x < (uint32)source.width()) && ((uint32)srcPixel.y < (uint32)source.height())) {
                    image.setPixel(x, y, source.pixel(QPoint(srcPixel.x, srcPixel.y)));

                    // Keep for debug, this is showing the dir as a color
                    // glm::u8vec4 rgba((xyzDir.x + 1.0)*0.5 * 256, (xyzDir.y + 1.0)*0.5 * 256, (xyzDir.z + 1.0)*0.5 * 256, 256);
                    // unsigned int val = 0xff000000 | (rgba.r) | (rgba.g << 8) | (rgba.b << 16);
                    // image.setPixel(x, y, val);
                }
            }
        }
        return image;
    }
};

const CubeLayout CubeLayout::CUBEMAP_LAYOUTS[] = {

    // Here is the expected layout for the faces in an image with the 2/1 aspect ratio:
    // This is detected as an Equirectangular projection
    //                   WIDTH
    //       <--------------------------->
    //    ^  +------+------+------+------+
    //    H  |      |      |      |      |
    //    E  |      |      |      |      |
    //    I  |      |      |      |      |
    //    G  +------+------+------+------+
    //    H  |      |      |      |      |
    //    T  |      |      |      |      |
    //    |  |      |      |      |      |
    //    v  +------+------+------+------+
    //
    //    FaceWidth = width = height / 6
    { 2, 1 },

    // Here is the expected layout for the faces in an image with the 1/6 aspect ratio:
    //
    //         WIDTH
    //       <------>
    //    ^  +------+
    //    |  |      |
    //    |  |  +X  |
    //    |  |      |
    //    H  +------+
    //    E  |      |
    //    I  |  -X  |
    //    G  |      |
    //    H  +------+
    //    T  |      |
    //    |  |  +Y  |
    //    |  |      |
    //    |  +------+
    //    |  |      |
    //    |  |  -Y  |
    //    |  |      |
    //    H  +------+
    //    E  |      |
    //    I  |  +Z  |
    //    G  |      |
    //    H  +------+
    //    T  |      |
    //    |  |  -Z  |
    //    |  |      |
    //    V  +------+
    //
    //    FaceWidth = width = height / 6
    { 1, 6,
        { 0, 0, true, false },
        { 0, 1, true, false },
        { 0, 2, false, true },
        { 0, 3, false, true },
        { 0, 4, true, false },
        { 0, 5, true, false }
    },

    // Here is the expected layout for the faces in an image with the 3/4 aspect ratio:
    //
    //       <-----------WIDTH----------->
    //    ^  +------+------+------+------+
    //    |  |      |      |      |      |
    //    |  |      |  +Y  |      |      |
    //    |  |      |      |      |      |
    //    H  +------+------+------+------+
    //    E  |      |      |      |      |
    //    I  |  -X  |  -Z  |  +X  |  +Z  |
    //    G  |      |      |      |      |
    //    H  +------+------+------+------+
    //    T  |      |      |      |      |
    //    |  |      |  -Y  |      |      |
    //    |  |      |      |      |      |
    //    V  +------+------+------+------+
    //
    //    FaceWidth = width / 4 = height / 3
    { 4, 3,
        { 2, 1, true, false },
        { 0, 1, true, false },
        { 1, 0, false, true },
        { 1, 2, false, true },
        { 3, 1, true, false },
        { 1, 1, true, false }
    },

    // Here is the expected layout for the faces in an image with the 4/3 aspect ratio:
    //
    //       <-------WIDTH-------->
    //    ^  +------+------+------+
    //    |  |      |      |      |
    //    |  |      |  +Y  |      |
    //    |  |      |      |      |
    //    H  +------+------+------+
    //    E  |      |      |      |
    //    I  |  -X  |  -Z  |  +X  |
    //    G  |      |      |      |
    //    H  +------+------+------+
    //    T  |      |      |      |
    //    |  |      |  -Y  |      |
    //    |  |      |      |      |
    //    |  +------+------+------+
    //    |  |      |      |      |
    //    |  |      |  +Z! |      | <+Z is upside down!
    //    |  |      |      |      |
    //    V  +------+------+------+
    //
    //    FaceWidth = width / 3 = height / 4
    { 3, 4,
        { 2, 1, true, false },
        { 0, 1, true, false },
        { 1, 0, false, true },
        { 1, 2, false, true },
        { 1, 3, false, true },
        { 1, 1, true, false }
    }
};
const int CubeLayout::NUM_CUBEMAP_LAYOUTS = sizeof(CubeLayout::CUBEMAP_LAYOUTS) / sizeof(CubeLayout);
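To make the ratio test in findLayout() above concrete, here are a few worked calls with illustrative sizes (the numbers are examples only, not values used by this change):

// Illustrative only: how the layout table is consulted for a given source size.
int equirect = CubeLayout::findLayout(4096, 2048); // 2048 * 2 == 4096 * 1 -> index 0, equirectangular
int cross    = CubeLayout::findLayout(1024, 768);  // 768 * 4 == 1024 * 3  -> the { 4, 3 } cross layout
int unknown  = CubeLayout::findLayout(1000, 600);  // no ratio matches     -> -1, caller logs and returns nullptr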
gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool generateIrradiance) {
    PROFILE_RANGE(resource_parse, "processCubeTextureColorFromImage");

    gpu::TexturePointer theTexture = nullptr;
    if ((srcImage.width() > 0) && (srcImage.height() > 0)) {
        QImage image = processSourceImage(srcImage, true);
        if (image.format() != QImage::Format_ARGB32) {
            image = image.convertToFormat(QImage::Format_ARGB32);
        }

#if CPU_MIPMAPS && COMPRESS_CUBEMAP_TEXTURES
        gpu::Element formatMip = gpu::Element::COLOR_COMPRESSED_SRGBA;
        gpu::Element formatGPU = gpu::Element::COLOR_COMPRESSED_SRGBA;
#else
        gpu::Element formatMip = gpu::Element::COLOR_SRGBA_32;
        gpu::Element formatGPU = gpu::Element::COLOR_SRGBA_32;
#endif

        // Find the layout of the cubemap in the 2D image
        // Use the original image size since processSourceImage may have altered the size / aspect ratio
        int foundLayout = CubeLayout::findLayout(srcImage.width(), srcImage.height());

        std::vector<QImage> faces;
        // If found, go extract the faces as separate images
        if (foundLayout >= 0) {
            auto& layout = CubeLayout::CUBEMAP_LAYOUTS[foundLayout];
            if (layout._type == CubeLayout::FLAT) {
                int faceWidth = image.width() / layout._widthRatio;

                faces.push_back(image.copy(QRect(layout._faceXPos._x * faceWidth, layout._faceXPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceXPos._horizontalMirror, layout._faceXPos._verticalMirror));
                faces.push_back(image.copy(QRect(layout._faceXNeg._x * faceWidth, layout._faceXNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceXNeg._horizontalMirror, layout._faceXNeg._verticalMirror));
                faces.push_back(image.copy(QRect(layout._faceYPos._x * faceWidth, layout._faceYPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceYPos._horizontalMirror, layout._faceYPos._verticalMirror));
                faces.push_back(image.copy(QRect(layout._faceYNeg._x * faceWidth, layout._faceYNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceYNeg._horizontalMirror, layout._faceYNeg._verticalMirror));
                faces.push_back(image.copy(QRect(layout._faceZPos._x * faceWidth, layout._faceZPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceZPos._horizontalMirror, layout._faceZPos._verticalMirror));
                faces.push_back(image.copy(QRect(layout._faceZNeg._x * faceWidth, layout._faceZNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceZNeg._horizontalMirror, layout._faceZNeg._verticalMirror));
            } else if (layout._type == CubeLayout::EQUIRECTANGULAR) {
                // The face width is estimated from the input image
                const int EQUIRECT_FACE_RATIO_TO_WIDTH = 4;
                const int EQUIRECT_MAX_FACE_WIDTH = 2048;
                int faceWidth = std::min(image.width() / EQUIRECT_FACE_RATIO_TO_WIDTH, EQUIRECT_MAX_FACE_WIDTH);
                for (int face = gpu::Texture::CUBE_FACE_RIGHT_POS_X; face < gpu::Texture::NUM_CUBE_FACES; face++) {
                    QImage faceImage = CubeLayout::extractEquirectangularFace(image, (gpu::Texture::CubeFace) face, faceWidth);
                    faces.push_back(faceImage);
                }
            }
        } else {
            qCDebug(imagelogging) << "Failed to find a known cube map layout from this image:" << QString(srcImageName.c_str());
            return nullptr;
        }

        // If the 6 faces have been created go on and define the true Texture
        if (faces.size() == gpu::Texture::NUM_FACES_PER_TYPE[gpu::Texture::TEX_CUBE]) {
            theTexture = gpu::Texture::createCube(formatGPU, faces[0].width(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
            theTexture->setSource(srcImageName);
            theTexture->setStoredMipFormat(formatMip);

            for (uint8 face = 0; face < faces.size(); ++face) {
                generateMips(theTexture.get(), faces[face], face);
            }

            // Generate irradiance while we are at it
            if (generateIrradiance) {
                PROFILE_RANGE(resource_parse, "generateIrradiance");
                auto irradianceTexture = gpu::Texture::createCube(gpu::Element::COLOR_SRGBA_32, faces[0].width(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
                irradianceTexture->setSource(srcImageName);
                irradianceTexture->setStoredMipFormat(gpu::Element::COLOR_SBGRA_32);
                for (uint8 face = 0; face < faces.size(); ++face) {
                    irradianceTexture->assignStoredMipFace(0, face, faces[face].byteCount(), faces[face].constBits());
                }

                irradianceTexture->generateIrradiance();

                auto irradiance = irradianceTexture->getIrradiance();
                theTexture->overrideIrradiance(irradiance);
            }
        }
    }

    return theTexture;
}

} // namespace image
70
libraries/image/src/image/Image.h
Normal file

@ -0,0 +1,70 @@
//
// Image.h
// image/src/image
//
// Created by Clement Brisset on 4/5/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_image_Image_h
#define hifi_image_Image_h

#include <QVariant>

#include <gpu/Texture.h>

class QByteArray;
class QImage;

namespace image {

namespace TextureUsage {

enum Type {
    DEFAULT_TEXTURE,
    STRICT_TEXTURE,
    ALBEDO_TEXTURE,
    NORMAL_TEXTURE,
    BUMP_TEXTURE,
    SPECULAR_TEXTURE,
    METALLIC_TEXTURE = SPECULAR_TEXTURE, // for now spec and metallic texture are the same, converted to grey
    ROUGHNESS_TEXTURE,
    GLOSS_TEXTURE,
    EMISSIVE_TEXTURE,
    CUBE_TEXTURE,
    OCCLUSION_TEXTURE,
    SCATTERING_TEXTURE = OCCLUSION_TEXTURE,
    LIGHTMAP_TEXTURE
};

using TextureLoader = std::function<gpu::TexturePointer(const QImage&, const std::string&)>;
TextureLoader getTextureLoaderForType(Type type, const QVariantMap& options = QVariantMap());

gpu::TexturePointer create2DTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createStrict2DTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createAlbedoTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createEmissiveTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createNormalTextureFromNormalImage(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createNormalTextureFromBumpImage(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createRoughnessTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createRoughnessTextureFromGlossImage(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createMetallicTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createCubeTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createCubeTextureFromImageWithoutIrradiance(const QImage& image, const std::string& srcImageName);
gpu::TexturePointer createLightmapTextureFromImage(const QImage& image, const std::string& srcImageName);

gpu::TexturePointer process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isStrict);
gpu::TexturePointer process2DTextureNormalMapFromImage(const QImage& srcImage, const std::string& srcImageName, bool isBumpMap);
gpu::TexturePointer process2DTextureGrayscaleFromImage(const QImage& srcImage, const std::string& srcImageName, bool isInvertedPixels);
gpu::TexturePointer processCubeTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool generateIrradiance);

} // namespace TextureUsage

gpu::TexturePointer processImage(const QByteArray& content, const std::string& url, int maxNumPixels, TextureUsage::Type textureType);

} // namespace image

#endif // hifi_image_Image_h
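A minimal sketch of driving this header's API directly: pick a loader for a usage type and run it on an already-decoded QImage. The function name, the image, and its name are placeholders for illustration; real callers normally go through processImage() or the texture cache:

// Sketch only, assuming <image/Image.h> is on the include path.
#include <QImage>
#include <QVariantMap>
#include <image/Image.h>

gpu::TexturePointer makeSkyboxTexture(const QImage& decoded, const std::string& name) {
    QVariantMap options;
    options["generateIrradiance"] = false; // only consulted for CUBE_TEXTURE

    auto loader = image::TextureUsage::getTextureLoaderForType(image::TextureUsage::CUBE_TEXTURE, options);
    return loader(decoded, name); // resolves to createCubeTextureFromImageWithoutIrradiance here
}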
14
libraries/image/src/image/ImageLogging.cpp
Normal file

@ -0,0 +1,14 @@
//
// ImageLogging.cpp
// image/src/image
//
// Created by Clement Brisset on 4/5/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "ImageLogging.h"

Q_LOGGING_CATEGORY(imagelogging, "hifi.image")

14
libraries/image/src/image/ImageLogging.h
Normal file

@ -0,0 +1,14 @@
//
// ImageLogging.h
// image/src/image
//
// Created by Clement Brisset on 4/5/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <QLoggingCategory>

Q_DECLARE_LOGGING_CATEGORY(imagelogging)
@ -101,8 +101,6 @@ namespace ktx {
|
||||||
UNSIGNED_INT_10F_11F_11F_REV = 0x8C3B,
|
UNSIGNED_INT_10F_11F_11F_REV = 0x8C3B,
|
||||||
UNSIGNED_INT_5_9_9_9_REV = 0x8C3E,
|
UNSIGNED_INT_5_9_9_9_REV = 0x8C3E,
|
||||||
FLOAT_32_UNSIGNED_INT_24_8_REV = 0x8DAD,
|
FLOAT_32_UNSIGNED_INT_24_8_REV = 0x8DAD,
|
||||||
|
|
||||||
NUM_GLTYPES = 25,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum class GLFormat : uint32_t {
|
enum class GLFormat : uint32_t {
|
||||||
|
@ -130,8 +128,6 @@ namespace ktx {
|
||||||
RGBA_INTEGER = 0x8D99,
|
RGBA_INTEGER = 0x8D99,
|
||||||
BGR_INTEGER = 0x8D9A,
|
BGR_INTEGER = 0x8D9A,
|
||||||
BGRA_INTEGER = 0x8D9B,
|
BGRA_INTEGER = 0x8D9B,
|
||||||
|
|
||||||
NUM_GLFORMATS = 20,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum class GLInternalFormat_Uncompressed : uint32_t {
|
enum class GLInternalFormat_Uncompressed : uint32_t {
|
||||||
|
@ -232,8 +228,6 @@ namespace ktx {
|
||||||
STENCIL_INDEX4 = 0x8D47,
|
STENCIL_INDEX4 = 0x8D47,
|
||||||
STENCIL_INDEX8 = 0x8D48,
|
STENCIL_INDEX8 = 0x8D48,
|
||||||
STENCIL_INDEX16 = 0x8D49,
|
STENCIL_INDEX16 = 0x8D49,
|
||||||
|
|
||||||
NUM_UNCOMPRESSED_GLINTERNALFORMATS = 74,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum class GLInternalFormat_Compressed : uint32_t {
|
enum class GLInternalFormat_Compressed : uint32_t {
|
||||||
|
@ -246,6 +240,11 @@ namespace ktx {
|
||||||
COMPRESSED_SRGB = 0x8C48,
|
COMPRESSED_SRGB = 0x8C48,
|
||||||
COMPRESSED_SRGB_ALPHA = 0x8C49,
|
COMPRESSED_SRGB_ALPHA = 0x8C49,
|
||||||
|
|
||||||
|
COMPRESSED_SRGB_S3TC_DXT1_EXT = 0x8C4C,
|
||||||
|
COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT = 0x8C4D,
|
||||||
|
COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT = 0x8C4E,
|
||||||
|
COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT = 0x8C4F,
|
||||||
|
|
||||||
COMPRESSED_RED_RGTC1 = 0x8DBB,
|
COMPRESSED_RED_RGTC1 = 0x8DBB,
|
||||||
COMPRESSED_SIGNED_RED_RGTC1 = 0x8DBC,
|
COMPRESSED_SIGNED_RED_RGTC1 = 0x8DBC,
|
||||||
COMPRESSED_RG_RGTC2 = 0x8DBD,
|
COMPRESSED_RG_RGTC2 = 0x8DBD,
|
||||||
|
@ -267,8 +266,6 @@ namespace ktx {
|
||||||
COMPRESSED_SIGNED_R11_EAC = 0x9271,
|
COMPRESSED_SIGNED_R11_EAC = 0x9271,
|
||||||
COMPRESSED_RG11_EAC = 0x9272,
|
COMPRESSED_RG11_EAC = 0x9272,
|
||||||
COMPRESSED_SIGNED_RG11_EAC = 0x9273,
|
COMPRESSED_SIGNED_RG11_EAC = 0x9273,
|
||||||
|
|
||||||
NUM_COMPRESSED_GLINTERNALFORMATS = 24,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum class GLBaseInternalFormat : uint32_t {
|
enum class GLBaseInternalFormat : uint32_t {
|
||||||
|
@ -280,8 +277,6 @@ namespace ktx {
|
||||||
RGB = 0x1907,
|
RGB = 0x1907,
|
||||||
RGBA = 0x1908,
|
RGBA = 0x1908,
|
||||||
STENCIL_INDEX = 0x1901,
|
STENCIL_INDEX = 0x1901,
|
||||||
|
|
||||||
NUM_GLBASEINTERNALFORMATS = 7,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum CubeMapFace {
|
enum CubeMapFace {
|
||||||
|
|
|
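The four sRGB S3TC entries added above are the GL internal formats behind the BC compression this PR enables (DXT1/DXT3/DXT5 are the older names for BC1/BC2/BC3). A small, purely illustrative helper showing that correspondence; the enum and function here are hypothetical, only the hex values come from the hunk above:

    #include <cstdint>

    enum class BCFormat { BC1, BC1A, BC2, BC3 };   // hypothetical names, for illustration only

    uint32_t srgbS3TCInternalFormat(BCFormat format) {
        switch (format) {
            case BCFormat::BC1:  return 0x8C4C; // COMPRESSED_SRGB_S3TC_DXT1_EXT
            case BCFormat::BC1A: return 0x8C4D; // COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT
            case BCFormat::BC2:  return 0x8C4E; // COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT
            case BCFormat::BC3:  return 0x8C4F; // COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT
        }
        return 0;
    }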
@ -1,4 +1,4 @@
set(TARGET_NAME model-networking)
setup_hifi_library()
-link_hifi_libraries(shared networking model fbx ktx)
+link_hifi_libraries(shared networking model fbx ktx image)

@ -489,7 +489,7 @@ QUrl NetworkMaterial::getTextureUrl(const QUrl& baseUrl, const FBXTexture& textu
}

model::TextureMapPointer NetworkMaterial::fetchTextureMap(const QUrl& baseUrl, const FBXTexture& fbxTexture,
-                                                          TextureType type, MapChannel channel) {
+                                                          image::TextureUsage::Type type, MapChannel channel) {
    const auto url = getTextureUrl(baseUrl, fbxTexture);
    const auto texture = DependencyManager::get<TextureCache>()->getTexture(url, type, fbxTexture.content, fbxTexture.maxNumPixels);
    _textures[channel] = Texture { fbxTexture.name, texture };
@ -503,7 +503,7 @@ model::TextureMapPointer NetworkMaterial::fetchTextureMap(const QUrl& baseUrl, c
    return map;
}

-model::TextureMapPointer NetworkMaterial::fetchTextureMap(const QUrl& url, TextureType type, MapChannel channel) {
+model::TextureMapPointer NetworkMaterial::fetchTextureMap(const QUrl& url, image::TextureUsage::Type type, MapChannel channel) {
    const auto texture = DependencyManager::get<TextureCache>()->getTexture(url, type);
    _textures[channel].texture = texture;

@ -518,7 +518,7 @@ NetworkMaterial::NetworkMaterial(const FBXMaterial& material, const QUrl& textur
{
    _textures = Textures(MapChannel::NUM_MAP_CHANNELS);
    if (!material.albedoTexture.filename.isEmpty()) {
-        auto map = fetchTextureMap(textureBaseUrl, material.albedoTexture, NetworkTexture::ALBEDO_TEXTURE, MapChannel::ALBEDO_MAP);
+        auto map = fetchTextureMap(textureBaseUrl, material.albedoTexture, image::TextureUsage::ALBEDO_TEXTURE, MapChannel::ALBEDO_MAP);
        _albedoTransform = material.albedoTexture.transform;
        map->setTextureTransform(_albedoTransform);

@ -535,45 +535,45 @@ NetworkMaterial::NetworkMaterial(const FBXMaterial& material, const QUrl& textur

    if (!material.normalTexture.filename.isEmpty()) {
-        auto type = (material.normalTexture.isBumpmap ? NetworkTexture::BUMP_TEXTURE : NetworkTexture::NORMAL_TEXTURE);
+        auto type = (material.normalTexture.isBumpmap ? image::TextureUsage::BUMP_TEXTURE : image::TextureUsage::NORMAL_TEXTURE);
        auto map = fetchTextureMap(textureBaseUrl, material.normalTexture, type, MapChannel::NORMAL_MAP);
        setTextureMap(MapChannel::NORMAL_MAP, map);
    }

    if (!material.roughnessTexture.filename.isEmpty()) {
-        auto map = fetchTextureMap(textureBaseUrl, material.roughnessTexture, NetworkTexture::ROUGHNESS_TEXTURE, MapChannel::ROUGHNESS_MAP);
+        auto map = fetchTextureMap(textureBaseUrl, material.roughnessTexture, image::TextureUsage::ROUGHNESS_TEXTURE, MapChannel::ROUGHNESS_MAP);
        setTextureMap(MapChannel::ROUGHNESS_MAP, map);
    } else if (!material.glossTexture.filename.isEmpty()) {
-        auto map = fetchTextureMap(textureBaseUrl, material.glossTexture, NetworkTexture::GLOSS_TEXTURE, MapChannel::ROUGHNESS_MAP);
+        auto map = fetchTextureMap(textureBaseUrl, material.glossTexture, image::TextureUsage::GLOSS_TEXTURE, MapChannel::ROUGHNESS_MAP);
        setTextureMap(MapChannel::ROUGHNESS_MAP, map);
    }

    if (!material.metallicTexture.filename.isEmpty()) {
-        auto map = fetchTextureMap(textureBaseUrl, material.metallicTexture, NetworkTexture::METALLIC_TEXTURE, MapChannel::METALLIC_MAP);
+        auto map = fetchTextureMap(textureBaseUrl, material.metallicTexture, image::TextureUsage::METALLIC_TEXTURE, MapChannel::METALLIC_MAP);
        setTextureMap(MapChannel::METALLIC_MAP, map);
    } else if (!material.specularTexture.filename.isEmpty()) {
-        auto map = fetchTextureMap(textureBaseUrl, material.specularTexture, NetworkTexture::SPECULAR_TEXTURE, MapChannel::METALLIC_MAP);
+        auto map = fetchTextureMap(textureBaseUrl, material.specularTexture, image::TextureUsage::SPECULAR_TEXTURE, MapChannel::METALLIC_MAP);
        setTextureMap(MapChannel::METALLIC_MAP, map);
    }

    if (!material.occlusionTexture.filename.isEmpty()) {
-        auto map = fetchTextureMap(textureBaseUrl, material.occlusionTexture, NetworkTexture::OCCLUSION_TEXTURE, MapChannel::OCCLUSION_MAP);
+        auto map = fetchTextureMap(textureBaseUrl, material.occlusionTexture, image::TextureUsage::OCCLUSION_TEXTURE, MapChannel::OCCLUSION_MAP);
        map->setTextureTransform(material.occlusionTexture.transform);
        setTextureMap(MapChannel::OCCLUSION_MAP, map);
    }

    if (!material.emissiveTexture.filename.isEmpty()) {
-        auto map = fetchTextureMap(textureBaseUrl, material.emissiveTexture, NetworkTexture::EMISSIVE_TEXTURE, MapChannel::EMISSIVE_MAP);
+        auto map = fetchTextureMap(textureBaseUrl, material.emissiveTexture, image::TextureUsage::EMISSIVE_TEXTURE, MapChannel::EMISSIVE_MAP);
        setTextureMap(MapChannel::EMISSIVE_MAP, map);
    }

    if (!material.scatteringTexture.filename.isEmpty()) {
-        auto map = fetchTextureMap(textureBaseUrl, material.scatteringTexture, NetworkTexture::SCATTERING_TEXTURE, MapChannel::SCATTERING_MAP);
+        auto map = fetchTextureMap(textureBaseUrl, material.scatteringTexture, image::TextureUsage::SCATTERING_TEXTURE, MapChannel::SCATTERING_MAP);
        setTextureMap(MapChannel::SCATTERING_MAP, map);
    }

    if (!material.lightmapTexture.filename.isEmpty()) {
-        auto map = fetchTextureMap(textureBaseUrl, material.lightmapTexture, NetworkTexture::LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
+        auto map = fetchTextureMap(textureBaseUrl, material.lightmapTexture, image::TextureUsage::LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
        _lightmapTransform = material.lightmapTexture.transform;
        _lightmapParams = material.lightmapParams;
        map->setTextureTransform(_lightmapTransform);
@ -596,7 +596,7 @@ void NetworkMaterial::setTextures(const QVariantMap& textureMap) {

    if (!albedoName.isEmpty()) {
        auto url = textureMap.contains(albedoName) ? textureMap[albedoName].toUrl() : QUrl();
-        auto map = fetchTextureMap(url, NetworkTexture::ALBEDO_TEXTURE, MapChannel::ALBEDO_MAP);
+        auto map = fetchTextureMap(url, image::TextureUsage::ALBEDO_TEXTURE, MapChannel::ALBEDO_MAP);
        map->setTextureTransform(_albedoTransform);
        // when reassigning the albedo texture we also check for the alpha channel used as opacity
        map->setUseAlphaChannel(true);
@ -605,45 +605,45 @@ void NetworkMaterial::setTextures(const QVariantMap& textureMap) {

    if (!normalName.isEmpty()) {
        auto url = textureMap.contains(normalName) ? textureMap[normalName].toUrl() : QUrl();
-        auto map = fetchTextureMap(url, NetworkTexture::NORMAL_TEXTURE, MapChannel::NORMAL_MAP);
+        auto map = fetchTextureMap(url, image::TextureUsage::NORMAL_TEXTURE, MapChannel::NORMAL_MAP);
        setTextureMap(MapChannel::NORMAL_MAP, map);
    }

    if (!roughnessName.isEmpty()) {
        auto url = textureMap.contains(roughnessName) ? textureMap[roughnessName].toUrl() : QUrl();
        // FIXME: If passing a gloss map instead of a roughmap how do we know?
-        auto map = fetchTextureMap(url, NetworkTexture::ROUGHNESS_TEXTURE, MapChannel::ROUGHNESS_MAP);
+        auto map = fetchTextureMap(url, image::TextureUsage::ROUGHNESS_TEXTURE, MapChannel::ROUGHNESS_MAP);
        setTextureMap(MapChannel::ROUGHNESS_MAP, map);
    }

    if (!metallicName.isEmpty()) {
        auto url = textureMap.contains(metallicName) ? textureMap[metallicName].toUrl() : QUrl();
        // FIXME: If passing a specular map instead of a metallic how do we know?
-        auto map = fetchTextureMap(url, NetworkTexture::METALLIC_TEXTURE, MapChannel::METALLIC_MAP);
+        auto map = fetchTextureMap(url, image::TextureUsage::METALLIC_TEXTURE, MapChannel::METALLIC_MAP);
        setTextureMap(MapChannel::METALLIC_MAP, map);
    }

    if (!occlusionName.isEmpty()) {
        auto url = textureMap.contains(occlusionName) ? textureMap[occlusionName].toUrl() : QUrl();
-        auto map = fetchTextureMap(url, NetworkTexture::OCCLUSION_TEXTURE, MapChannel::OCCLUSION_MAP);
+        auto map = fetchTextureMap(url, image::TextureUsage::OCCLUSION_TEXTURE, MapChannel::OCCLUSION_MAP);
        setTextureMap(MapChannel::OCCLUSION_MAP, map);
    }

    if (!emissiveName.isEmpty()) {
        auto url = textureMap.contains(emissiveName) ? textureMap[emissiveName].toUrl() : QUrl();
-        auto map = fetchTextureMap(url, NetworkTexture::EMISSIVE_TEXTURE, MapChannel::EMISSIVE_MAP);
+        auto map = fetchTextureMap(url, image::TextureUsage::EMISSIVE_TEXTURE, MapChannel::EMISSIVE_MAP);
        setTextureMap(MapChannel::EMISSIVE_MAP, map);
    }

    if (!scatteringName.isEmpty()) {
        auto url = textureMap.contains(scatteringName) ? textureMap[scatteringName].toUrl() : QUrl();
-        auto map = fetchTextureMap(url, NetworkTexture::SCATTERING_TEXTURE, MapChannel::SCATTERING_MAP);
+        auto map = fetchTextureMap(url, image::TextureUsage::SCATTERING_TEXTURE, MapChannel::SCATTERING_MAP);
        setTextureMap(MapChannel::SCATTERING_MAP, map);
    }

    if (!lightmapName.isEmpty()) {
        auto url = textureMap.contains(lightmapName) ? textureMap[lightmapName].toUrl() : QUrl();
-        auto map = fetchTextureMap(url, NetworkTexture::LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
+        auto map = fetchTextureMap(url, image::TextureUsage::LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
        map->setTextureTransform(_lightmapTransform);
        map->setLightmapOffsetScale(_lightmapParams.x, _lightmapParams.y);
        setTextureMap(MapChannel::LIGHTMAP_MAP, map);
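setTextures lets a material's textures be overridden at runtime from a name-to-URL map. A hedged sketch of a caller; the key strings are illustrative only, since the real keys are the texture names carried by the FBX material (albedoName, normalName, and so on):

    // Hypothetical override map; the key names depend on the material being edited.
    QVariantMap overrides;
    overrides["albedoTextureName"] = QUrl("https://example.com/brick_albedo.png");
    overrides["normalTextureName"] = QUrl("https://example.com/brick_normal.png");
    networkMaterial->setTextures(overrides);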
@ -180,13 +180,11 @@ protected:
    const bool& isOriginal() const { return _isOriginal; }

private:
-    using TextureType = NetworkTexture::Type;
-
    // Helpers for the ctors
    QUrl getTextureUrl(const QUrl& baseUrl, const FBXTexture& fbxTexture);
    model::TextureMapPointer fetchTextureMap(const QUrl& baseUrl, const FBXTexture& fbxTexture,
-                                             TextureType type, MapChannel channel);
+                                             image::TextureUsage::Type type, MapChannel channel);
-    model::TextureMapPointer fetchTextureMap(const QUrl& url, TextureType type, MapChannel channel);
+    model::TextureMapPointer fetchTextureMap(const QUrl& url, image::TextureUsage::Type type, MapChannel channel);

    Transform _albedoTransform;
    Transform _lightmapTransform;
@ -13,11 +13,12 @@

#include <mutex>

-#include <QNetworkReply>
-#include <QPainter>
+#include <QCryptographicHash>
+#include <QImageReader>
#include <QRunnable>
#include <QThreadPool>
-#include <QImageReader>
+#include <QNetworkReply>
+#include <QPainter>

#if DEBUG_DUMP_TEXTURE_LOADS
#include <QtCore/QFile>
@ -31,10 +32,13 @@

#include <ktx/KTX.h>

+#include <image/Image.h>
+
#include <NumericalConstants.h>
#include <shared/NsightHelpers.h>

#include <Finally.h>
+#include <Profile.h>

#include "ModelNetworkingLogging.h"
#include <Trace.h>
@ -51,16 +55,6 @@ TextureCache::TextureCache() :
    _ktxCache(KTX_DIRNAME, KTX_EXT) {
    setUnusedResourceCacheSize(0);
    setObjectName("TextureCache");
-
-    // Expose enum Type to JS/QML via properties
-    // Despite being one-off, this should be fine, because TextureCache is a SINGLETON_DEPENDENCY
-    QObject* type = new QObject(this);
-    type->setObjectName("TextureType");
-    setProperty("Type", QVariant::fromValue(type));
-    auto metaEnum = QMetaEnum::fromType<Type>();
-    for (int i = 0; i < metaEnum.keyCount(); ++i) {
-        type->setProperty(metaEnum.key(i), metaEnum.value(i));
-    }
}

TextureCache::~TextureCache() {
@ -117,7 +111,7 @@ const gpu::TexturePointer& TextureCache::getPermutationNormalTexture() {
        data[i + 2] = ((randvec.z + 1.0f) / 2.0f) * 255.0f;
    }

-    _permutationNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB), 256, 2));
+    _permutationNormalTexture = gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::NUINT8, gpu::RGB), 256, 2);
    _permutationNormalTexture->setStoredMipFormat(_permutationNormalTexture->getTexelFormat());
    _permutationNormalTexture->assignStoredMip(0, sizeof(data), data);
}
@ -131,7 +125,7 @@ const unsigned char OPAQUE_BLACK[] = { 0x00, 0x00, 0x00, 0xFF };

const gpu::TexturePointer& TextureCache::getWhiteTexture() {
    if (!_whiteTexture) {
-        _whiteTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
+        _whiteTexture = gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1);
        _whiteTexture->setSource("TextureCache::_whiteTexture");
        _whiteTexture->setStoredMipFormat(_whiteTexture->getTexelFormat());
        _whiteTexture->assignStoredMip(0, sizeof(OPAQUE_WHITE), OPAQUE_WHITE);
@ -141,7 +135,7 @@ const gpu::TexturePointer& TextureCache::getWhiteTexture() {

const gpu::TexturePointer& TextureCache::getGrayTexture() {
    if (!_grayTexture) {
-        _grayTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
+        _grayTexture = gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1);
        _grayTexture->setSource("TextureCache::_grayTexture");
        _grayTexture->setStoredMipFormat(_grayTexture->getTexelFormat());
        _grayTexture->assignStoredMip(0, sizeof(OPAQUE_GRAY), OPAQUE_GRAY);
@ -151,7 +145,7 @@ const gpu::TexturePointer& TextureCache::getGrayTexture() {

const gpu::TexturePointer& TextureCache::getBlueTexture() {
    if (!_blueTexture) {
-        _blueTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
+        _blueTexture = gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1);
        _blueTexture->setSource("TextureCache::_blueTexture");
        _blueTexture->setStoredMipFormat(_blueTexture->getTexelFormat());
        _blueTexture->assignStoredMip(0, sizeof(OPAQUE_BLUE), OPAQUE_BLUE);
@ -161,7 +155,7 @@ const gpu::TexturePointer& TextureCache::getBlueTexture() {

const gpu::TexturePointer& TextureCache::getBlackTexture() {
    if (!_blackTexture) {
-        _blackTexture = gpu::TexturePointer(gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1));
+        _blackTexture = gpu::Texture::createStrict(gpu::Element::COLOR_RGBA_32, 1, 1);
        _blackTexture->setSource("TextureCache::_blackTexture");
        _blackTexture->setStoredMipFormat(_blackTexture->getTexelFormat());
        _blackTexture->assignStoredMip(0, sizeof(OPAQUE_BLACK), OPAQUE_BLACK);
@ -172,18 +166,18 @@ const gpu::TexturePointer& TextureCache::getBlackTexture() {
/// Extra data for creating textures.
class TextureExtra {
public:
-    NetworkTexture::Type type;
+    image::TextureUsage::Type type;
    const QByteArray& content;
    int maxNumPixels;
};

ScriptableResource* TextureCache::prefetch(const QUrl& url, int type, int maxNumPixels) {
    auto byteArray = QByteArray();
-    TextureExtra extra = { (Type)type, byteArray, maxNumPixels };
+    TextureExtra extra = { (image::TextureUsage::Type)type, byteArray, maxNumPixels };
    return ResourceCache::prefetch(url, &extra);
}

-NetworkTexturePointer TextureCache::getTexture(const QUrl& url, Type type, const QByteArray& content, int maxNumPixels) {
+NetworkTexturePointer TextureCache::getTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels) {
    TextureExtra extra = { type, content, maxNumPixels };
    return ResourceCache::getResource(url, QUrl(), &extra).staticCast<NetworkTexture>();
}
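After this change, callers request textures with an image::TextureUsage::Type instead of the old NetworkTexture::Type. A brief sketch of the call pattern used throughout this commit (the URL is a placeholder):

    auto textureCache = DependencyManager::get<TextureCache>();
    // Fetch a color map; the usage type drives colorspace handling and, with this PR, BC compression.
    auto albedo = textureCache->getTexture(QUrl("https://example.com/brick_albedo.png"),
                                           image::TextureUsage::ALBEDO_TEXTURE);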
@ -216,8 +210,7 @@ gpu::TexturePointer TextureCache::cacheTextureByHash(const std::string& hash, co
    return result;
}

-gpu::TexturePointer getFallbackTextureForType(NetworkTexture::Type type) {
+gpu::TexturePointer getFallbackTextureForType(image::TextureUsage::Type type) {
    gpu::TexturePointer result;
    auto textureCache = DependencyManager::get<TextureCache>();
    // Since this can be called on a background thread, there's a chance that the cache
@ -226,116 +219,51 @@ gpu::TexturePointer getFallbackTextureForType(NetworkTexture::Type type) {
        return result;
    }
    switch (type) {
-        case NetworkTexture::DEFAULT_TEXTURE:
-        case NetworkTexture::ALBEDO_TEXTURE:
-        case NetworkTexture::ROUGHNESS_TEXTURE:
-        case NetworkTexture::OCCLUSION_TEXTURE:
+        case image::TextureUsage::DEFAULT_TEXTURE:
+        case image::TextureUsage::ALBEDO_TEXTURE:
+        case image::TextureUsage::ROUGHNESS_TEXTURE:
+        case image::TextureUsage::OCCLUSION_TEXTURE:
            result = textureCache->getWhiteTexture();
            break;

-        case NetworkTexture::NORMAL_TEXTURE:
+        case image::TextureUsage::NORMAL_TEXTURE:
            result = textureCache->getBlueTexture();
            break;

-        case NetworkTexture::EMISSIVE_TEXTURE:
-        case NetworkTexture::LIGHTMAP_TEXTURE:
+        case image::TextureUsage::EMISSIVE_TEXTURE:
+        case image::TextureUsage::LIGHTMAP_TEXTURE:
            result = textureCache->getBlackTexture();
            break;

-        case NetworkTexture::BUMP_TEXTURE:
-        case NetworkTexture::SPECULAR_TEXTURE:
-        case NetworkTexture::GLOSS_TEXTURE:
-        case NetworkTexture::CUBE_TEXTURE:
-        case NetworkTexture::CUSTOM_TEXTURE:
-        case NetworkTexture::STRICT_TEXTURE:
+        case image::TextureUsage::BUMP_TEXTURE:
+        case image::TextureUsage::SPECULAR_TEXTURE:
+        case image::TextureUsage::GLOSS_TEXTURE:
+        case image::TextureUsage::CUBE_TEXTURE:
+        case image::TextureUsage::STRICT_TEXTURE:
        default:
            break;
    }
    return result;
}

-
-NetworkTexture::TextureLoaderFunc getTextureLoaderForType(NetworkTexture::Type type,
-                                                           const QVariantMap& options = QVariantMap()) {
-    using Type = NetworkTexture;
-
-    switch (type) {
-        case Type::ALBEDO_TEXTURE: {
-            return model::TextureUsage::createAlbedoTextureFromImage;
-            break;
-        }
-        case Type::EMISSIVE_TEXTURE: {
-            return model::TextureUsage::createEmissiveTextureFromImage;
-            break;
-        }
-        case Type::LIGHTMAP_TEXTURE: {
-            return model::TextureUsage::createLightmapTextureFromImage;
-            break;
-        }
-        case Type::CUBE_TEXTURE: {
-            if (options.value("generateIrradiance", true).toBool()) {
-                return model::TextureUsage::createCubeTextureFromImage;
-            } else {
-                return model::TextureUsage::createCubeTextureFromImageWithoutIrradiance;
-            }
-            break;
-        }
-        case Type::BUMP_TEXTURE: {
-            return model::TextureUsage::createNormalTextureFromBumpImage;
-            break;
-        }
-        case Type::NORMAL_TEXTURE: {
-            return model::TextureUsage::createNormalTextureFromNormalImage;
-            break;
-        }
-        case Type::ROUGHNESS_TEXTURE: {
-            return model::TextureUsage::createRoughnessTextureFromImage;
-            break;
-        }
-        case Type::GLOSS_TEXTURE: {
-            return model::TextureUsage::createRoughnessTextureFromGlossImage;
-            break;
-        }
-        case Type::SPECULAR_TEXTURE: {
-            return model::TextureUsage::createMetallicTextureFromImage;
-            break;
-        }
-        case Type::STRICT_TEXTURE: {
-            return model::TextureUsage::createStrict2DTextureFromImage;
-            break;
-        }
-        case Type::CUSTOM_TEXTURE: {
-            Q_ASSERT(false);
-            return NetworkTexture::TextureLoaderFunc();
-            break;
-        }
-
-        case Type::DEFAULT_TEXTURE:
-        default: {
-            return model::TextureUsage::create2DTextureFromImage;
-            break;
-        }
-    }
-}
-
/// Returns a texture version of an image file
-gpu::TexturePointer TextureCache::getImageTexture(const QString& path, Type type, QVariantMap options) {
+gpu::TexturePointer TextureCache::getImageTexture(const QString& path, image::TextureUsage::Type type, QVariantMap options) {
    QImage image = QImage(path);
-    auto loader = getTextureLoaderForType(type, options);
+    auto loader = image::TextureUsage::getTextureLoaderForType(type, options);
    return gpu::TexturePointer(loader(image, QUrl::fromLocalFile(path).fileName().toStdString()));
}

QSharedPointer<Resource> TextureCache::createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
                                                      const void* extra) {
    const TextureExtra* textureExtra = static_cast<const TextureExtra*>(extra);
-    auto type = textureExtra ? textureExtra->type : Type::DEFAULT_TEXTURE;
+    auto type = textureExtra ? textureExtra->type : image::TextureUsage::DEFAULT_TEXTURE;
    auto content = textureExtra ? textureExtra->content : QByteArray();
    auto maxNumPixels = textureExtra ? textureExtra->maxNumPixels : ABSOLUTE_MAX_TEXTURE_NUM_PIXELS;
    NetworkTexture* texture = new NetworkTexture(url, type, content, maxNumPixels);
    return QSharedPointer<Resource>(texture, &Resource::deleter);
}

-NetworkTexture::NetworkTexture(const QUrl& url, Type type, const QByteArray& content, int maxNumPixels) :
+NetworkTexture::NetworkTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels) :
    Resource(url),
    _type(type),
    _maxNumPixels(maxNumPixels)
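With the per-type loader lookup moved into the image library, TextureCache::getImageTexture simply forwards to image::TextureUsage::getTextureLoaderForType. A hedged usage sketch; the file path is a placeholder and the option key is the one used by the removed loader above:

    // Load a cubemap from a local file without generating an irradiance map.
    QVariantMap options;
    options["generateIrradiance"] = false;
    gpu::TexturePointer skybox = TextureCache::getImageTexture("/tmp/skybox.png",
                                                               image::TextureUsage::CUBE_TEXTURE,
                                                               options);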
@ -353,13 +281,6 @@ NetworkTexture::NetworkTexture(const QUrl& url, Type type, const QByteArray& con
    }
}

-NetworkTexture::TextureLoaderFunc NetworkTexture::getTextureLoader() const {
-    if (_type == CUSTOM_TEXTURE) {
-        return _textureLoader;
-    }
-    return getTextureLoaderForType(_type);
-}
-
void NetworkTexture::setImage(gpu::TexturePointer texture, int originalWidth,
                              int originalHeight) {
    _originalWidth = originalWidth;
@ -384,34 +305,22 @@ void NetworkTexture::setImage(gpu::TexturePointer texture, int originalWidth,
}

gpu::TexturePointer NetworkTexture::getFallbackTexture() const {
-    if (_type == CUSTOM_TEXTURE) {
-        return gpu::TexturePointer();
-    }
    return getFallbackTextureForType(_type);
}

-class Reader : public QRunnable {
-public:
-    Reader(const QWeakPointer<Resource>& resource, const QUrl& url);
-    void run() override final;
-    virtual void read() = 0;
-
-protected:
-    QWeakPointer<Resource> _resource;
-    QUrl _url;
-};
-
-class ImageReader : public Reader {
+class ImageReader : public QRunnable {
public:
    ImageReader(const QWeakPointer<Resource>& resource, const QUrl& url,
-                const QByteArray& data, const std::string& hash, int maxNumPixels);
-    void read() override final;
+                const QByteArray& data, int maxNumPixels);
+    void run() override final;
+    void read();

private:
    static void listSupportedImageFormats();

+    QWeakPointer<Resource> _resource;
+    QUrl _url;
    QByteArray _content;
-    std::string _hash;
    int _maxNumPixels;
};

@ -420,71 +329,16 @@ void NetworkTexture::downloadFinished(const QByteArray& data) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void NetworkTexture::loadContent(const QByteArray& content) {
|
void NetworkTexture::loadContent(const QByteArray& content) {
|
||||||
// Hash the source image to for KTX caching
|
QThreadPool::globalInstance()->start(new ImageReader(_self, _url, content, _maxNumPixels));
|
||||||
std::string hash;
|
|
||||||
{
|
|
||||||
QCryptographicHash hasher(QCryptographicHash::Md5);
|
|
||||||
hasher.addData(content);
|
|
||||||
hash = hasher.result().toHex().toStdString();
|
|
||||||
}
|
|
||||||
|
|
||||||
auto textureCache = static_cast<TextureCache*>(_cache.data());
|
|
||||||
|
|
||||||
if (textureCache != nullptr) {
|
|
||||||
// If we already have a live texture with the same hash, use it
|
|
||||||
auto texture = textureCache->getTextureByHash(hash);
|
|
||||||
|
|
||||||
// If there is no live texture, check if there's an existing KTX file
|
|
||||||
if (!texture) {
|
|
||||||
KTXFilePointer ktxFile = textureCache->_ktxCache.getFile(hash);
|
|
||||||
if (ktxFile) {
|
|
||||||
texture.reset(gpu::Texture::unserialize(ktxFile->getFilepath()));
|
|
||||||
if (texture) {
|
|
||||||
texture = textureCache->cacheTextureByHash(hash, texture);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we found the texture either because it's in use or via KTX deserialization,
|
|
||||||
// set the image and return immediately.
|
|
||||||
if (texture) {
|
|
||||||
setImage(texture, texture->getWidth(), texture->getHeight());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We failed to find an existing live or KTX texture, so trigger an image reader
|
|
||||||
QThreadPool::globalInstance()->start(new ImageReader(_self, _url, content, hash, _maxNumPixels));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Reader::Reader(const QWeakPointer<Resource>& resource, const QUrl& url) :
|
ImageReader::ImageReader(const QWeakPointer<Resource>& resource, const QUrl& url, const QByteArray& data, int maxNumPixels) :
|
||||||
_resource(resource), _url(url) {
|
_resource(resource),
|
||||||
|
_url(url),
|
||||||
|
_content(data),
|
||||||
|
_maxNumPixels(maxNumPixels)
|
||||||
|
{
|
||||||
DependencyManager::get<StatTracker>()->incrementStat("PendingProcessing");
|
DependencyManager::get<StatTracker>()->incrementStat("PendingProcessing");
|
||||||
}
|
|
||||||
|
|
||||||
void Reader::run() {
|
|
||||||
PROFILE_RANGE_EX(resource_parse_image, __FUNCTION__, 0xffff0000, 0, { { "url", _url.toString() } });
|
|
||||||
DependencyManager::get<StatTracker>()->decrementStat("PendingProcessing");
|
|
||||||
CounterStat counter("Processing");
|
|
||||||
|
|
||||||
auto originalPriority = QThread::currentThread()->priority();
|
|
||||||
if (originalPriority == QThread::InheritPriority) {
|
|
||||||
originalPriority = QThread::NormalPriority;
|
|
||||||
}
|
|
||||||
QThread::currentThread()->setPriority(QThread::LowPriority);
|
|
||||||
Finally restorePriority([originalPriority]{ QThread::currentThread()->setPriority(originalPriority); });
|
|
||||||
|
|
||||||
if (!_resource.data()) {
|
|
||||||
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
read();
|
|
||||||
}
|
|
||||||
|
|
||||||
ImageReader::ImageReader(const QWeakPointer<Resource>& resource, const QUrl& url,
|
|
||||||
const QByteArray& data, const std::string& hash, int maxNumPixels) :
|
|
||||||
Reader(resource, url), _content(data), _hash(hash), _maxNumPixels(maxNumPixels) {
|
|
||||||
listSupportedImageFormats();
|
listSupportedImageFormats();
|
||||||
|
|
||||||
#if DEBUG_DUMP_TEXTURE_LOADS
|
#if DEBUG_DUMP_TEXTURE_LOADS
|
||||||
|
@ -515,89 +369,110 @@ void ImageReader::listSupportedImageFormats() {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
void ImageReader::read() {
|
void ImageReader::run() {
|
||||||
// Help the QImage loader by extracting the image file format from the url filename ext.
|
PROFILE_RANGE_EX(resource_parse_image, __FUNCTION__, 0xffff0000, 0, { { "url", _url.toString() } });
|
||||||
// Some tga are not created properly without it.
|
DependencyManager::get<StatTracker>()->decrementStat("PendingProcessing");
|
||||||
auto filename = _url.fileName().toStdString();
|
CounterStat counter("Processing");
|
||||||
auto filenameExtension = filename.substr(filename.find_last_of('.') + 1);
|
|
||||||
QImage image = QImage::fromData(_content, filenameExtension.c_str());
|
|
||||||
int imageWidth = image.width();
|
|
||||||
int imageHeight = image.height();
|
|
||||||
|
|
||||||
// Validate that the image loaded
|
auto originalPriority = QThread::currentThread()->priority();
|
||||||
if (imageWidth == 0 || imageHeight == 0 || image.format() == QImage::Format_Invalid) {
|
if (originalPriority == QThread::InheritPriority) {
|
||||||
QString reason(filenameExtension.empty() ? "" : "(no file extension)");
|
originalPriority = QThread::NormalPriority;
|
||||||
qCWarning(modelnetworking) << "Failed to load" << _url << reason;
|
}
|
||||||
|
QThread::currentThread()->setPriority(QThread::LowPriority);
|
||||||
|
Finally restorePriority([originalPriority] { QThread::currentThread()->setPriority(originalPriority); });
|
||||||
|
|
||||||
|
read();
|
||||||
|
}
|
||||||
|
|
||||||
|
void ImageReader::read() {
|
||||||
|
auto resource = _resource.lock(); // to ensure the resource is still needed
|
||||||
|
if (!resource) {
|
||||||
|
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
auto networkTexture = resource.staticCast<NetworkTexture>();
|
||||||
|
|
||||||
// Validate the image is less than _maxNumPixels, and downscale if necessary
|
// Hash the source image to for KTX caching
|
||||||
if (imageWidth * imageHeight > _maxNumPixels) {
|
std::string hash;
|
||||||
float scaleFactor = sqrtf(_maxNumPixels / (float)(imageWidth * imageHeight));
|
{
|
||||||
int originalWidth = imageWidth;
|
QCryptographicHash hasher(QCryptographicHash::Md5);
|
||||||
int originalHeight = imageHeight;
|
hasher.addData(_content);
|
||||||
imageWidth = (int)(scaleFactor * (float)imageWidth + 0.5f);
|
hash = hasher.result().toHex().toStdString();
|
||||||
imageHeight = (int)(scaleFactor * (float)imageHeight + 0.5f);
|
|
||||||
QImage newImage = image.scaled(QSize(imageWidth, imageHeight), Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
|
|
||||||
image.swap(newImage);
|
|
||||||
qCDebug(modelnetworking).nospace() << "Downscaled " << _url << " (" <<
|
|
||||||
QSize(originalWidth, originalHeight) << " to " <<
|
|
||||||
QSize(imageWidth, imageHeight) << ")";
|
|
||||||
}
|
}
|
||||||
|
|
||||||
gpu::TexturePointer texture = nullptr;
|
// Maybe load from cache
|
||||||
|
auto textureCache = DependencyManager::get<TextureCache>();
|
||||||
|
if (textureCache) {
|
||||||
|
// If we already have a live texture with the same hash, use it
|
||||||
|
auto texture = textureCache->getTextureByHash(hash);
|
||||||
|
|
||||||
|
// If there is no live texture, check if there's an existing KTX file
|
||||||
|
if (!texture) {
|
||||||
|
KTXFilePointer ktxFile = textureCache->_ktxCache.getFile(hash);
|
||||||
|
if (ktxFile) {
|
||||||
|
texture = gpu::Texture::unserialize(ktxFile->getFilepath());
|
||||||
|
if (texture) {
|
||||||
|
texture = textureCache->cacheTextureByHash(hash, texture);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we found the texture either because it's in use or via KTX deserialization,
|
||||||
|
// set the image and return immediately.
|
||||||
|
if (texture) {
|
||||||
|
QMetaObject::invokeMethod(resource.data(), "setImage",
|
||||||
|
Q_ARG(gpu::TexturePointer, texture),
|
||||||
|
Q_ARG(int, texture->getWidth()),
|
||||||
|
Q_ARG(int, texture->getHeight()));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Proccess new texture
|
||||||
|
gpu::TexturePointer texture;
|
||||||
{
|
{
|
||||||
auto resource = _resource.lock(); // to ensure the resource is still needed
|
PROFILE_RANGE_EX(resource_parse_image_raw, __FUNCTION__, 0xffff0000, 0);
|
||||||
if (!resource) {
|
texture = image::processImage(_content, _url.toString().toStdString(), _maxNumPixels, networkTexture->getTextureType());
|
||||||
qCDebug(modelnetworking) << _url << "loading stopped; resource out of scope";
|
|
||||||
|
if (!texture) {
|
||||||
|
qCWarning(modelnetworking) << "Could not process:" << _url;
|
||||||
|
QMetaObject::invokeMethod(resource.data(), "setImage",
|
||||||
|
Q_ARG(gpu::TexturePointer, texture),
|
||||||
|
Q_ARG(int, 0),
|
||||||
|
Q_ARG(int, 0));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
auto url = _url.toString().toStdString();
|
texture->setSourceHash(hash);
|
||||||
|
texture->setFallbackTexture(networkTexture->getFallbackTexture());
|
||||||
|
}
|
||||||
|
|
||||||
PROFILE_RANGE_EX(resource_parse_image_raw, __FUNCTION__, 0xffff0000, 0);
|
// Save the image into a KTXFile
|
||||||
// Load the image into a gpu::Texture
|
if (texture && textureCache) {
|
||||||
auto networkTexture = resource.staticCast<NetworkTexture>();
|
|
||||||
texture.reset(networkTexture->getTextureLoader()(image, url));
|
|
||||||
texture->setSource(url);
|
|
||||||
if (texture) {
|
|
||||||
texture->setFallbackTexture(networkTexture->getFallbackTexture());
|
|
||||||
}
|
|
||||||
|
|
||||||
auto textureCache = DependencyManager::get<TextureCache>();
|
|
||||||
// Save the image into a KTXFile
|
|
||||||
auto memKtx = gpu::Texture::serialize(*texture);
|
auto memKtx = gpu::Texture::serialize(*texture);
|
||||||
if (!memKtx) {
|
|
||||||
qCWarning(modelnetworking) << "Unable to serialize texture to KTX " << _url;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (memKtx && textureCache) {
|
if (memKtx) {
|
||||||
const char* data = reinterpret_cast<const char*>(memKtx->_storage->data());
|
const char* data = reinterpret_cast<const char*>(memKtx->_storage->data());
|
||||||
size_t length = memKtx->_storage->size();
|
size_t length = memKtx->_storage->size();
|
||||||
KTXFilePointer file;
|
|
||||||
auto& ktxCache = textureCache->_ktxCache;
|
auto& ktxCache = textureCache->_ktxCache;
|
||||||
if (!memKtx || !(file = ktxCache.writeFile(data, KTXCache::Metadata(_hash, length)))) {
|
networkTexture->_file = ktxCache.writeFile(data, KTXCache::Metadata(hash, length));
|
||||||
|
if (!networkTexture->_file) {
|
||||||
qCWarning(modelnetworking) << _url << "file cache failed";
|
qCWarning(modelnetworking) << _url << "file cache failed";
|
||||||
} else {
|
} else {
|
||||||
resource.staticCast<NetworkTexture>()->_file = file;
|
texture->setKtxBacking(networkTexture->_file->getFilepath());
|
||||||
texture->setKtxBacking(file->getFilepath());
|
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
qCWarning(modelnetworking) << "Unable to serialize texture to KTX " << _url;
|
||||||
}
|
}
|
||||||
|
|
||||||
// We replace the texture with the one stored in the cache. This deals with the possible race condition of two different
|
// We replace the texture with the one stored in the cache. This deals with the possible race condition of two different
|
||||||
// images with the same hash being loaded concurrently. Only one of them will make it into the cache by hash first and will
|
// images with the same hash being loaded concurrently. Only one of them will make it into the cache by hash first and will
|
||||||
// be the winner
|
// be the winner
|
||||||
if (textureCache) {
|
texture = textureCache->cacheTextureByHash(hash, texture);
|
||||||
texture = textureCache->cacheTextureByHash(_hash, texture);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
auto resource = _resource.lock(); // to ensure the resource is still needed
|
QMetaObject::invokeMethod(resource.data(), "setImage",
|
||||||
if (resource) {
|
Q_ARG(gpu::TexturePointer, texture),
|
||||||
QMetaObject::invokeMethod(resource.data(), "setImage",
|
Q_ARG(int, texture->getWidth()),
|
||||||
Q_ARG(gpu::TexturePointer, texture),
|
Q_ARG(int, texture->getHeight()));
|
||||||
Q_ARG(int, imageWidth), Q_ARG(int, imageHeight));
|
|
||||||
} else {
|
|
||||||
qCDebug(modelnetworking) << _url << "loading stopped; resource out of scope";
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -22,6 +22,7 @@
#include <DependencyManager.h>
#include <ResourceCache.h>
#include <model/TextureMap.h>
+#include <image/Image.h>

#include "KTXCache.h"

@ -43,29 +44,7 @@ class NetworkTexture : public Resource, public Texture {
    Q_OBJECT

public:
-    enum Type {
-        DEFAULT_TEXTURE,
-        STRICT_TEXTURE,
-        ALBEDO_TEXTURE,
-        NORMAL_TEXTURE,
-        BUMP_TEXTURE,
-        SPECULAR_TEXTURE,
-        METALLIC_TEXTURE = SPECULAR_TEXTURE, // for now spec and metallic texture are the same, converted to grey
-        ROUGHNESS_TEXTURE,
-        GLOSS_TEXTURE,
-        EMISSIVE_TEXTURE,
-        CUBE_TEXTURE,
-        OCCLUSION_TEXTURE,
-        SCATTERING_TEXTURE = OCCLUSION_TEXTURE,
-        LIGHTMAP_TEXTURE,
-        CUSTOM_TEXTURE
-    };
-    Q_ENUM(Type)
-
-    typedef gpu::Texture* TextureLoader(const QImage& image, const std::string& srcImageName);
-    using TextureLoaderFunc = std::function<TextureLoader>;
-
-    NetworkTexture(const QUrl& url, Type type, const QByteArray& content, int maxNumPixels);
+    NetworkTexture(const QUrl& url, image::TextureUsage::Type type, const QByteArray& content, int maxNumPixels);

    QString getType() const override { return "NetworkTexture"; }

@ -73,9 +52,8 @@
    int getOriginalHeight() const { return _originalHeight; }
    int getWidth() const { return _width; }
    int getHeight() const { return _height; }
-    Type getTextureType() const { return _type; }
+    image::TextureUsage::Type getTextureType() const { return _type; }

-    TextureLoaderFunc getTextureLoader() const;
    gpu::TexturePointer getFallbackTexture() const;

signals:
@ -93,8 +71,7 @@
    friend class KTXReader;
    friend class ImageReader;

-    Type _type;
+    image::TextureUsage::Type _type;
-    TextureLoaderFunc _textureLoader { [](const QImage&, const std::string&){ return nullptr; } };
    KTXFilePointer _file;
    int _originalWidth { 0 };
    int _originalHeight { 0 };
@ -110,8 +87,6 @@ class TextureCache : public ResourceCache, public Dependency {
    Q_OBJECT
    SINGLETON_DEPENDENCY

-    using Type = NetworkTexture::Type;
-
public:
    /// Returns the ID of the permutation/normal texture used for Perlin noise shader programs. This texture
    /// has two lines: the first, a set of random numbers in [0, 255] to be used as permutation offsets, and
@ -131,10 +106,10 @@
    const gpu::TexturePointer& getBlackTexture();

    /// Returns a texture version of an image file
-    static gpu::TexturePointer getImageTexture(const QString& path, Type type = Type::DEFAULT_TEXTURE, QVariantMap options = QVariantMap());
+    static gpu::TexturePointer getImageTexture(const QString& path, image::TextureUsage::Type type = image::TextureUsage::DEFAULT_TEXTURE, QVariantMap options = QVariantMap());

    /// Loads a texture from the specified URL.
-    NetworkTexturePointer getTexture(const QUrl& url, Type type = Type::DEFAULT_TEXTURE,
+    NetworkTexturePointer getTexture(const QUrl& url, image::TextureUsage::Type type = image::TextureUsage::DEFAULT_TEXTURE,
        const QByteArray& content = QByteArray(), int maxNumPixels = ABSOLUTE_MAX_TEXTURE_NUM_PIXELS);

@ -1,5 +1,4 @@
set(TARGET_NAME model)
AUTOSCRIBE_SHADER_LIB(gpu model)
setup_hifi_library()
-link_hifi_libraries(shared ktx gpu)
+link_hifi_libraries(shared ktx gpu image)
@ -10,81 +10,9 @@
|
||||||
//
|
//
|
||||||
#include "TextureMap.h"
|
#include "TextureMap.h"
|
||||||
|
|
||||||
#include <ktx/KTX.h>
|
|
||||||
|
|
||||||
#include <QImage>
|
|
||||||
#include <QPainter>
|
|
||||||
#include <QDebug>
|
|
||||||
#include <QStandardPaths>
|
|
||||||
#include <QFileInfo>
|
|
||||||
#include <QDir>
|
|
||||||
#include <QCryptographicHash>
|
|
||||||
#include <Profile.h>
|
|
||||||
|
|
||||||
#include "ModelLogging.h"
|
|
||||||
using namespace model;
|
using namespace model;
|
||||||
using namespace gpu;
|
using namespace gpu;
|
||||||
|
|
||||||
// FIXME: Declare this to enable compression
|
|
||||||
//#define COMPRESS_TEXTURES
|
|
||||||
static const uvec2 SPARSE_PAGE_SIZE(128);
|
|
||||||
static const uvec2 MAX_TEXTURE_SIZE(4096);
|
|
||||||
bool DEV_DECIMATE_TEXTURES = false;
|
|
||||||
|
|
||||||
bool needsSparseRectification(const uvec2& size) {
|
|
||||||
// Don't attempt to rectify small textures (textures less than the sparse page size in any dimension)
|
|
||||||
if (glm::any(glm::lessThan(size, SPARSE_PAGE_SIZE))) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't rectify textures that are already an exact multiple of sparse page size
|
|
||||||
if (uvec2(0) == (size % SPARSE_PAGE_SIZE)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Texture is not sparse compatible, but is bigger than the sparse page size in both dimensions, rectify!
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
uvec2 rectifyToSparseSize(const uvec2& size) {
|
|
||||||
uvec2 pages = ((size / SPARSE_PAGE_SIZE) + glm::clamp(size % SPARSE_PAGE_SIZE, uvec2(0), uvec2(1)));
|
|
||||||
uvec2 result = pages * SPARSE_PAGE_SIZE;
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::atomic<size_t> DECIMATED_TEXTURE_COUNT { 0 };
|
|
||||||
std::atomic<size_t> RECTIFIED_TEXTURE_COUNT { 0 };
|
|
||||||
|
|
||||||
QImage processSourceImage(const QImage& srcImage, bool cubemap) {
|
|
||||||
PROFILE_RANGE(resource_parse, "processSourceImage");
|
|
||||||
const uvec2 srcImageSize = toGlm(srcImage.size());
|
|
||||||
uvec2 targetSize = srcImageSize;
|
|
||||||
|
|
||||||
while (glm::any(glm::greaterThan(targetSize, MAX_TEXTURE_SIZE))) {
|
|
||||||
targetSize /= 2;
|
|
||||||
}
|
|
||||||
if (targetSize != srcImageSize) {
|
|
||||||
++DECIMATED_TEXTURE_COUNT;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!cubemap && needsSparseRectification(targetSize)) {
|
|
||||||
++RECTIFIED_TEXTURE_COUNT;
|
|
||||||
targetSize = rectifyToSparseSize(targetSize);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (DEV_DECIMATE_TEXTURES && glm::all(glm::greaterThanEqual(targetSize / SPARSE_PAGE_SIZE, uvec2(2)))) {
|
|
||||||
targetSize /= 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (targetSize != srcImageSize) {
|
|
||||||
PROFILE_RANGE(resource_parse, "processSourceImage Rectify");
|
|
||||||
qCDebug(modelLog) << "Resizing texture from " << srcImageSize.x << "x" << srcImageSize.y << " to " << targetSize.x << "x" << targetSize.y;
|
|
||||||
return srcImage.scaled(fromGlm(targetSize), Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
|
|
||||||
}
|
|
||||||
|
|
||||||
return srcImage;
|
|
||||||
}
|
|
||||||
|
|
||||||
void TextureMap::setTextureSource(TextureSourcePointer& textureSource) {
|
void TextureMap::setTextureSource(TextureSourcePointer& textureSource) {
|
||||||
_textureSource = textureSource;
|
_textureSource = textureSource;
|
||||||
}
|
}
|
||||||
|
@ -113,758 +41,3 @@ void TextureMap::setLightmapOffsetScale(float offset, float scale) {
|
||||||
_lightmapOffsetScale.x = offset;
|
_lightmapOffsetScale.x = offset;
|
||||||
_lightmapOffsetScale.y = scale;
|
_lightmapOffsetScale.y = scale;
|
||||||
}
|
}
|
||||||
|
|
||||||
const QImage TextureUsage::process2DImageColor(const QImage& srcImage, bool& validAlpha, bool& alphaAsMask) {
|
|
||||||
PROFILE_RANGE(resource_parse, "process2DImageColor");
|
|
||||||
QImage image = processSourceImage(srcImage, false);
|
|
||||||
validAlpha = false;
|
|
||||||
alphaAsMask = true;
|
|
||||||
const uint8 OPAQUE_ALPHA = 255;
|
|
||||||
const uint8 TRANSPARENT_ALPHA = 0;
|
|
||||||
if (image.hasAlphaChannel()) {
|
|
||||||
if (image.format() != QImage::Format_ARGB32) {
|
|
||||||
image = image.convertToFormat(QImage::Format_ARGB32);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Figure out if we can use a mask for alpha or not
|
|
||||||
int numOpaques = 0;
|
|
||||||
int numTranslucents = 0;
|
|
||||||
const int NUM_PIXELS = image.width() * image.height();
|
|
||||||
const int MAX_TRANSLUCENT_PIXELS_FOR_ALPHAMASK = (int)(0.05f * (float)(NUM_PIXELS));
|
|
||||||
const QRgb* data = reinterpret_cast<const QRgb*>(image.constBits());
|
|
||||||
for (int i = 0; i < NUM_PIXELS; ++i) {
|
|
||||||
auto alpha = qAlpha(data[i]);
|
|
||||||
if (alpha == OPAQUE_ALPHA) {
|
|
||||||
numOpaques++;
|
|
||||||
} else if (alpha != TRANSPARENT_ALPHA) {
|
|
||||||
if (++numTranslucents > MAX_TRANSLUCENT_PIXELS_FOR_ALPHAMASK) {
|
|
||||||
alphaAsMask = false;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
validAlpha = (numOpaques != NUM_PIXELS);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Force all the color images to be rgba32bits
|
|
||||||
if (image.format() != QImage::Format_ARGB32) {
|
|
||||||
image = image.convertToFormat(QImage::Format_ARGB32);
|
|
||||||
}
|
|
||||||
|
|
||||||
return image;
|
|
||||||
}
|
|
||||||
|
|
||||||

void TextureUsage::defineColorTexelFormats(gpu::Element& formatGPU, gpu::Element& formatMip,
                                           const QImage& image, bool isLinear, bool doCompress) {

#ifdef COMPRESS_TEXTURES
#else
    doCompress = false;
#endif

    if (image.hasAlphaChannel()) {
        gpu::Semantic gpuSemantic;
        gpu::Semantic mipSemantic;
        if (isLinear) {
            mipSemantic = gpu::BGRA;
            if (doCompress) {
                gpuSemantic = gpu::COMPRESSED_RGBA;
            } else {
                gpuSemantic = gpu::RGBA;
            }
        } else {
            mipSemantic = gpu::SBGRA;
            if (doCompress) {
                gpuSemantic = gpu::COMPRESSED_SRGBA;
            } else {
                gpuSemantic = gpu::SRGBA;
            }
        }
        formatGPU = gpu::Element(gpu::VEC4, gpu::NUINT8, gpuSemantic);
        formatMip = gpu::Element(gpu::VEC4, gpu::NUINT8, mipSemantic);
    } else {
        gpu::Semantic gpuSemantic;
        gpu::Semantic mipSemantic;
        if (isLinear) {
            mipSemantic = gpu::RGB;
            if (doCompress) {
                gpuSemantic = gpu::COMPRESSED_RGB;
            } else {
                gpuSemantic = gpu::RGB;
            }
        } else {
            mipSemantic = gpu::SRGB;
            if (doCompress) {
                gpuSemantic = gpu::COMPRESSED_SRGB;
            } else {
                gpuSemantic = gpu::SRGB;
            }
        }
        formatGPU = gpu::Element(gpu::VEC3, gpu::NUINT8, gpuSemantic);
        formatMip = gpu::Element(gpu::VEC3, gpu::NUINT8, mipSemantic);
    }
}

#define CPU_MIPMAPS 1

void generateMips(gpu::Texture* texture, QImage& image, bool fastResize) {
#if CPU_MIPMAPS
    PROFILE_RANGE(resource_parse, "generateMips");
    auto numMips = texture->getNumMips();
    for (uint16 level = 1; level < numMips; ++level) {
        QSize mipSize(texture->evalMipWidth(level), texture->evalMipHeight(level));
        if (fastResize) {
            image = image.scaled(mipSize);
            texture->assignStoredMip(level, image.byteCount(), image.constBits());
        } else {
            QImage mipImage = image.scaled(mipSize, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
            texture->assignStoredMip(level, mipImage.byteCount(), mipImage.constBits());
        }
    }

#else
    texture->autoGenerateMips(-1);
#endif
}

void generateFaceMips(gpu::Texture* texture, QImage& image, uint8 face) {
#if CPU_MIPMAPS
    PROFILE_RANGE(resource_parse, "generateFaceMips");
    auto numMips = texture->getNumMips();
    for (uint16 level = 1; level < numMips; ++level) {
        QSize mipSize(texture->evalMipWidth(level), texture->evalMipHeight(level));
        QImage mipImage = image.scaled(mipSize, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
        texture->assignStoredMipFace(level, face, mipImage.byteCount(), mipImage.constBits());
    }
#else
    texture->autoGenerateMips(-1);
#endif
}
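// Illustrative sketch (assumption, not from the original commit): evalMipWidth/evalMipHeight are
// presumably the usual power-of-two mip chain, roughly max(1, dimension >> level), so a 1024x512
// base image would be resized to 512x256 at level 1, 256x128 at level 2, and so on down to 1x1.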

gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool isStrict) {
    PROFILE_RANGE(resource_parse, "process2DTextureColorFromImage");
    bool validAlpha = false;
    bool alphaAsMask = true;
    QImage image = process2DImageColor(srcImage, validAlpha, alphaAsMask);

    gpu::Texture* theTexture = nullptr;

    if ((image.width() > 0) && (image.height() > 0)) {
        gpu::Element formatGPU;
        gpu::Element formatMip;
        defineColorTexelFormats(formatGPU, formatMip, image, isLinear, doCompress);

        if (isStrict) {
            theTexture = (gpu::Texture::createStrict(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        } else {
            theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        }
        theTexture->setSource(srcImageName);
        auto usage = gpu::Texture::Usage::Builder().withColor();
        if (validAlpha) {
            usage.withAlpha();
            if (alphaAsMask) {
                usage.withAlphaMask();
            }
        }
        theTexture->setUsage(usage.build());
        theTexture->setStoredMipFormat(formatMip);
        theTexture->assignStoredMip(0, image.byteCount(), image.constBits());

        if (generateMips) {
            ::generateMips(theTexture, image, false);
        }
        theTexture->setSource(srcImageName);
    }

    return theTexture;
}

gpu::Texture* TextureUsage::createStrict2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false, false, true, true);
}

gpu::Texture* TextureUsage::create2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false, false, true);
}

gpu::Texture* TextureUsage::createAlbedoTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}

gpu::Texture* TextureUsage::createEmissiveTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}

gpu::Texture* TextureUsage::createLightmapTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}

gpu::Texture* TextureUsage::createNormalTextureFromNormalImage(const QImage& srcImage, const std::string& srcImageName) {
    PROFILE_RANGE(resource_parse, "createNormalTextureFromNormalImage");
    QImage image = processSourceImage(srcImage, false);

    // Make sure the normal map source image is ARGB32
    if (image.format() != QImage::Format_ARGB32) {
        image = image.convertToFormat(QImage::Format_ARGB32);
    }

    gpu::Texture* theTexture = nullptr;
    if ((image.width() > 0) && (image.height() > 0)) {

        gpu::Element formatMip = gpu::Element::COLOR_BGRA_32;
        gpu::Element formatGPU = gpu::Element::COLOR_RGBA_32;

        theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        theTexture->setSource(srcImageName);
        theTexture->setStoredMipFormat(formatMip);
        theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
        generateMips(theTexture, image, true);

        theTexture->setSource(srcImageName);
    }

    return theTexture;
}

int clampPixelCoordinate(int coordinate, int maxCoordinate) {
    return coordinate - ((int)(coordinate < 0) * coordinate) + ((int)(coordinate > maxCoordinate) * (maxCoordinate - coordinate));
}

const int RGBA_MAX = 255;

// transform -1 - 1 to 0 - 255 (from sobel value to rgb)
double mapComponent(double sobelValue) {
    const double factor = RGBA_MAX / 2.0;
    return (sobelValue + 1.0) * factor;
}
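// Worked values for mapComponent, derived from the code above: a Sobel-space component of -1.0
// maps to 0, 0.0 maps to 127.5, and 1.0 maps to 255, i.e. the usual [-1, 1] to [0, 255] encoding
// used to pack a tangent-space normal into an 8-bit color channel.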

gpu::Texture* TextureUsage::createNormalTextureFromBumpImage(const QImage& srcImage, const std::string& srcImageName) {
    PROFILE_RANGE(resource_parse, "createNormalTextureFromBumpImage");
    QImage image = processSourceImage(srcImage, false);

    if (image.format() != QImage::Format_Grayscale8) {
        image = image.convertToFormat(QImage::Format_Grayscale8);
    }

    // PR 5540 by AlessandroSigna integrated here as a specialized TextureLoader for bumpmaps
    // The conversion is done using the Sobel Filter to calculate the derivatives from the grayscale image
    const double pStrength = 2.0;
    int width = image.width();
    int height = image.height();

    QImage result(width, height, QImage::Format_ARGB32);

    for (int i = 0; i < width; i++) {
        const int iNextClamped = clampPixelCoordinate(i + 1, width - 1);
        const int iPrevClamped = clampPixelCoordinate(i - 1, width - 1);

        for (int j = 0; j < height; j++) {
            const int jNextClamped = clampPixelCoordinate(j + 1, height - 1);
            const int jPrevClamped = clampPixelCoordinate(j - 1, height - 1);

            // surrounding pixels
            const QRgb topLeft = image.pixel(iPrevClamped, jPrevClamped);
            const QRgb top = image.pixel(iPrevClamped, j);
            const QRgb topRight = image.pixel(iPrevClamped, jNextClamped);
            const QRgb right = image.pixel(i, jNextClamped);
            const QRgb bottomRight = image.pixel(iNextClamped, jNextClamped);
            const QRgb bottom = image.pixel(iNextClamped, j);
            const QRgb bottomLeft = image.pixel(iNextClamped, jPrevClamped);
            const QRgb left = image.pixel(i, jPrevClamped);

            // take their gray intensities
            // since it's a grayscale image, the value of each component RGB is the same
            const double tl = qRed(topLeft);
            const double t = qRed(top);
            const double tr = qRed(topRight);
            const double r = qRed(right);
            const double br = qRed(bottomRight);
            const double b = qRed(bottom);
            const double bl = qRed(bottomLeft);
            const double l = qRed(left);

            // apply the sobel filter
            const double dX = (tr + pStrength * r + br) - (tl + pStrength * l + bl);
            const double dY = (bl + pStrength * b + br) - (tl + pStrength * t + tr);
            const double dZ = RGBA_MAX / pStrength;

            glm::vec3 v(dX, dY, dZ);
            glm::normalize(v);

            // convert to rgb from the value obtained computing the filter
            QRgb qRgbValue = qRgba(mapComponent(v.z), mapComponent(v.y), mapComponent(v.x), 1.0);
            result.setPixel(i, j, qRgbValue);
        }
    }

    gpu::Texture* theTexture = nullptr;
    if ((result.width() > 0) && (result.height() > 0)) {

        gpu::Element formatMip = gpu::Element::COLOR_BGRA_32;
        gpu::Element formatGPU = gpu::Element::COLOR_RGBA_32;

        theTexture = (gpu::Texture::create2D(formatGPU, result.width(), result.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        theTexture->setSource(srcImageName);
        theTexture->setStoredMipFormat(formatMip);
        theTexture->assignStoredMip(0, result.byteCount(), result.constBits());
        generateMips(theTexture, result, true);

        theTexture->setSource(srcImageName);
    }

    return theTexture;
}

gpu::Texture* TextureUsage::createRoughnessTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    PROFILE_RANGE(resource_parse, "createRoughnessTextureFromImage");
    QImage image = processSourceImage(srcImage, false);
    if (!image.hasAlphaChannel()) {
        if (image.format() != QImage::Format_RGB888) {
            image = image.convertToFormat(QImage::Format_RGB888);
        }
    } else {
        if (image.format() != QImage::Format_RGBA8888) {
            image = image.convertToFormat(QImage::Format_RGBA8888);
        }
    }

    image = image.convertToFormat(QImage::Format_Grayscale8);

    gpu::Texture* theTexture = nullptr;
    if ((image.width() > 0) && (image.height() > 0)) {
#ifdef COMPRESS_TEXTURES
        gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::COMPRESSED_R);
#else
        gpu::Element formatGPU = gpu::Element::COLOR_R_8;
#endif
        gpu::Element formatMip = gpu::Element::COLOR_R_8;

        theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        theTexture->setSource(srcImageName);
        theTexture->setStoredMipFormat(formatMip);
        theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
        generateMips(theTexture, image, true);

        theTexture->setSource(srcImageName);
    }

    return theTexture;
}

gpu::Texture* TextureUsage::createRoughnessTextureFromGlossImage(const QImage& srcImage, const std::string& srcImageName) {
    PROFILE_RANGE(resource_parse, "createRoughnessTextureFromGlossImage");
    QImage image = processSourceImage(srcImage, false);
    if (!image.hasAlphaChannel()) {
        if (image.format() != QImage::Format_RGB888) {
            image = image.convertToFormat(QImage::Format_RGB888);
        }
    } else {
        if (image.format() != QImage::Format_RGBA8888) {
            image = image.convertToFormat(QImage::Format_RGBA8888);
        }
    }

    // Gloss turned into Rough
    image.invertPixels(QImage::InvertRgba);

    image = image.convertToFormat(QImage::Format_Grayscale8);

    gpu::Texture* theTexture = nullptr;
    if ((image.width() > 0) && (image.height() > 0)) {

#ifdef COMPRESS_TEXTURES
        gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::COMPRESSED_R);
#else
        gpu::Element formatGPU = gpu::Element::COLOR_R_8;
#endif
        gpu::Element formatMip = gpu::Element::COLOR_R_8;

        theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        theTexture->setSource(srcImageName);
        theTexture->setStoredMipFormat(formatMip);
        theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
        generateMips(theTexture, image, true);

        theTexture->setSource(srcImageName);
    }

    return theTexture;
}

gpu::Texture* TextureUsage::createMetallicTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    PROFILE_RANGE(resource_parse, "createMetallicTextureFromImage");
    QImage image = processSourceImage(srcImage, false);
    if (!image.hasAlphaChannel()) {
        if (image.format() != QImage::Format_RGB888) {
            image = image.convertToFormat(QImage::Format_RGB888);
        }
    } else {
        if (image.format() != QImage::Format_RGBA8888) {
            image = image.convertToFormat(QImage::Format_RGBA8888);
        }
    }

    image = image.convertToFormat(QImage::Format_Grayscale8);

    gpu::Texture* theTexture = nullptr;
    if ((image.width() > 0) && (image.height() > 0)) {

#ifdef COMPRESS_TEXTURES
        gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::COMPRESSED_R);
#else
        gpu::Element formatGPU = gpu::Element::COLOR_R_8;
#endif
        gpu::Element formatMip = gpu::Element::COLOR_R_8;

        theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
        theTexture->setSource(srcImageName);
        theTexture->setStoredMipFormat(formatMip);
        theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
        generateMips(theTexture, image, true);

        theTexture->setSource(srcImageName);
    }

    return theTexture;
}

class CubeLayout {
public:

    enum SourceProjection {
        FLAT = 0,
        EQUIRECTANGULAR,
    };
    int _type = FLAT;
    int _widthRatio = 1;
    int _heightRatio = 1;

    class Face {
    public:
        int _x = 0;
        int _y = 0;
        bool _horizontalMirror = false;
        bool _verticalMirror = false;

        Face() {}
        Face(int x, int y, bool horizontalMirror, bool verticalMirror) : _x(x), _y(y), _horizontalMirror(horizontalMirror), _verticalMirror(verticalMirror) {}
    };

    Face _faceXPos;
    Face _faceXNeg;
    Face _faceYPos;
    Face _faceYNeg;
    Face _faceZPos;
    Face _faceZNeg;

    CubeLayout(int wr, int hr, Face fXP, Face fXN, Face fYP, Face fYN, Face fZP, Face fZN) :
        _type(FLAT),
        _widthRatio(wr),
        _heightRatio(hr),
        _faceXPos(fXP),
        _faceXNeg(fXN),
        _faceYPos(fYP),
        _faceYNeg(fYN),
        _faceZPos(fZP),
        _faceZNeg(fZN) {}

    CubeLayout(int wr, int hr) :
        _type(EQUIRECTANGULAR),
        _widthRatio(wr),
        _heightRatio(hr) {}

    static const CubeLayout CUBEMAP_LAYOUTS[];
    static const int NUM_CUBEMAP_LAYOUTS;

    static int findLayout(int width, int height) {
        // Find the layout of the cubemap in the 2D image
        int foundLayout = -1;
        for (int i = 0; i < NUM_CUBEMAP_LAYOUTS; i++) {
            if ((height * CUBEMAP_LAYOUTS[i]._widthRatio) == (width * CUBEMAP_LAYOUTS[i]._heightRatio)) {
                foundLayout = i;
                break;
            }
        }
        return foundLayout;
    }

    static QImage extractEquirectangularFace(const QImage& source, gpu::Texture::CubeFace face, int faceWidth) {
        QImage image(faceWidth, faceWidth, source.format());

        glm::vec2 dstInvSize(1.0f / (float)image.width(), 1.0f / (float)image.height());

        struct CubeToXYZ {
            gpu::Texture::CubeFace _face;
            CubeToXYZ(gpu::Texture::CubeFace face) : _face(face) {}

            glm::vec3 xyzFrom(const glm::vec2& uv) {
                auto faceDir = glm::normalize(glm::vec3(-1.0f + 2.0f * uv.x, -1.0f + 2.0f * uv.y, 1.0f));

                switch (_face) {
                    case gpu::Texture::CubeFace::CUBE_FACE_BACK_POS_Z:
                        return glm::vec3(-faceDir.x, faceDir.y, faceDir.z);
                    case gpu::Texture::CubeFace::CUBE_FACE_FRONT_NEG_Z:
                        return glm::vec3(faceDir.x, faceDir.y, -faceDir.z);
                    case gpu::Texture::CubeFace::CUBE_FACE_LEFT_NEG_X:
                        return glm::vec3(faceDir.z, faceDir.y, faceDir.x);
                    case gpu::Texture::CubeFace::CUBE_FACE_RIGHT_POS_X:
                        return glm::vec3(-faceDir.z, faceDir.y, -faceDir.x);
                    case gpu::Texture::CubeFace::CUBE_FACE_BOTTOM_NEG_Y:
                        return glm::vec3(-faceDir.x, -faceDir.z, faceDir.y);
                    case gpu::Texture::CubeFace::CUBE_FACE_TOP_POS_Y:
                    default:
                        return glm::vec3(-faceDir.x, faceDir.z, -faceDir.y);
                }
            }
        };
        CubeToXYZ cubeToXYZ(face);

        struct RectToXYZ {
            RectToXYZ() {}

            glm::vec2 uvFrom(const glm::vec3& xyz) {
                auto flatDir = glm::normalize(glm::vec2(xyz.x, xyz.z));
                auto uvRad = glm::vec2(atan2(flatDir.x, flatDir.y), asin(xyz.y));

                const float LON_TO_RECT_U = 1.0f / (glm::pi<float>());
                const float LAT_TO_RECT_V = 2.0f / glm::pi<float>();
                return glm::vec2(0.5f * uvRad.x * LON_TO_RECT_U + 0.5f, 0.5f * uvRad.y * LAT_TO_RECT_V + 0.5f);
            }
        };
        RectToXYZ rectToXYZ;

        int srcFaceHeight = source.height();
        int srcFaceWidth = source.width();

        glm::vec2 dstCoord;
        glm::ivec2 srcPixel;
        for (int y = 0; y < faceWidth; ++y) {
            dstCoord.y = 1.0f - (y + 0.5f) * dstInvSize.y; // Fill cube face images from top to bottom
            for (int x = 0; x < faceWidth; ++x) {
                dstCoord.x = (x + 0.5f) * dstInvSize.x;

                auto xyzDir = cubeToXYZ.xyzFrom(dstCoord);
                auto srcCoord = rectToXYZ.uvFrom(xyzDir);

                srcPixel.x = floor(srcCoord.x * srcFaceWidth);
                // Flip the vertical axis to QImage going top to bottom
                srcPixel.y = floor((1.0f - srcCoord.y) * srcFaceHeight);

                if (((uint32) srcPixel.x < (uint32) source.width()) && ((uint32) srcPixel.y < (uint32) source.height())) {
                    image.setPixel(x, y, source.pixel(QPoint(srcPixel.x, srcPixel.y)));

                    // Keep for debug, this is showing the dir as a color
                    // glm::u8vec4 rgba((xyzDir.x + 1.0)*0.5 * 256, (xyzDir.y + 1.0)*0.5 * 256, (xyzDir.z + 1.0)*0.5 * 256, 256);
                    // unsigned int val = 0xff000000 | (rgba.r) | (rgba.g << 8) | (rgba.b << 16);
                    // image.setPixel(x, y, val);
                }
            }
        }
        return image;
    }
};
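// Illustrative usage sketch (not part of the original commit), given the CUBEMAP_LAYOUTS order
// defined below: findLayout(2048, 1024) matches the 2/1 equirectangular entry (index 0),
// findLayout(1024, 6144) the 1/6 vertical strip (index 1), and findLayout(4096, 3072) the 4/3
// horizontal cross (index 2); an unrecognized aspect ratio returns -1.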

const CubeLayout CubeLayout::CUBEMAP_LAYOUTS[] = {

    // Here is the expected layout for the faces in an image with the 2/1 aspect ratio:
    // This is detected as an Equirectangular projection
    //                     WIDTH
    //       <--------------------------->
    //    ^  +------+------+------+------+
    //    H  |      |      |      |      |
    //    E  |      |      |      |      |
    //    I  |      |      |      |      |
    //    G  +------+------+------+------+
    //    H  |      |      |      |      |
    //    T  |      |      |      |      |
    //    |  |      |      |      |      |
    //    v  +------+------+------+------+
    //
    //    FaceWidth = width = height / 6
    { 2, 1 },

    // Here is the expected layout for the faces in an image with the 1/6 aspect ratio:
    //
    //         WIDTH
    //        <------>
    //    ^   +------+
    //    |   |      |
    //    |   |  +X  |
    //    |   |      |
    //    H   +------+
    //    E   |      |
    //    I   |  -X  |
    //    G   |      |
    //    H   +------+
    //    T   |      |
    //    |   |  +Y  |
    //    |   |      |
    //    |   +------+
    //    |   |      |
    //    |   |  -Y  |
    //    |   |      |
    //    H   +------+
    //    E   |      |
    //    I   |  +Z  |
    //    G   |      |
    //    H   +------+
    //    T   |      |
    //    |   |  -Z  |
    //    |   |      |
    //    V   +------+
    //
    //    FaceWidth = width = height / 6
    { 1, 6,
        { 0, 0, true, false },
        { 0, 1, true, false },
        { 0, 2, false, true },
        { 0, 3, false, true },
        { 0, 4, true, false },
        { 0, 5, true, false }
    },

    // Here is the expected layout for the faces in an image with the 3/4 aspect ratio:
    //
    //       <-----------WIDTH----------->
    //    ^  +------+------+------+------+
    //    |  |      |      |      |      |
    //    |  |      |  +Y  |      |      |
    //    |  |      |      |      |      |
    //    H  +------+------+------+------+
    //    E  |      |      |      |      |
    //    I  |  -X  |  -Z  |  +X  |  +Z  |
    //    G  |      |      |      |      |
    //    H  +------+------+------+------+
    //    T  |      |      |      |      |
    //    |  |      |  -Y  |      |      |
    //    |  |      |      |      |      |
    //    V  +------+------+------+------+
    //
    //    FaceWidth = width / 4 = height / 3
    { 4, 3,
        { 2, 1, true, false },
        { 0, 1, true, false },
        { 1, 0, false, true },
        { 1, 2, false, true },
        { 3, 1, true, false },
        { 1, 1, true, false }
    },

    // Here is the expected layout for the faces in an image with the 4/3 aspect ratio:
    //
    //       <-------WIDTH-------->
    //    ^  +------+------+------+
    //    |  |      |      |      |
    //    |  |      |  +Y  |      |
    //    |  |      |      |      |
    //    H  +------+------+------+
    //    E  |      |      |      |
    //    I  |  -X  |  -Z  |  +X  |
    //    G  |      |      |      |
    //    H  +------+------+------+
    //    T  |      |      |      |
    //    |  |      |  -Y  |      |
    //    |  |      |      |      |
    //    |  +------+------+------+
    //    |  |      |      |      |
    //    |  |      |  +Z! |      |  <+Z is upside down!
    //    |  |      |      |      |
    //    V  +------+------+------+
    //
    //    FaceWidth = width / 3 = height / 4
    { 3, 4,
        { 2, 1, true, false },
        { 0, 1, true, false },
        { 1, 0, false, true },
        { 1, 2, false, true },
        { 1, 3, false, true },
        { 1, 1, true, false }
    }
};
const int CubeLayout::NUM_CUBEMAP_LAYOUTS = sizeof(CubeLayout::CUBEMAP_LAYOUTS) / sizeof(CubeLayout);
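// Note, derived from the array above: with the four initializers listed, the sizeof division
// evaluates NUM_CUBEMAP_LAYOUTS to 4.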

gpu::Texture* TextureUsage::processCubeTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool generateIrradiance) {
    PROFILE_RANGE(resource_parse, "processCubeTextureColorFromImage");

    gpu::Texture* theTexture = nullptr;
    if ((srcImage.width() > 0) && (srcImage.height() > 0)) {
        QImage image = processSourceImage(srcImage, true);
        if (image.format() != QImage::Format_ARGB32) {
            image = image.convertToFormat(QImage::Format_ARGB32);
        }

        gpu::Element formatGPU;
        gpu::Element formatMip;
        defineColorTexelFormats(formatGPU, formatMip, image, isLinear, doCompress);

        // Find the layout of the cubemap in the 2D image
        // Use the original image size since processSourceImage may have altered the size / aspect ratio
        int foundLayout = CubeLayout::findLayout(srcImage.width(), srcImage.height());

        std::vector<QImage> faces;
        // If found, go extract the faces as separate images
        if (foundLayout >= 0) {
            auto& layout = CubeLayout::CUBEMAP_LAYOUTS[foundLayout];
            if (layout._type == CubeLayout::FLAT) {
                int faceWidth = image.width() / layout._widthRatio;

                faces.push_back(image.copy(QRect(layout._faceXPos._x * faceWidth, layout._faceXPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceXPos._horizontalMirror, layout._faceXPos._verticalMirror));
                faces.push_back(image.copy(QRect(layout._faceXNeg._x * faceWidth, layout._faceXNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceXNeg._horizontalMirror, layout._faceXNeg._verticalMirror));
                faces.push_back(image.copy(QRect(layout._faceYPos._x * faceWidth, layout._faceYPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceYPos._horizontalMirror, layout._faceYPos._verticalMirror));
                faces.push_back(image.copy(QRect(layout._faceYNeg._x * faceWidth, layout._faceYNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceYNeg._horizontalMirror, layout._faceYNeg._verticalMirror));
                faces.push_back(image.copy(QRect(layout._faceZPos._x * faceWidth, layout._faceZPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceZPos._horizontalMirror, layout._faceZPos._verticalMirror));
                faces.push_back(image.copy(QRect(layout._faceZNeg._x * faceWidth, layout._faceZNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceZNeg._horizontalMirror, layout._faceZNeg._verticalMirror));
            } else if (layout._type == CubeLayout::EQUIRECTANGULAR) {
                // The face width is estimated from the input image
                const int EQUIRECT_FACE_RATIO_TO_WIDTH = 4;
                const int EQUIRECT_MAX_FACE_WIDTH = 2048;
                int faceWidth = std::min(image.width() / EQUIRECT_FACE_RATIO_TO_WIDTH, EQUIRECT_MAX_FACE_WIDTH);
                for (int face = gpu::Texture::CUBE_FACE_RIGHT_POS_X; face < gpu::Texture::NUM_CUBE_FACES; face++) {
                    QImage faceImage = CubeLayout::extractEquirectangularFace(image, (gpu::Texture::CubeFace) face, faceWidth);
                    faces.push_back(faceImage);
                }
            }
        } else {
            qCDebug(modelLog) << "Failed to find a known cube map layout from this image:" << QString(srcImageName.c_str());
            return nullptr;
        }

        // If the 6 faces have been created go on and define the true Texture
        if (faces.size() == gpu::Texture::NUM_FACES_PER_TYPE[gpu::Texture::TEX_CUBE]) {
            theTexture = gpu::Texture::createCube(formatGPU, faces[0].width(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
            theTexture->setSource(srcImageName);
            theTexture->setStoredMipFormat(formatMip);
            int f = 0;
            for (auto& face : faces) {
                theTexture->assignStoredMipFace(0, f, face.byteCount(), face.constBits());
                if (generateMips) {
                    generateFaceMips(theTexture, face, f);
                }
                f++;
            }

            // Generate irradiance while we are at it
            if (generateIrradiance) {
                PROFILE_RANGE(resource_parse, "generateIrradiance");
                theTexture->generateIrradiance();
            }

            theTexture->setSource(srcImageName);
        }
    }

    return theTexture;
}

gpu::Texture* TextureUsage::createCubeTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
    return processCubeTextureColorFromImage(srcImage, srcImageName, false, true, true, true);
}

gpu::Texture* TextureUsage::createCubeTextureFromImageWithoutIrradiance(const QImage& srcImage, const std::string& srcImageName) {
    return processCubeTextureColorFromImage(srcImage, srcImageName, false, true, true, false);
}
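// Illustrative example (not part of the original commit): for a 4096x2048 equirectangular source,
// faceWidth = std::min(4096 / EQUIRECT_FACE_RATIO_TO_WIDTH, EQUIRECT_MAX_FACE_WIDTH) = 1024, so
// each of the six extracted cube faces is 1024x1024 before mips and optional irradiance generation.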
@@ -13,48 +13,10 @@

#include "gpu/Texture.h"

#include "Material.h"
#include "Transform.h"

#include <qurl.h>

class QImage;

namespace model {

typedef glm::vec3 Color;

class TextureUsage {
public:
    gpu::Texture::Type _type{ gpu::Texture::TEX_2D };
    Material::MapFlags _materialUsage{ MaterialKey::ALBEDO_MAP };

    int _environmentUsage = 0;

    static gpu::Texture* create2DTextureFromImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createStrict2DTextureFromImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createAlbedoTextureFromImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createEmissiveTextureFromImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createNormalTextureFromNormalImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createNormalTextureFromBumpImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createRoughnessTextureFromImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createRoughnessTextureFromGlossImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createMetallicTextureFromImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createCubeTextureFromImage(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createCubeTextureFromImageWithoutIrradiance(const QImage& image, const std::string& srcImageName);
    static gpu::Texture* createLightmapTextureFromImage(const QImage& image, const std::string& srcImageName);

    static const QImage process2DImageColor(const QImage& srcImage, bool& validAlpha, bool& alphaAsMask);
    static void defineColorTexelFormats(gpu::Element& formatGPU, gpu::Element& formatMip,
                                        const QImage& srcImage, bool isLinear, bool doCompress);
    static gpu::Texture* process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool isStrict = false);
    static gpu::Texture* processCubeTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool generateIrradiance);

};

class TextureMap {
public:
    TextureMap() {}
@@ -1,5 +1,5 @@
set(TARGET_NAME procedural)
AUTOSCRIBE_SHADER_LIB(gpu model)
setup_hifi_library()
-link_hifi_libraries(shared gpu gpu-gl networking model model-networking)
+link_hifi_libraries(shared gpu gpu-gl networking model model-networking image)
@@ -3,7 +3,7 @@ AUTOSCRIBE_SHADER_LIB(gpu model render)
# pull in the resources.qrc file
qt5_add_resources(QT_RESOURCES_FILE "${CMAKE_CURRENT_SOURCE_DIR}/res/fonts/fonts.qrc")
setup_hifi_library(Widgets OpenGL Network Qml Quick Script)
-link_hifi_libraries(shared ktx gpu model model-networking render animation fbx entities)
+link_hifi_libraries(shared ktx gpu model model-networking render animation fbx entities image)

if (NOT ANDROID)
    target_nsight()
@@ -74,11 +74,11 @@ void AmbientOcclusionFramebuffer::allocate() {
    auto width = _frameSize.x;
    auto height = _frameSize.y;

-   _occlusionTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _occlusionTexture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT));
    _occlusionFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("occlusion"));
    _occlusionFramebuffer->setRenderBuffer(0, _occlusionTexture);

-   _occlusionBlurredTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _occlusionBlurredTexture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT));
    _occlusionBlurredFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("occlusionBlurred"));
    _occlusionBlurredFramebuffer->setRenderBuffer(0, _occlusionBlurredTexture);
}
@@ -52,7 +52,7 @@ const gpu::PipelinePointer& Antialiasing::getAntialiasingPipeline() {
    _antialiasingBuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("antialiasing"));
    auto format = gpu::Element::COLOR_SRGBA_32; // DependencyManager::get<FramebufferCache>()->getLightingTexture()->getTexelFormat();
    auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
-   _antialiasingTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler));
+   _antialiasingTexture = gpu::Texture::createRenderBuffer(format, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
    _antialiasingBuffer->setRenderBuffer(0, _antialiasingTexture);
}
@@ -53,9 +53,9 @@ void DeferredFramebuffer::allocate() {

    auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);

-   _deferredColorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(colorFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler));
-   _deferredNormalTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(linearFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler));
-   _deferredSpecularTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(linearFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler));
+   _deferredColorTexture = gpu::Texture::createRenderBuffer(colorFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
+   _deferredNormalTexture = gpu::Texture::createRenderBuffer(linearFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
+   _deferredSpecularTexture = gpu::Texture::createRenderBuffer(linearFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);

    _deferredFramebuffer->setRenderBuffer(0, _deferredColorTexture);
    _deferredFramebuffer->setRenderBuffer(1, _deferredNormalTexture);

@@ -65,7 +65,7 @@ void DeferredFramebuffer::allocate() {

    auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
    if (!_primaryDepthTexture) {
-       _primaryDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(depthFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler));
+       _primaryDepthTexture = gpu::Texture::createRenderBuffer(depthFormat, width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
    }

    _deferredFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);

@@ -75,7 +75,7 @@ void DeferredFramebuffer::allocate() {

    auto smoothSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR);

-   _lightingTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10), width, height, gpu::Texture::SINGLE_MIP, defaultSampler));
+   _lightingTexture = gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::R11G11B10), width, height, gpu::Texture::SINGLE_MIP, defaultSampler);
    _lightingFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("lighting"));
    _lightingFramebuffer->setRenderBuffer(0, _lightingTexture);
    _lightingFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, depthFormat);
@@ -496,14 +496,14 @@ void PreparePrimaryFramebuffer::run(const RenderContextPointer& renderContext, g
    auto colorFormat = gpu::Element::COLOR_SRGBA_32;

    auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
-   auto primaryColorTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(colorFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler));
+   auto primaryColorTexture = gpu::Texture::createRenderBuffer(colorFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler);

    _primaryFramebuffer->setRenderBuffer(0, primaryColorTexture);

    auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
-   auto primaryDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(depthFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler));
+   auto primaryDepthTexture = gpu::Texture::createRenderBuffer(depthFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler);

    _primaryFramebuffer->setDepthStencilBuffer(primaryDepthTexture, depthFormat);
}
@@ -199,7 +199,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
    {
        // Grab a texture map representing the different status icons and assign that to the drawStatsuJob
        auto iconMapPath = PathUtils::resourcesPath() + "icons/statusIconAtlas.svg";
-       auto statusIconMap = DependencyManager::get<TextureCache>()->getImageTexture(iconMapPath, NetworkTexture::STRICT_TEXTURE);
+       auto statusIconMap = DependencyManager::get<TextureCache>()->getImageTexture(iconMapPath, image::TextureUsage::STRICT_TEXTURE);
        task.addJob<DrawStatus>("DrawStatus", opaques, DrawStatus(statusIconMap));
    }
}
@@ -75,11 +75,11 @@ void PrepareFramebuffer::run(const RenderContextPointer& renderContext,

    auto colorFormat = gpu::Element::COLOR_SRGBA_32;
    auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT);
-   auto colorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler));
+   auto colorTexture = gpu::Texture::create2D(colorFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler);
    _framebuffer->setRenderBuffer(0, colorTexture);

    auto depthFormat = gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::DEPTH_STENCIL); // Depth24_Stencil8 texel format
-   auto depthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler));
+   auto depthTexture = gpu::Texture::create2D(depthFormat, frameSize.x, frameSize.y, gpu::Texture::SINGLE_MIP, defaultSampler);
    _framebuffer->setDepthStencilBuffer(depthTexture, depthFormat);
}
@@ -414,7 +414,7 @@ gpu::TexturePointer SubsurfaceScatteringResource::generateScatteringProfile(Rend
    const int PROFILE_RESOLUTION = 512;
    // const auto pixelFormat = gpu::Element::COLOR_SRGBA_32;
    const auto pixelFormat = gpu::Element::COLOR_R11G11B10;
-   auto profileMap = gpu::TexturePointer(gpu::Texture::createRenderBuffer(pixelFormat, PROFILE_RESOLUTION, 1, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
+   auto profileMap = gpu::Texture::createRenderBuffer(pixelFormat, PROFILE_RESOLUTION, 1, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
    profileMap->setSource("Generated Scattering Profile");
    diffuseProfileGPU(profileMap, args);
    return profileMap;

@@ -425,7 +425,7 @@ gpu::TexturePointer SubsurfaceScatteringResource::generatePreIntegratedScatterin
    const int TABLE_RESOLUTION = 512;
    // const auto pixelFormat = gpu::Element::COLOR_SRGBA_32;
    const auto pixelFormat = gpu::Element::COLOR_R11G11B10;
-   auto scatteringLUT = gpu::TexturePointer(gpu::Texture::createRenderBuffer(pixelFormat, TABLE_RESOLUTION, TABLE_RESOLUTION, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
+   auto scatteringLUT = gpu::Texture::createRenderBuffer(pixelFormat, TABLE_RESOLUTION, TABLE_RESOLUTION, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
    //diffuseScatter(scatteringLUT);
    scatteringLUT->setSource("Generated pre-integrated scattering");
    diffuseScatterGPU(profile, scatteringLUT, args);

@@ -434,7 +434,7 @@ gpu::TexturePointer SubsurfaceScatteringResource::generatePreIntegratedScatterin

gpu::TexturePointer SubsurfaceScatteringResource::generateScatteringSpecularBeckmann(RenderArgs* args) {
    const int SPECULAR_RESOLUTION = 256;
-   auto beckmannMap = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, SPECULAR_RESOLUTION, SPECULAR_RESOLUTION, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP)));
+   auto beckmannMap = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, SPECULAR_RESOLUTION, SPECULAR_RESOLUTION, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
    beckmannMap->setSource("Generated beckmannMap");
    computeSpecularBeckmannGPU(beckmannMap, args);
    return beckmannMap;
@@ -72,19 +72,19 @@ void LinearDepthFramebuffer::allocate() {
    auto height = _frameSize.y;

    // For Linear Depth:
-   _linearDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RED), width, height, gpu::Texture::SINGLE_MIP,
-       gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _linearDepthTexture = gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RED), width, height, gpu::Texture::SINGLE_MIP,
+       gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT));
    _linearDepthFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("linearDepth"));
    _linearDepthFramebuffer->setRenderBuffer(0, _linearDepthTexture);
    _linearDepthFramebuffer->setDepthStencilBuffer(_primaryDepthTexture, _primaryDepthTexture->getTexelFormat());

    // For Downsampling:
    const uint16_t HALF_LINEAR_DEPTH_MAX_MIP_LEVEL = 5;
-   _halfLinearDepthTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RED), _halfFrameSize.x, _halfFrameSize.y, HALF_LINEAR_DEPTH_MAX_MIP_LEVEL,
-       gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _halfLinearDepthTexture = gpu::Texture::createRenderBuffer(gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::RED), _halfFrameSize.x, _halfFrameSize.y, HALF_LINEAR_DEPTH_MAX_MIP_LEVEL,
+       gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT));

-   _halfNormalTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, _halfFrameSize.x, _halfFrameSize.y, gpu::Texture::SINGLE_MIP,
-       gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _halfNormalTexture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, _halfFrameSize.x, _halfFrameSize.y, gpu::Texture::SINGLE_MIP,
+       gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT));

    _downsampleFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("halfLinearDepth"));
    _downsampleFramebuffer->setRenderBuffer(0, _halfLinearDepthTexture);

@@ -304,15 +304,15 @@ void SurfaceGeometryFramebuffer::allocate() {
    auto width = _frameSize.x;
    auto height = _frameSize.y;

-   _curvatureTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _curvatureTexture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT));
    _curvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::curvature"));
    _curvatureFramebuffer->setRenderBuffer(0, _curvatureTexture);

-   _lowCurvatureTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _lowCurvatureTexture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT));
    _lowCurvatureFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::lowCurvature"));
    _lowCurvatureFramebuffer->setRenderBuffer(0, _lowCurvatureTexture);

-   _blurringTexture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT)));
+   _blurringTexture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, width, height, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT));
    _blurringFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("surfaceGeometry::blurring"));
    _blurringFramebuffer->setRenderBuffer(0, _blurringTexture);
}
@@ -207,8 +207,8 @@ void Font::read(QIODevice& in) {
        formatGPU = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA);
        formatMip = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::BGRA);
    }
-   _texture = gpu::TexturePointer(gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::SINGLE_MIP,
-       gpu::Sampler(gpu::Sampler::FILTER_MIN_POINT_MAG_LINEAR)));
+   _texture = gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::SINGLE_MIP,
+       gpu::Sampler(gpu::Sampler::FILTER_MIN_POINT_MAG_LINEAR));
    _texture->setStoredMipFormat(formatMip);
    _texture->assignStoredMip(0, image.byteCount(), image.constBits());
}
@@ -108,7 +108,7 @@ bool BlurInOutResource::updateResources(const gpu::FramebufferPointer& sourceFra
        // _blurredFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
        //}
        auto blurringSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT);
-       auto blurringTarget = gpu::TexturePointer(gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), gpu::Texture::SINGLE_MIP, blurringSampler));
+       auto blurringTarget = gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), gpu::Texture::SINGLE_MIP, blurringSampler);
        _blurredFramebuffer->setRenderBuffer(0, blurringTarget);
    }

@@ -131,7 +131,7 @@ bool BlurInOutResource::updateResources(const gpu::FramebufferPointer& sourceFra
        _outputFramebuffer->setDepthStencilBuffer(sourceFramebuffer->getDepthStencilBuffer(), sourceFramebuffer->getDepthStencilBufferFormat());
    }*/
        auto blurringSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR_MIP_POINT);
-       auto blurringTarget = gpu::TexturePointer(gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), gpu::Texture::SINGLE_MIP, blurringSampler));
+       auto blurringTarget = gpu::Texture::create2D(sourceFramebuffer->getRenderBuffer(0)->getTexelFormat(), sourceFramebuffer->getWidth(), sourceFramebuffer->getHeight(), gpu::Texture::SINGLE_MIP, blurringSampler);
        _outputFramebuffer->setRenderBuffer(0, blurringTarget);
    }
@@ -16,6 +16,6 @@ if (NOT ANDROID)

endif ()

-link_hifi_libraries(shared networking octree gpu ui procedural model model-networking recording avatars fbx entities controllers animation audio physics)
+link_hifi_libraries(shared networking octree gpu ui procedural model model-networking recording avatars fbx entities controllers animation audio physics image)
# ui includes gl, but link_hifi_libraries does not use transitive includes, so gl must be explicit
include_hifi_library_headers(gl)

@@ -13,7 +13,7 @@ if (WIN32)
setup_hifi_plugin(OpenGL Script Qml Widgets)
link_hifi_libraries(shared gl networking controllers ui
    plugins display-plugins ui-plugins input-plugins script-engine
-   render-utils model gpu gpu-gl render model-networking fbx)
+   render-utils model gpu gpu-gl render model-networking fbx image)

include_hifi_library_headers(octree)

@@ -495,7 +495,7 @@ void OpenVrDisplayPlugin::customizeContext() {
    _compositeInfos[0].texture = _compositeFramebuffer->getRenderBuffer(0);
    for (size_t i = 0; i < COMPOSITING_BUFFER_SIZE; ++i) {
        if (0 != i) {
-           _compositeInfos[i].texture = gpu::TexturePointer(gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, _renderTargetSize.x, _renderTargetSize.y, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT)));
+           _compositeInfos[i].texture = gpu::Texture::createRenderBuffer(gpu::Element::COLOR_RGBA_32, _renderTargetSize.x, _renderTargetSize.y, gpu::Texture::SINGLE_MIP, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_POINT));
        }
        _compositeInfos[i].textureID = getGLBackend()->getTextureID(_compositeInfos[i].texture);
    }

@@ -3,7 +3,7 @@ AUTOSCRIBE_SHADER_LIB(gpu model render-utils)
# This is not a testcase -- just set it up as a regular hifi project
setup_hifi_project(Quick Gui OpenGL Script Widgets)
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/")
-link_hifi_libraries(networking gl gpu gpu-gl procedural shared fbx model model-networking animation script-engine render render-utils octree )
+link_hifi_libraries(networking gl gpu gpu-gl procedural shared fbx model model-networking animation script-engine render render-utils octree image)
package_libraries_for_deployment()

target_nsight()

@@ -10,6 +10,6 @@ setup_hifi_project(Quick Gui OpenGL)
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/")

# link in the shared libraries
-link_hifi_libraries(shared octree ktx gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics)
+link_hifi_libraries(shared octree ktx gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics image)

package_libraries_for_deployment()

@@ -43,6 +43,7 @@
#include <gl/Config.h>
#include <model/TextureMap.h>
#include <ktx/KTX.h>
+#include <image/Image.h>

QSharedPointer<FileLogger> logger;

@@ -94,7 +95,7 @@ int main(int argc, char** argv) {
    QLoggingCategory::setFilterRules(LOG_FILTER_RULES);

    QImage image(TEST_IMAGE);
-   gpu::Texture* testTexture = model::TextureUsage::process2DTextureColorFromImage(image, TEST_IMAGE.toStdString(), true, false, true);
+   gpu::TexturePointer testTexture = image::TextureUsage::process2DTextureColorFromImage(image, TEST_IMAGE.toStdString(), true);

    auto ktxMemory = gpu::Texture::serialize(*testTexture);
    {

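A minimal sketch of the texture-processing path this test exercises, assuming the image::TextureUsage API shown in the hunk above; the meaning of the boolean argument, the header paths, and the helper name are assumptions:

#include <QImage>
#include <QString>
#include <gpu/Texture.h>   // assumed header layout
#include <image/Image.h>

// Hypothetical helper: run a source image through the new image library, then
// serialize the resulting texture to a KTX blob (BC-compressed mips where applicable).
gpu::TexturePointer processAndSerialize(const QString& path) {
    QImage source(path);
    gpu::TexturePointer texture =
        image::TextureUsage::process2DTextureColorFromImage(source, path.toStdString(), true);
    auto ktxMemory = gpu::Texture::serialize(*texture);   // KTX payload, e.g. for a disk cache
    (void)ktxMemory;   // the test writes and re-reads this; here it is simply discarded
    return texture;
}
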
@@ -10,7 +10,7 @@ setup_hifi_project(Quick Gui OpenGL)
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/")

# link in the shared libraries
-link_hifi_libraries(shared octree ktx gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics)
+link_hifi_libraries(shared octree ktx gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics image)

package_libraries_for_deployment()

@@ -10,7 +10,7 @@ setup_hifi_project(Quick Gui OpenGL)
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/")

# link in the shared libraries
-link_hifi_libraries(shared octree gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics)
+link_hifi_libraries(shared octree gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics image)

package_libraries_for_deployment()