Start to set up image library

Atlante45 2017-04-07 17:08:08 -07:00
parent 2e1355618f
commit a450f52427
23 changed files with 1343 additions and 1331 deletions


@@ -194,7 +194,7 @@ link_hifi_libraries(
recording fbx networking model-networking entities avatars
audio audio-client animation script-engine physics
render-utils entities-renderer avatars-renderer ui auto-updater
controllers plugins
controllers plugins image
ui-plugins display-plugins input-plugins
${NON_ANDROID_LIBRARIES}
)


@@ -1445,8 +1445,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
QString skyboxUrl { PathUtils::resourcesPath() + "images/Default-Sky-9-cubemap.jpg" };
QString skyboxAmbientUrl { PathUtils::resourcesPath() + "images/Default-Sky-9-ambient.jpg" };
_defaultSkyboxTexture = textureCache->getImageTexture(skyboxUrl, NetworkTexture::CUBE_TEXTURE, { { "generateIrradiance", false } });
_defaultSkyboxAmbientTexture = textureCache->getImageTexture(skyboxAmbientUrl, NetworkTexture::CUBE_TEXTURE, { { "generateIrradiance", true } });
_defaultSkyboxTexture = textureCache->getImageTexture(skyboxUrl, gpu::TextureType::CUBE_TEXTURE, { { "generateIrradiance", false } });
_defaultSkyboxAmbientTexture = textureCache->getImageTexture(skyboxAmbientUrl, gpu::TextureType::CUBE_TEXTURE, { { "generateIrradiance", true } });
_defaultSkybox->setCubemap(_defaultSkyboxTexture);


@@ -495,7 +495,7 @@ bool EntityTreeRenderer::applySkyboxAndHasAmbient() {
bool isAmbientSet = false;
if (_pendingAmbientTexture && !_ambientTexture) {
_ambientTexture = textureCache->getTexture(_ambientTextureURL, NetworkTexture::CUBE_TEXTURE);
_ambientTexture = textureCache->getTexture(_ambientTextureURL, gpu::TextureType::CUBE_TEXTURE);
}
if (_ambientTexture && _ambientTexture->isLoaded()) {
_pendingAmbientTexture = false;
@@ -512,7 +512,7 @@ bool EntityTreeRenderer::applySkyboxAndHasAmbient() {
if (_pendingSkyboxTexture &&
(!_skyboxTexture || (_skyboxTexture->getURL() != _skyboxTextureURL))) {
_skyboxTexture = textureCache->getTexture(_skyboxTextureURL, NetworkTexture::CUBE_TEXTURE);
_skyboxTexture = textureCache->getTexture(_skyboxTextureURL, gpu::TextureType::CUBE_TEXTURE);
}
if (_skyboxTexture && _skyboxTexture->isLoaded()) {
_pendingSkyboxTexture = false;


@@ -224,22 +224,19 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
result = GL_COMPRESSED_SRGB_ALPHA;
break;
// FIXME: We will want to support this later
/*
case gpu::COMPRESSED_BC3_RGBA:
case gpu::COMPRESSED_BC3_RGBA:
result = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
break;
case gpu::COMPRESSED_BC3_SRGBA:
case gpu::COMPRESSED_BC3_SRGBA:
result = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
break;
case gpu::COMPRESSED_BC7_RGBA:
case gpu::COMPRESSED_BC7_RGBA:
result = GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
break;
case gpu::COMPRESSED_BC7_SRGBA:
case gpu::COMPRESSED_BC7_SRGBA:
result = GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM;
break;
*/
default:
qCWarning(gpugllogging) << "Unknown combination of texel format";
@@ -364,25 +361,21 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
break;
case gpu::COMPRESSED_SRGBA:
texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA;
break;
// FIXME: We will want to support this later
/*
case gpu::COMPRESSED_BC3_RGBA:
case gpu::COMPRESSED_BC3_RGBA:
texel.internalFormat = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
break;
case gpu::COMPRESSED_BC3_SRGBA:
case gpu::COMPRESSED_BC3_SRGBA:
texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
break;
case gpu::COMPRESSED_BC7_RGBA:
case gpu::COMPRESSED_BC7_RGBA:
texel.internalFormat = GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
break;
case gpu::COMPRESSED_BC7_SRGBA:
case gpu::COMPRESSED_BC7_SRGBA:
texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM;
break;
*/
default:
qCWarning(gpugllogging) << "Unknown combination of texel format";


@@ -165,12 +165,11 @@ enum Semantic {
COMPRESSED_SRGB,
COMPRESSED_SRGBA,
// FIXME: Will have to be supported later:
/*COMPRESSED_BC3_RGBA, // RGBA_S3TC_DXT5_EXT,
COMPRESSED_BC3_RGBA, // RGBA_S3TC_DXT5_EXT,
COMPRESSED_BC3_SRGBA, // SRGB_ALPHA_S3TC_DXT5_EXT
COMPRESSED_BC7_RGBA,
COMPRESSED_BC7_SRGBA, */
COMPRESSED_BC7_SRGBA,
_LAST_COMPRESSED,


@@ -157,6 +157,23 @@ enum class TextureUsageType {
EXTERNAL,
};
enum TextureType {
DEFAULT_TEXTURE,
STRICT_TEXTURE,
ALBEDO_TEXTURE,
NORMAL_TEXTURE,
BUMP_TEXTURE,
SPECULAR_TEXTURE,
METALLIC_TEXTURE = SPECULAR_TEXTURE, // for now spec and metallic texture are the same, converted to grey
ROUGHNESS_TEXTURE,
GLOSS_TEXTURE,
EMISSIVE_TEXTURE,
CUBE_TEXTURE,
OCCLUSION_TEXTURE,
SCATTERING_TEXTURE = OCCLUSION_TEXTURE,
LIGHTMAP_TEXTURE
};
class Texture : public Resource {
static std::atomic<uint32_t> _textureCPUCount;
static std::atomic<Size> _textureCPUMemoryUsage;
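
The texture type enum now lives in the gpu namespace instead of on NetworkTexture. Because it is an unscoped enum, call sites elsewhere in this commit spell the values either as gpu::ALBEDO_TEXTURE or as gpu::TextureType::ALBEDO_TEXTURE; both name the same constant. A minimal sketch, not part of the commit, assuming the header is included as <gpu/Texture.h> (the include used by Image.h below):

#include <gpu/Texture.h>

// Either qualification names the same unscoped enumerator.
static_assert(gpu::CUBE_TEXTURE == gpu::TextureType::CUBE_TEXTURE,
              "unscoped enum: both spellings are interchangeable");
// The aliases declared in the enum compare equal by definition.
static_assert(gpu::METALLIC_TEXTURE == gpu::SPECULAR_TEXTURE,
              "metallic and specular share a value for now");
static_assert(gpu::SCATTERING_TEXTURE == gpu::OCCLUSION_TEXTURE,
              "scattering and occlusion share a value for now");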


@@ -0,0 +1,9 @@
set(TARGET_NAME image)
setup_hifi_library()
link_hifi_libraries(shared gpu)
target_glm()
add_dependency_external_projects(nvtt)
target_include_directories(${TARGET_NAME} PRIVATE ${NVTT_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${NVTT_LIBRARIES})

File diff suppressed because it is too large


@@ -0,0 +1,56 @@
//
// Image.h
// image/src/image
//
// Created by Clement Brisset on 4/5/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_image_Image_h
#define hifi_image_Image_h
#include <QVariant>
#include <gpu/Texture.h>
class QByteArray;
class QImage;
class QUrl;
namespace image {
using TextureLoader = std::function<gpu::Texture*(const QImage&, const std::string&)>;
TextureLoader getTextureLoaderForType(gpu::TextureType type, const QVariantMap& options = QVariantMap());
gpu::Texture* processImage(const QByteArray& content, const QUrl& url, const std::string& hash, int maxNumPixels, const TextureLoader& loader);
namespace TextureUsage {
gpu::Texture* create2DTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::Texture* createStrict2DTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::Texture* createAlbedoTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::Texture* createEmissiveTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::Texture* createNormalTextureFromNormalImage(const QImage& image, const std::string& srcImageName);
gpu::Texture* createNormalTextureFromBumpImage(const QImage& image, const std::string& srcImageName);
gpu::Texture* createRoughnessTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::Texture* createRoughnessTextureFromGlossImage(const QImage& image, const std::string& srcImageName);
gpu::Texture* createMetallicTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::Texture* createCubeTextureFromImage(const QImage& image, const std::string& srcImageName);
gpu::Texture* createCubeTextureFromImageWithoutIrradiance(const QImage& image, const std::string& srcImageName);
gpu::Texture* createLightmapTextureFromImage(const QImage& image, const std::string& srcImageName);
const QImage process2DImageColor(const QImage& srcImage, bool& validAlpha, bool& alphaAsMask);
void defineColorTexelFormats(gpu::Element& formatGPU, gpu::Element& formatMip,
const QImage& srcImage, bool isLinear, bool doCompress);
gpu::Texture* process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool isStrict = false);
gpu::Texture* processCubeTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool generateIrradiance);
} // namespace TextureUsage
} // namespace image
#endif // hifi_image_Image_h
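
Together, getTextureLoaderForType and processImage take over the loader dispatch that previously lived in TextureCache.cpp (see the removed getTextureLoaderForType switch further down). A minimal usage sketch, not part of the commit; the hash string and pixel cap are placeholders for the values TextureCache normally supplies (an MD5 of the content and its own maximum pixel count):

#include <image/Image.h>
#include <QByteArray>
#include <QUrl>
#include <string>

gpu::Texture* decodeCubeMap(const QByteArray& content, const QUrl& url) {
    // Pick a loader for the texture type; the options map mirrors the
    // { { "generateIrradiance", ... } } usage in Application.cpp above.
    auto loader = image::getTextureLoaderForType(gpu::CUBE_TEXTURE,
                                                 { { "generateIrradiance", true } });
    std::string hash = "content-md5-placeholder";   // normally the MD5 of `content`
    int maxNumPixels = 8192 * 8192;                 // placeholder size cap
    return image::processImage(content, url, hash, maxNumPixels, loader);
}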


@@ -0,0 +1,14 @@
//
// ImageLogging.cpp
// image/src/image
//
// Created by Clement Brisset on 4/5/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "ImageLogging.h"
Q_LOGGING_CATEGORY(imagelogging, "hifi.model")


@@ -0,0 +1,14 @@
//
// ImageLogging.h
// image/src/image
//
// Created by Clement Brisset on 4/5/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <QLoggingCategory>
Q_DECLARE_LOGGING_CATEGORY(imagelogging)
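
A small sketch, not part of the commit, of how code in the new library can log through this category; note that the category string registered in ImageLogging.cpp above is currently "hifi.model":

#include "ImageLogging.h"
#include <QString>

void warnOnFailedLoad(const QString& filename) {
    // Routed through the imagelogging category declared above.
    qCWarning(imagelogging) << "Failed to load image" << filename;
}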


@@ -1,4 +1,4 @@
set(TARGET_NAME model-networking)
setup_hifi_library()
link_hifi_libraries(shared networking model fbx ktx)
link_hifi_libraries(shared networking model fbx ktx image)


@@ -489,7 +489,7 @@ QUrl NetworkMaterial::getTextureUrl(const QUrl& baseUrl, const FBXTexture& textu
}
model::TextureMapPointer NetworkMaterial::fetchTextureMap(const QUrl& baseUrl, const FBXTexture& fbxTexture,
TextureType type, MapChannel channel) {
gpu::TextureType type, MapChannel channel) {
const auto url = getTextureUrl(baseUrl, fbxTexture);
const auto texture = DependencyManager::get<TextureCache>()->getTexture(url, type, fbxTexture.content, fbxTexture.maxNumPixels);
_textures[channel] = Texture { fbxTexture.name, texture };
@@ -503,7 +503,7 @@ model::TextureMapPointer NetworkMaterial::fetchTextureMap(const QUrl& baseUrl, c
return map;
}
model::TextureMapPointer NetworkMaterial::fetchTextureMap(const QUrl& url, TextureType type, MapChannel channel) {
model::TextureMapPointer NetworkMaterial::fetchTextureMap(const QUrl& url, gpu::TextureType type, MapChannel channel) {
const auto texture = DependencyManager::get<TextureCache>()->getTexture(url, type);
_textures[channel].texture = texture;
@@ -518,7 +518,7 @@ NetworkMaterial::NetworkMaterial(const FBXMaterial& material, const QUrl& textur
{
_textures = Textures(MapChannel::NUM_MAP_CHANNELS);
if (!material.albedoTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.albedoTexture, NetworkTexture::ALBEDO_TEXTURE, MapChannel::ALBEDO_MAP);
auto map = fetchTextureMap(textureBaseUrl, material.albedoTexture, gpu::TextureType::ALBEDO_TEXTURE, MapChannel::ALBEDO_MAP);
_albedoTransform = material.albedoTexture.transform;
map->setTextureTransform(_albedoTransform);
@@ -535,45 +535,45 @@ NetworkMaterial::NetworkMaterial(const FBXMaterial& material, const QUrl& textur
if (!material.normalTexture.filename.isEmpty()) {
auto type = (material.normalTexture.isBumpmap ? NetworkTexture::BUMP_TEXTURE : NetworkTexture::NORMAL_TEXTURE);
auto type = (material.normalTexture.isBumpmap ? gpu::TextureType::BUMP_TEXTURE : gpu::TextureType::NORMAL_TEXTURE);
auto map = fetchTextureMap(textureBaseUrl, material.normalTexture, type, MapChannel::NORMAL_MAP);
setTextureMap(MapChannel::NORMAL_MAP, map);
}
if (!material.roughnessTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.roughnessTexture, NetworkTexture::ROUGHNESS_TEXTURE, MapChannel::ROUGHNESS_MAP);
auto map = fetchTextureMap(textureBaseUrl, material.roughnessTexture, gpu::TextureType::ROUGHNESS_TEXTURE, MapChannel::ROUGHNESS_MAP);
setTextureMap(MapChannel::ROUGHNESS_MAP, map);
} else if (!material.glossTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.glossTexture, NetworkTexture::GLOSS_TEXTURE, MapChannel::ROUGHNESS_MAP);
auto map = fetchTextureMap(textureBaseUrl, material.glossTexture, gpu::TextureType::GLOSS_TEXTURE, MapChannel::ROUGHNESS_MAP);
setTextureMap(MapChannel::ROUGHNESS_MAP, map);
}
if (!material.metallicTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.metallicTexture, NetworkTexture::METALLIC_TEXTURE, MapChannel::METALLIC_MAP);
auto map = fetchTextureMap(textureBaseUrl, material.metallicTexture, gpu::TextureType::METALLIC_TEXTURE, MapChannel::METALLIC_MAP);
setTextureMap(MapChannel::METALLIC_MAP, map);
} else if (!material.specularTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.specularTexture, NetworkTexture::SPECULAR_TEXTURE, MapChannel::METALLIC_MAP);
auto map = fetchTextureMap(textureBaseUrl, material.specularTexture, gpu::TextureType::SPECULAR_TEXTURE, MapChannel::METALLIC_MAP);
setTextureMap(MapChannel::METALLIC_MAP, map);
}
if (!material.occlusionTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.occlusionTexture, NetworkTexture::OCCLUSION_TEXTURE, MapChannel::OCCLUSION_MAP);
auto map = fetchTextureMap(textureBaseUrl, material.occlusionTexture, gpu::TextureType::OCCLUSION_TEXTURE, MapChannel::OCCLUSION_MAP);
map->setTextureTransform(material.occlusionTexture.transform);
setTextureMap(MapChannel::OCCLUSION_MAP, map);
}
if (!material.emissiveTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.emissiveTexture, NetworkTexture::EMISSIVE_TEXTURE, MapChannel::EMISSIVE_MAP);
auto map = fetchTextureMap(textureBaseUrl, material.emissiveTexture, gpu::TextureType::EMISSIVE_TEXTURE, MapChannel::EMISSIVE_MAP);
setTextureMap(MapChannel::EMISSIVE_MAP, map);
}
if (!material.scatteringTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.scatteringTexture, NetworkTexture::SCATTERING_TEXTURE, MapChannel::SCATTERING_MAP);
auto map = fetchTextureMap(textureBaseUrl, material.scatteringTexture, gpu::TextureType::SCATTERING_TEXTURE, MapChannel::SCATTERING_MAP);
setTextureMap(MapChannel::SCATTERING_MAP, map);
}
if (!material.lightmapTexture.filename.isEmpty()) {
auto map = fetchTextureMap(textureBaseUrl, material.lightmapTexture, NetworkTexture::LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
auto map = fetchTextureMap(textureBaseUrl, material.lightmapTexture, gpu::TextureType::LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
_lightmapTransform = material.lightmapTexture.transform;
_lightmapParams = material.lightmapParams;
map->setTextureTransform(_lightmapTransform);
@@ -596,7 +596,7 @@ void NetworkMaterial::setTextures(const QVariantMap& textureMap) {
if (!albedoName.isEmpty()) {
auto url = textureMap.contains(albedoName) ? textureMap[albedoName].toUrl() : QUrl();
auto map = fetchTextureMap(url, NetworkTexture::ALBEDO_TEXTURE, MapChannel::ALBEDO_MAP);
auto map = fetchTextureMap(url, gpu::TextureType::ALBEDO_TEXTURE, MapChannel::ALBEDO_MAP);
map->setTextureTransform(_albedoTransform);
// when reassigning the albedo texture we also check for the alpha channel used as opacity
map->setUseAlphaChannel(true);
@@ -605,45 +605,45 @@ void NetworkMaterial::setTextures(const QVariantMap& textureMap) {
if (!normalName.isEmpty()) {
auto url = textureMap.contains(normalName) ? textureMap[normalName].toUrl() : QUrl();
auto map = fetchTextureMap(url, NetworkTexture::NORMAL_TEXTURE, MapChannel::NORMAL_MAP);
auto map = fetchTextureMap(url, gpu::TextureType::NORMAL_TEXTURE, MapChannel::NORMAL_MAP);
setTextureMap(MapChannel::NORMAL_MAP, map);
}
if (!roughnessName.isEmpty()) {
auto url = textureMap.contains(roughnessName) ? textureMap[roughnessName].toUrl() : QUrl();
// FIXME: If a gloss map is passed instead of a roughness map, how do we know?
auto map = fetchTextureMap(url, NetworkTexture::ROUGHNESS_TEXTURE, MapChannel::ROUGHNESS_MAP);
auto map = fetchTextureMap(url, gpu::TextureType::ROUGHNESS_TEXTURE, MapChannel::ROUGHNESS_MAP);
setTextureMap(MapChannel::ROUGHNESS_MAP, map);
}
if (!metallicName.isEmpty()) {
auto url = textureMap.contains(metallicName) ? textureMap[metallicName].toUrl() : QUrl();
// FIXME: If a specular map is passed instead of a metallic map, how do we know?
auto map = fetchTextureMap(url, NetworkTexture::METALLIC_TEXTURE, MapChannel::METALLIC_MAP);
auto map = fetchTextureMap(url, gpu::TextureType::METALLIC_TEXTURE, MapChannel::METALLIC_MAP);
setTextureMap(MapChannel::METALLIC_MAP, map);
}
if (!occlusionName.isEmpty()) {
auto url = textureMap.contains(occlusionName) ? textureMap[occlusionName].toUrl() : QUrl();
auto map = fetchTextureMap(url, NetworkTexture::OCCLUSION_TEXTURE, MapChannel::OCCLUSION_MAP);
auto map = fetchTextureMap(url, gpu::TextureType::OCCLUSION_TEXTURE, MapChannel::OCCLUSION_MAP);
setTextureMap(MapChannel::OCCLUSION_MAP, map);
}
if (!emissiveName.isEmpty()) {
auto url = textureMap.contains(emissiveName) ? textureMap[emissiveName].toUrl() : QUrl();
auto map = fetchTextureMap(url, NetworkTexture::EMISSIVE_TEXTURE, MapChannel::EMISSIVE_MAP);
auto map = fetchTextureMap(url, gpu::TextureType::EMISSIVE_TEXTURE, MapChannel::EMISSIVE_MAP);
setTextureMap(MapChannel::EMISSIVE_MAP, map);
}
if (!scatteringName.isEmpty()) {
auto url = textureMap.contains(scatteringName) ? textureMap[scatteringName].toUrl() : QUrl();
auto map = fetchTextureMap(url, NetworkTexture::SCATTERING_TEXTURE, MapChannel::SCATTERING_MAP);
auto map = fetchTextureMap(url, gpu::TextureType::SCATTERING_TEXTURE, MapChannel::SCATTERING_MAP);
setTextureMap(MapChannel::SCATTERING_MAP, map);
}
if (!lightmapName.isEmpty()) {
auto url = textureMap.contains(lightmapName) ? textureMap[lightmapName].toUrl() : QUrl();
auto map = fetchTextureMap(url, NetworkTexture::LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
auto map = fetchTextureMap(url, gpu::TextureType::LIGHTMAP_TEXTURE, MapChannel::LIGHTMAP_MAP);
map->setTextureTransform(_lightmapTransform);
map->setLightmapOffsetScale(_lightmapParams.x, _lightmapParams.y);
setTextureMap(MapChannel::LIGHTMAP_MAP, map);


@@ -180,13 +180,11 @@ protected:
const bool& isOriginal() const { return _isOriginal; }
private:
using TextureType = NetworkTexture::Type;
// Helpers for the ctors
QUrl getTextureUrl(const QUrl& baseUrl, const FBXTexture& fbxTexture);
model::TextureMapPointer fetchTextureMap(const QUrl& baseUrl, const FBXTexture& fbxTexture,
TextureType type, MapChannel channel);
model::TextureMapPointer fetchTextureMap(const QUrl& url, TextureType type, MapChannel channel);
gpu::TextureType type, MapChannel channel);
model::TextureMapPointer fetchTextureMap(const QUrl& url, gpu::TextureType type, MapChannel channel);
Transform _albedoTransform;
Transform _lightmapTransform;


@@ -13,11 +13,12 @@
#include <mutex>
#include <QNetworkReply>
#include <QPainter>
#include <QCryptographicHash>
#include <QImageReader>
#include <QRunnable>
#include <QThreadPool>
#include <QImageReader>
#include <QNetworkReply>
#include <QPainter>
#if DEBUG_DUMP_TEXTURE_LOADS
#include <QtCore/QFile>
@@ -31,10 +32,13 @@
#include <ktx/KTX.h>
#include <image/Image.h>
#include <NumericalConstants.h>
#include <shared/NsightHelpers.h>
#include <Finally.h>
#include <Profile.h>
#include "ModelNetworkingLogging.h"
#include <Trace.h>
@@ -51,16 +55,6 @@ TextureCache::TextureCache() :
_ktxCache(KTX_DIRNAME, KTX_EXT) {
setUnusedResourceCacheSize(0);
setObjectName("TextureCache");
// Expose enum Type to JS/QML via properties
// Despite being one-off, this should be fine, because TextureCache is a SINGLETON_DEPENDENCY
QObject* type = new QObject(this);
type->setObjectName("TextureType");
setProperty("Type", QVariant::fromValue(type));
auto metaEnum = QMetaEnum::fromType<Type>();
for (int i = 0; i < metaEnum.keyCount(); ++i) {
type->setProperty(metaEnum.key(i), metaEnum.value(i));
}
}
TextureCache::~TextureCache() {
@@ -172,18 +166,18 @@ const gpu::TexturePointer& TextureCache::getBlackTexture() {
/// Extra data for creating textures.
class TextureExtra {
public:
NetworkTexture::Type type;
gpu::TextureType type;
const QByteArray& content;
int maxNumPixels;
};
ScriptableResource* TextureCache::prefetch(const QUrl& url, int type, int maxNumPixels) {
auto byteArray = QByteArray();
TextureExtra extra = { (Type)type, byteArray, maxNumPixels };
TextureExtra extra = { (gpu::TextureType)type, byteArray, maxNumPixels };
return ResourceCache::prefetch(url, &extra);
}
NetworkTexturePointer TextureCache::getTexture(const QUrl& url, Type type, const QByteArray& content, int maxNumPixels) {
NetworkTexturePointer TextureCache::getTexture(const QUrl& url, gpu::TextureType type, const QByteArray& content, int maxNumPixels) {
TextureExtra extra = { type, content, maxNumPixels };
return ResourceCache::getResource(url, QUrl(), &extra).staticCast<NetworkTexture>();
}
@@ -216,8 +210,7 @@ gpu::TexturePointer TextureCache::cacheTextureByHash(const std::string& hash, co
return result;
}
gpu::TexturePointer getFallbackTextureForType(NetworkTexture::Type type) {
gpu::TexturePointer getFallbackTextureForType(gpu::TextureType type) {
gpu::TexturePointer result;
auto textureCache = DependencyManager::get<TextureCache>();
// Since this can be called on a background thread, there's a chance that the cache
@@ -226,116 +219,51 @@ gpu::TexturePointer getFallbackTextureForType(NetworkTexture::Type type) {
return result;
}
switch (type) {
case NetworkTexture::DEFAULT_TEXTURE:
case NetworkTexture::ALBEDO_TEXTURE:
case NetworkTexture::ROUGHNESS_TEXTURE:
case NetworkTexture::OCCLUSION_TEXTURE:
case gpu::DEFAULT_TEXTURE:
case gpu::ALBEDO_TEXTURE:
case gpu::ROUGHNESS_TEXTURE:
case gpu::OCCLUSION_TEXTURE:
result = textureCache->getWhiteTexture();
break;
case NetworkTexture::NORMAL_TEXTURE:
case gpu::NORMAL_TEXTURE:
result = textureCache->getBlueTexture();
break;
case NetworkTexture::EMISSIVE_TEXTURE:
case NetworkTexture::LIGHTMAP_TEXTURE:
case gpu::EMISSIVE_TEXTURE:
case gpu::LIGHTMAP_TEXTURE:
result = textureCache->getBlackTexture();
break;
case NetworkTexture::BUMP_TEXTURE:
case NetworkTexture::SPECULAR_TEXTURE:
case NetworkTexture::GLOSS_TEXTURE:
case NetworkTexture::CUBE_TEXTURE:
case NetworkTexture::CUSTOM_TEXTURE:
case NetworkTexture::STRICT_TEXTURE:
case gpu::BUMP_TEXTURE:
case gpu::SPECULAR_TEXTURE:
case gpu::GLOSS_TEXTURE:
case gpu::CUBE_TEXTURE:
case gpu::STRICT_TEXTURE:
default:
break;
}
return result;
}
NetworkTexture::TextureLoaderFunc getTextureLoaderForType(NetworkTexture::Type type,
const QVariantMap& options = QVariantMap()) {
using Type = NetworkTexture;
switch (type) {
case Type::ALBEDO_TEXTURE: {
return model::TextureUsage::createAlbedoTextureFromImage;
break;
}
case Type::EMISSIVE_TEXTURE: {
return model::TextureUsage::createEmissiveTextureFromImage;
break;
}
case Type::LIGHTMAP_TEXTURE: {
return model::TextureUsage::createLightmapTextureFromImage;
break;
}
case Type::CUBE_TEXTURE: {
if (options.value("generateIrradiance", true).toBool()) {
return model::TextureUsage::createCubeTextureFromImage;
} else {
return model::TextureUsage::createCubeTextureFromImageWithoutIrradiance;
}
break;
}
case Type::BUMP_TEXTURE: {
return model::TextureUsage::createNormalTextureFromBumpImage;
break;
}
case Type::NORMAL_TEXTURE: {
return model::TextureUsage::createNormalTextureFromNormalImage;
break;
}
case Type::ROUGHNESS_TEXTURE: {
return model::TextureUsage::createRoughnessTextureFromImage;
break;
}
case Type::GLOSS_TEXTURE: {
return model::TextureUsage::createRoughnessTextureFromGlossImage;
break;
}
case Type::SPECULAR_TEXTURE: {
return model::TextureUsage::createMetallicTextureFromImage;
break;
}
case Type::STRICT_TEXTURE: {
return model::TextureUsage::createStrict2DTextureFromImage;
break;
}
case Type::CUSTOM_TEXTURE: {
Q_ASSERT(false);
return NetworkTexture::TextureLoaderFunc();
break;
}
case Type::DEFAULT_TEXTURE:
default: {
return model::TextureUsage::create2DTextureFromImage;
break;
}
}
}
/// Returns a texture version of an image file
gpu::TexturePointer TextureCache::getImageTexture(const QString& path, Type type, QVariantMap options) {
gpu::TexturePointer TextureCache::getImageTexture(const QString& path, gpu::TextureType type, QVariantMap options) {
QImage image = QImage(path);
auto loader = getTextureLoaderForType(type, options);
auto loader = image::getTextureLoaderForType(type, options);
return gpu::TexturePointer(loader(image, QUrl::fromLocalFile(path).fileName().toStdString()));
}
QSharedPointer<Resource> TextureCache::createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
const void* extra) {
const TextureExtra* textureExtra = static_cast<const TextureExtra*>(extra);
auto type = textureExtra ? textureExtra->type : Type::DEFAULT_TEXTURE;
auto type = textureExtra ? textureExtra->type : gpu::DEFAULT_TEXTURE;
auto content = textureExtra ? textureExtra->content : QByteArray();
auto maxNumPixels = textureExtra ? textureExtra->maxNumPixels : ABSOLUTE_MAX_TEXTURE_NUM_PIXELS;
NetworkTexture* texture = new NetworkTexture(url, type, content, maxNumPixels);
return QSharedPointer<Resource>(texture, &Resource::deleter);
}
NetworkTexture::NetworkTexture(const QUrl& url, Type type, const QByteArray& content, int maxNumPixels) :
NetworkTexture::NetworkTexture(const QUrl& url, gpu::TextureType type, const QByteArray& content, int maxNumPixels) :
Resource(url),
_type(type),
_maxNumPixels(maxNumPixels)
@@ -353,13 +281,6 @@ NetworkTexture::NetworkTexture(const QUrl& url, Type type, const QByteArray& con
}
}
NetworkTexture::TextureLoaderFunc NetworkTexture::getTextureLoader() const {
if (_type == CUSTOM_TEXTURE) {
return _textureLoader;
}
return getTextureLoaderForType(_type);
}
void NetworkTexture::setImage(gpu::TexturePointer texture, int originalWidth,
int originalHeight) {
_originalWidth = originalWidth;
@@ -384,34 +305,22 @@ void NetworkTexture::setImage(gpu::TexturePointer texture, int originalWidth,
}
gpu::TexturePointer NetworkTexture::getFallbackTexture() const {
if (_type == CUSTOM_TEXTURE) {
return gpu::TexturePointer();
}
return getFallbackTextureForType(_type);
}
class Reader : public QRunnable {
public:
Reader(const QWeakPointer<Resource>& resource, const QUrl& url);
void run() override final;
virtual void read() = 0;
protected:
QWeakPointer<Resource> _resource;
QUrl _url;
};
class ImageReader : public Reader {
class ImageReader : public QRunnable {
public:
ImageReader(const QWeakPointer<Resource>& resource, const QUrl& url,
const QByteArray& data, const std::string& hash, int maxNumPixels);
void read() override final;
const QByteArray& data, int maxNumPixels);
void run() override final;
void read();
private:
static void listSupportedImageFormats();
QWeakPointer<Resource> _resource;
QUrl _url;
QByteArray _content;
std::string _hash;
int _maxNumPixels;
};
@@ -420,71 +329,16 @@ void NetworkTexture::downloadFinished(const QByteArray& data) {
}
void NetworkTexture::loadContent(const QByteArray& content) {
// Hash the source image for KTX caching
std::string hash;
{
QCryptographicHash hasher(QCryptographicHash::Md5);
hasher.addData(content);
hash = hasher.result().toHex().toStdString();
}
auto textureCache = static_cast<TextureCache*>(_cache.data());
if (textureCache != nullptr) {
// If we already have a live texture with the same hash, use it
auto texture = textureCache->getTextureByHash(hash);
// If there is no live texture, check if there's an existing KTX file
if (!texture) {
KTXFilePointer ktxFile = textureCache->_ktxCache.getFile(hash);
if (ktxFile) {
texture.reset(gpu::Texture::unserialize(ktxFile->getFilepath()));
if (texture) {
texture = textureCache->cacheTextureByHash(hash, texture);
}
}
}
// If we found the texture either because it's in use or via KTX deserialization,
// set the image and return immediately.
if (texture) {
setImage(texture, texture->getWidth(), texture->getHeight());
return;
}
}
// We failed to find an existing live or KTX texture, so trigger an image reader
QThreadPool::globalInstance()->start(new ImageReader(_self, _url, content, hash, _maxNumPixels));
QThreadPool::globalInstance()->start(new ImageReader(_self, _url, content, _maxNumPixels));
}
Reader::Reader(const QWeakPointer<Resource>& resource, const QUrl& url) :
_resource(resource), _url(url) {
ImageReader::ImageReader(const QWeakPointer<Resource>& resource, const QUrl& url, const QByteArray& data, int maxNumPixels) :
_resource(resource),
_url(url),
_content(data),
_maxNumPixels(maxNumPixels)
{
DependencyManager::get<StatTracker>()->incrementStat("PendingProcessing");
}
void Reader::run() {
PROFILE_RANGE_EX(resource_parse_image, __FUNCTION__, 0xffff0000, 0, { { "url", _url.toString() } });
DependencyManager::get<StatTracker>()->decrementStat("PendingProcessing");
CounterStat counter("Processing");
auto originalPriority = QThread::currentThread()->priority();
if (originalPriority == QThread::InheritPriority) {
originalPriority = QThread::NormalPriority;
}
QThread::currentThread()->setPriority(QThread::LowPriority);
Finally restorePriority([originalPriority]{ QThread::currentThread()->setPriority(originalPriority); });
if (!_resource.data()) {
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
return;
}
read();
}
ImageReader::ImageReader(const QWeakPointer<Resource>& resource, const QUrl& url,
const QByteArray& data, const std::string& hash, int maxNumPixels) :
Reader(resource, url), _content(data), _hash(hash), _maxNumPixels(maxNumPixels) {
listSupportedImageFormats();
#if DEBUG_DUMP_TEXTURE_LOADS
@@ -515,89 +369,104 @@ void ImageReader::listSupportedImageFormats() {
});
}
void ImageReader::read() {
// Help the QImage loader by extracting the image file format from the URL's filename extension.
// Some TGA images are not decoded properly without it.
auto filename = _url.fileName().toStdString();
auto filenameExtension = filename.substr(filename.find_last_of('.') + 1);
QImage image = QImage::fromData(_content, filenameExtension.c_str());
int imageWidth = image.width();
int imageHeight = image.height();
void ImageReader::run() {
PROFILE_RANGE_EX(resource_parse_image, __FUNCTION__, 0xffff0000, 0, { { "url", _url.toString() } });
DependencyManager::get<StatTracker>()->decrementStat("PendingProcessing");
CounterStat counter("Processing");
// Validate that the image loaded
if (imageWidth == 0 || imageHeight == 0 || image.format() == QImage::Format_Invalid) {
QString reason(filenameExtension.empty() ? "" : "(no file extension)");
qCWarning(modelnetworking) << "Failed to load" << _url << reason;
auto originalPriority = QThread::currentThread()->priority();
if (originalPriority == QThread::InheritPriority) {
originalPriority = QThread::NormalPriority;
}
QThread::currentThread()->setPriority(QThread::LowPriority);
Finally restorePriority([originalPriority] { QThread::currentThread()->setPriority(originalPriority); });
read();
}
void ImageReader::read() {
auto resource = _resource.lock(); // to ensure the resource is still needed
if (!resource) {
qCWarning(modelnetworking) << "Abandoning load of" << _url << "; could not get strong ref";
return;
}
auto networkTexture = resource.staticCast<NetworkTexture>();
// Validate the image is less than _maxNumPixels, and downscale if necessary
if (imageWidth * imageHeight > _maxNumPixels) {
float scaleFactor = sqrtf(_maxNumPixels / (float)(imageWidth * imageHeight));
int originalWidth = imageWidth;
int originalHeight = imageHeight;
imageWidth = (int)(scaleFactor * (float)imageWidth + 0.5f);
imageHeight = (int)(scaleFactor * (float)imageHeight + 0.5f);
QImage newImage = image.scaled(QSize(imageWidth, imageHeight), Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
image.swap(newImage);
qCDebug(modelnetworking).nospace() << "Downscaled " << _url << " (" <<
QSize(originalWidth, originalHeight) << " to " <<
QSize(imageWidth, imageHeight) << ")";
// Hash the source image for KTX caching
std::string hash;
{
QCryptographicHash hasher(QCryptographicHash::Md5);
hasher.addData(_content);
hash = hasher.result().toHex().toStdString();
}
gpu::TexturePointer texture = nullptr;
{
auto resource = _resource.lock(); // to ensure the resource is still needed
if (!resource) {
qCDebug(modelnetworking) << _url << "loading stopped; resource out of scope";
return;
// Maybe load from cache
auto textureCache = DependencyManager::get<TextureCache>();
if (textureCache) {
// If we already have a live texture with the same hash, use it
auto texture = textureCache->getTextureByHash(hash);
// If there is no live texture, check if there's an existing KTX file
if (!texture) {
KTXFilePointer ktxFile = textureCache->_ktxCache.getFile(hash);
if (ktxFile) {
texture.reset(gpu::Texture::unserialize(ktxFile->getFilepath()));
if (texture) {
texture = textureCache->cacheTextureByHash(hash, texture);
}
}
}
auto url = _url.toString().toStdString();
// If we found the texture either because it's in use or via KTX deserialization,
// set the image and return immediately.
if (texture) {
QMetaObject::invokeMethod(resource.data(), "setImage",
Q_ARG(gpu::TexturePointer, texture),
Q_ARG(int, texture->getWidth()),
Q_ARG(int, texture->getHeight()));
return;
}
}
// Process new texture
gpu::TexturePointer texture;
{
PROFILE_RANGE_EX(resource_parse_image_raw, __FUNCTION__, 0xffff0000, 0);
// Load the image into a gpu::Texture
auto networkTexture = resource.staticCast<NetworkTexture>();
texture.reset(networkTexture->getTextureLoader()(image, url));
texture->setSource(url);
auto loader = image::getTextureLoaderForType(networkTexture->getTextureType());
texture.reset(image::processImage(_content, _url, hash, _maxNumPixels, loader));
texture->setSource(_url.toString().toStdString());
if (texture) {
texture->setFallbackTexture(networkTexture->getFallbackTexture());
}
}
auto textureCache = DependencyManager::get<TextureCache>();
// Save the image into a KTXFile
// Save the image into a KTXFile
if (textureCache) {
auto memKtx = gpu::Texture::serialize(*texture);
if (!memKtx) {
qCWarning(modelnetworking) << "Unable to serialize texture to KTX " << _url;
}
if (memKtx && textureCache) {
if (memKtx) {
const char* data = reinterpret_cast<const char*>(memKtx->_storage->data());
size_t length = memKtx->_storage->size();
KTXFilePointer file;
auto& ktxCache = textureCache->_ktxCache;
if (!memKtx || !(file = ktxCache.writeFile(data, KTXCache::Metadata(_hash, length)))) {
if (!memKtx || !(file = ktxCache.writeFile(data, KTXCache::Metadata(hash, length)))) {
qCWarning(modelnetworking) << _url << "file cache failed";
} else {
resource.staticCast<NetworkTexture>()->_file = file;
networkTexture->_file = file;
texture->setKtxBacking(file->getFilepath());
}
} else {
qCWarning(modelnetworking) << "Unable to serialize texture to KTX " << _url;
}
// We replace the texture with the one stored in the cache. This deals with the possible race condition of two different
// images with the same hash being loaded concurrently. Only one of them will make it into the cache by hash first and will
// be the winner
if (textureCache) {
texture = textureCache->cacheTextureByHash(_hash, texture);
}
texture = textureCache->cacheTextureByHash(hash, texture);
}
auto resource = _resource.lock(); // to ensure the resource is still needed
if (resource) {
QMetaObject::invokeMethod(resource.data(), "setImage",
Q_ARG(gpu::TexturePointer, texture),
Q_ARG(int, imageWidth), Q_ARG(int, imageHeight));
} else {
qCDebug(modelnetworking) << _url << "loading stopped; resource out of scope";
}
QMetaObject::invokeMethod(resource.data(), "setImage",
Q_ARG(gpu::TexturePointer, texture),
Q_ARG(int, texture->getWidth()),
Q_ARG(int, texture->getHeight()));
}


@@ -43,29 +43,7 @@ class NetworkTexture : public Resource, public Texture {
Q_OBJECT
public:
enum Type {
DEFAULT_TEXTURE,
STRICT_TEXTURE,
ALBEDO_TEXTURE,
NORMAL_TEXTURE,
BUMP_TEXTURE,
SPECULAR_TEXTURE,
METALLIC_TEXTURE = SPECULAR_TEXTURE, // for now spec and metallic texture are the same, converted to grey
ROUGHNESS_TEXTURE,
GLOSS_TEXTURE,
EMISSIVE_TEXTURE,
CUBE_TEXTURE,
OCCLUSION_TEXTURE,
SCATTERING_TEXTURE = OCCLUSION_TEXTURE,
LIGHTMAP_TEXTURE,
CUSTOM_TEXTURE
};
Q_ENUM(Type)
typedef gpu::Texture* TextureLoader(const QImage& image, const std::string& srcImageName);
using TextureLoaderFunc = std::function<TextureLoader>;
NetworkTexture(const QUrl& url, Type type, const QByteArray& content, int maxNumPixels);
NetworkTexture(const QUrl& url, gpu::TextureType type, const QByteArray& content, int maxNumPixels);
QString getType() const override { return "NetworkTexture"; }
@@ -73,9 +51,8 @@ public:
int getOriginalHeight() const { return _originalHeight; }
int getWidth() const { return _width; }
int getHeight() const { return _height; }
Type getTextureType() const { return _type; }
gpu::TextureType getTextureType() const { return _type; }
TextureLoaderFunc getTextureLoader() const;
gpu::TexturePointer getFallbackTexture() const;
signals:
@@ -93,8 +70,7 @@ private:
friend class KTXReader;
friend class ImageReader;
Type _type;
TextureLoaderFunc _textureLoader { [](const QImage&, const std::string&){ return nullptr; } };
gpu::TextureType _type;
KTXFilePointer _file;
int _originalWidth { 0 };
int _originalHeight { 0 };
@@ -110,8 +86,6 @@ class TextureCache : public ResourceCache, public Dependency {
Q_OBJECT
SINGLETON_DEPENDENCY
using Type = NetworkTexture::Type;
public:
/// Returns the ID of the permutation/normal texture used for Perlin noise shader programs. This texture
/// has two lines: the first, a set of random numbers in [0, 255] to be used as permutation offsets, and
@@ -131,10 +105,10 @@ public:
const gpu::TexturePointer& getBlackTexture();
/// Returns a texture version of an image file
static gpu::TexturePointer getImageTexture(const QString& path, Type type = Type::DEFAULT_TEXTURE, QVariantMap options = QVariantMap());
static gpu::TexturePointer getImageTexture(const QString& path, gpu::TextureType type = gpu::DEFAULT_TEXTURE, QVariantMap options = QVariantMap());
/// Loads a texture from the specified URL.
NetworkTexturePointer getTexture(const QUrl& url, Type type = Type::DEFAULT_TEXTURE,
NetworkTexturePointer getTexture(const QUrl& url, gpu::TextureType type = gpu::DEFAULT_TEXTURE,
const QByteArray& content = QByteArray(), int maxNumPixels = ABSOLUTE_MAX_TEXTURE_NUM_PIXELS);
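
With NetworkTexture::Type gone, callers pass gpu::TextureType directly (the old CUSTOM_TEXTURE value has no equivalent in the new enum). A minimal caller sketch, not part of the commit; the URL and file path are made up:

#include "TextureCache.h"
#include <QUrl>

void requestTextures() {
    auto textureCache = DependencyManager::get<TextureCache>();

    // Network texture: the type argument is now the gpu-level enum.
    QUrl albedoUrl{ "https://example.com/textures/brick_albedo.png" };  // placeholder URL
    auto albedo = textureCache->getTexture(albedoUrl, gpu::ALBEDO_TEXTURE);

    // Local file through the static helper, as in Application.cpp above.
    auto skybox = TextureCache::getImageTexture("/path/to/skybox.jpg",   // placeholder path
                                                gpu::CUBE_TEXTURE,
                                                { { "generateIrradiance", false } });
}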


@@ -1,8 +1,4 @@
set(TARGET_NAME model)
AUTOSCRIBE_SHADER_LIB(gpu model)
setup_hifi_library()
link_hifi_libraries(shared ktx gpu)
add_dependency_external_projects(nvtt)
target_include_directories(${TARGET_NAME} PRIVATE ${NVTT_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${NVTT_LIBRARIES})
link_hifi_libraries(shared ktx gpu image)


@@ -10,83 +10,9 @@
//
#include "TextureMap.h"
#include <nvtt/nvtt.h>
#include <ktx/KTX.h>
#include <QImage>
#include <QPainter>
#include <QDebug>
#include <QStandardPaths>
#include <QFileInfo>
#include <QDir>
#include <QCryptographicHash>
#include <Profile.h>
#include "ModelLogging.h"
using namespace model;
using namespace gpu;
// FIXME: Declare this to enable compression
//#define COMPRESS_TEXTURES
static const uvec2 SPARSE_PAGE_SIZE(128);
static const uvec2 MAX_TEXTURE_SIZE(4096);
bool DEV_DECIMATE_TEXTURES = false;
bool needsSparseRectification(const uvec2& size) {
// Don't attempt to rectify small textures (textures less than the sparse page size in any dimension)
if (glm::any(glm::lessThan(size, SPARSE_PAGE_SIZE))) {
return false;
}
// Don't rectify textures that are already an exact multiple of sparse page size
if (uvec2(0) == (size % SPARSE_PAGE_SIZE)) {
return false;
}
// Texture is not sparse compatible, but is bigger than the sparse page size in both dimensions, rectify!
return true;
}
uvec2 rectifyToSparseSize(const uvec2& size) {
uvec2 pages = ((size / SPARSE_PAGE_SIZE) + glm::clamp(size % SPARSE_PAGE_SIZE, uvec2(0), uvec2(1)));
uvec2 result = pages * SPARSE_PAGE_SIZE;
return result;
}
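
// Worked example (not part of the original source): with SPARSE_PAGE_SIZE of
// 128, a 1000x700 texture is larger than a page in both dimensions but not an
// exact multiple of the page size, so needsSparseRectification() returns true
// and rectifyToSparseSize() rounds each dimension up to the next page boundary:
//   pages  = (1000/128 + 1, 700/128 + 1) = (8, 6)
//   result = (8, 6) * 128                = (1024, 768)
// A 512x256 texture is already page-aligned and is left untouched.
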
std::atomic<size_t> DECIMATED_TEXTURE_COUNT { 0 };
std::atomic<size_t> RECTIFIED_TEXTURE_COUNT { 0 };
QImage processSourceImage(const QImage& srcImage, bool cubemap) {
PROFILE_RANGE(resource_parse, "processSourceImage");
const uvec2 srcImageSize = toGlm(srcImage.size());
uvec2 targetSize = srcImageSize;
while (glm::any(glm::greaterThan(targetSize, MAX_TEXTURE_SIZE))) {
targetSize /= 2;
}
if (targetSize != srcImageSize) {
++DECIMATED_TEXTURE_COUNT;
}
if (!cubemap && needsSparseRectification(targetSize)) {
++RECTIFIED_TEXTURE_COUNT;
targetSize = rectifyToSparseSize(targetSize);
}
if (DEV_DECIMATE_TEXTURES && glm::all(glm::greaterThanEqual(targetSize / SPARSE_PAGE_SIZE, uvec2(2)))) {
targetSize /= 2;
}
if (targetSize != srcImageSize) {
PROFILE_RANGE(resource_parse, "processSourceImage Rectify");
qCDebug(modelLog) << "Resizing texture from " << srcImageSize.x << "x" << srcImageSize.y << " to " << targetSize.x << "x" << targetSize.y;
return srcImage.scaled(fromGlm(targetSize), Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
}
return srcImage;
}
void TextureMap::setTextureSource(TextureSourcePointer& textureSource) {
_textureSource = textureSource;
}
@@ -115,894 +41,3 @@ void TextureMap::setLightmapOffsetScale(float offset, float scale) {
_lightmapOffsetScale.x = offset;
_lightmapOffsetScale.y = scale;
}
const QImage TextureUsage::process2DImageColor(const QImage& srcImage, bool& validAlpha, bool& alphaAsMask) {
PROFILE_RANGE(resource_parse, "process2DImageColor");
QImage image = processSourceImage(srcImage, false);
validAlpha = false;
alphaAsMask = true;
const uint8 OPAQUE_ALPHA = 255;
const uint8 TRANSPARENT_ALPHA = 0;
if (image.hasAlphaChannel()) {
if (image.format() != QImage::Format_ARGB32) {
image = image.convertToFormat(QImage::Format_ARGB32);
}
// Figure out if we can use a mask for alpha or not
int numOpaques = 0;
int numTranslucents = 0;
const int NUM_PIXELS = image.width() * image.height();
const int MAX_TRANSLUCENT_PIXELS_FOR_ALPHAMASK = (int)(0.05f * (float)(NUM_PIXELS));
const QRgb* data = reinterpret_cast<const QRgb*>(image.constBits());
for (int i = 0; i < NUM_PIXELS; ++i) {
auto alpha = qAlpha(data[i]);
if (alpha == OPAQUE_ALPHA) {
numOpaques++;
} else if (alpha != TRANSPARENT_ALPHA) {
if (++numTranslucents > MAX_TRANSLUCENT_PIXELS_FOR_ALPHAMASK) {
alphaAsMask = false;
break;
}
}
}
validAlpha = (numOpaques != NUM_PIXELS);
}
// Force all the color images to be rgba32bits
if (image.format() != QImage::Format_ARGB32) {
image = image.convertToFormat(QImage::Format_ARGB32);
}
return image;
}
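
// Worked example (not part of the original source): the alpha-as-mask
// heuristic above tolerates at most 5% partially transparent pixels. For a
// 512x512 image, NUM_PIXELS = 262144 and the threshold is
// (int)(0.05 * 262144) = 13107, so the 13108th pixel with 0 < alpha < 255
// flips alphaAsMask to false. validAlpha stays false when the image has no
// alpha channel or when every pixel is fully opaque.
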
void TextureUsage::defineColorTexelFormats(gpu::Element& formatGPU, gpu::Element& formatMip,
const QImage& image, bool isLinear, bool doCompress) {
#ifdef COMPRESS_TEXTURES
#else
doCompress = false;
#endif
if (image.hasAlphaChannel()) {
gpu::Semantic gpuSemantic;
gpu::Semantic mipSemantic;
if (isLinear) {
mipSemantic = gpu::BGRA;
if (doCompress) {
gpuSemantic = gpu::COMPRESSED_RGBA;
} else {
gpuSemantic = gpu::RGBA;
}
} else {
mipSemantic = gpu::SBGRA;
if (doCompress) {
gpuSemantic = gpu::COMPRESSED_SRGBA;
} else {
gpuSemantic = gpu::SRGBA;
}
}
formatGPU = gpu::Element(gpu::VEC4, gpu::NUINT8, gpuSemantic);
formatMip = gpu::Element(gpu::VEC4, gpu::NUINT8, mipSemantic);
} else {
gpu::Semantic gpuSemantic;
gpu::Semantic mipSemantic;
if (isLinear) {
mipSemantic = gpu::RGB;
if (doCompress) {
gpuSemantic = gpu::COMPRESSED_RGB;
} else {
gpuSemantic = gpu::RGB;
}
} else {
mipSemantic = gpu::SRGB;
if (doCompress) {
gpuSemantic = gpu::COMPRESSED_SRGB;
} else {
gpuSemantic = gpu::SRGB;
}
}
formatGPU = gpu::Element(gpu::VEC3, gpu::NUINT8, gpuSemantic);
formatMip = gpu::Element(gpu::VEC3, gpu::NUINT8, mipSemantic);
}
}
#define CPU_MIPMAPS 1
#define DEBUG_NVTT 0
void generateMips(gpu::Texture* texture, QImage& image, bool fastResize) {
#if CPU_MIPMAPS
PROFILE_RANGE(resource_parse, "generateMips");
#if DEBUG_NVTT
QDebug debug = qDebug();
debug << Q_FUNC_INFO << "\n";
debug << (QList<int>() << image.byteCount() << image.width() << image.height() << image.depth()) << "\n";
#endif // DEBUG_NVTT
auto numMips = texture->getNumMips();
for (uint16 level = 1; level < numMips; ++level) {
QSize mipSize(texture->evalMipWidth(level), texture->evalMipHeight(level));
if (fastResize) {
image = image.scaled(mipSize);
#if DEBUG_NVTT
debug << "Begin fast { " << image.byteCount() << image.width() << image.height() << image.depth() << level << " } Ends\n";
#endif // DEBUG_NVTT
texture->assignStoredMip(level, image.byteCount(), image.constBits());
} else {
QImage mipImage = image.scaled(mipSize, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
#if DEBUG_NVTT
debug << "Begin { " << mipImage.byteCount() << mipImage.width() << mipImage.height() << mipImage.depth() << level << " } Ends\n";
#endif // DEBUG_NVTT
texture->assignStoredMip(level, mipImage.byteCount(), mipImage.constBits());
}
}
#else
texture->autoGenerateMips(-1);
#endif
}
void generateFaceMips(gpu::Texture* texture, QImage& image, uint8 face) {
#if CPU_MIPMAPS
PROFILE_RANGE(resource_parse, "generateFaceMips");
auto numMips = texture->getNumMips();
for (uint16 level = 1; level < numMips; ++level) {
QSize mipSize(texture->evalMipWidth(level), texture->evalMipHeight(level));
QImage mipImage = image.scaled(mipSize, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
texture->assignStoredMipFace(level, face, mipImage.byteCount(), mipImage.constBits());
}
#else
texture->autoGenerateMips(-1);
#endif
}
struct MyOutputHandler : public nvtt::OutputHandler {
MyOutputHandler(gpu::Texture* texture, QDebug* debug) :
#if DEBUG_NVTT
_debug(debug),
#endif // DEBUG_NVTT
_texture(texture) {
}
virtual void beginImage(int size, int width, int height, int depth, int face, int miplevel) {
#if DEBUG_NVTT
auto list = QStringList() << QString::number(size)
<< QString::number(width)
<< QString::number(height)
<< QString::number(depth)
<< QString::number(face)
<< QString::number(miplevel);
_count = 0;
_str = "Begin { " + list.join(", ");
#endif // DEBUG_NVTT
_size = size;
_miplevel = miplevel;
_data = static_cast<gpu::Byte*>(malloc(size));
_current = _data;
}
virtual bool writeData(const void* data, int size) {
#if DEBUG_NVTT
++_count;
#endif // DEBUG_NVTT
assert(_current + size <= _data + _size);
memcpy(_current, data, size);
_current += size;
return true;
}
virtual void endImage() {
#if DEBUG_NVTT
_str += " } End " + QString::number(_count) + "\n";
*_debug << qPrintable(_str);
#endif // DEBUG_NVTT
_texture->assignStoredMip(_miplevel, _size, static_cast<const gpu::Byte*>(_data));
free(_data);
_data = nullptr;
}
#if DEBUG_NVTT
int _count = 0;
QString _str;
QDebug* _debug{ nullptr };
#endif // DEBUG_NVTT
gpu::Byte* _data{ nullptr };
gpu::Byte* _current{ nullptr };
gpu::Texture* _texture{ nullptr };
int _miplevel = 0;
int _size = 0;
};
struct MyErrorHandler : public nvtt::ErrorHandler {
virtual void error(nvtt::Error e) override {
qDebug() << "Texture compression error:" << nvtt::errorString(e);
}
};
void generateNVTTMips(gpu::Texture* texture, QImage& image, nvtt::InputFormat inputFormat, nvtt::Format compressionFormat) {
#if CPU_MIPMAPS
PROFILE_RANGE(resource_parse, "generateMips");
/*/
generateMips(texture, image, false);
return;
/**/
#if DEBUG_NVTT
QDebug debug = qDebug();
QDebug* debugPtr = &debug;
debug << Q_FUNC_INFO << "\n";
debug << (QList<int>() << image.byteCount() << image.width() << image.height() << image.depth()) << "\n";
#else
QDebug* debugPtr = nullptr;
#endif // DEBUG_NVTT
const int w = image.width(), h = image.height();
const void* data = static_cast<const void*>(image.constBits());
nvtt::InputOptions inputOptions;
inputOptions.setTextureLayout(nvtt::TextureType_2D, w, h);
inputOptions.setMipmapData(data, w, h);
inputOptions.setFormat(inputFormat);
// inputOptions.setAlphaMode(AlphaMode alphaMode);
// inputOptions.setGamma(float inputGamma, float outputGamma);
// inputOptions.setWrapMode(WrapMode mode);
// inputOptions.setMaxExtents(int d);
// inputOptions.setRoundMode(RoundMode mode);
inputOptions.setMipmapGeneration(true);
inputOptions.setMipmapFilter(nvtt::MipmapFilter_Box);
nvtt::OutputOptions outputOptions;
outputOptions.setOutputHeader(false);
MyOutputHandler outputHandler(texture, debugPtr);
outputOptions.setOutputHandler(&outputHandler);
MyErrorHandler errorHandler;
outputOptions.setErrorHandler(&errorHandler);
nvtt::CompressionOptions compressionOptions;
compressionOptions.setFormat(compressionFormat);
compressionOptions.setQuality(nvtt::Quality_Fastest);
nvtt::Compressor compressor;
compressor.process(inputOptions, compressionOptions, outputOptions);
#else
texture->autoGenerateMips(-1);
#endif
}
gpu::Texture* TextureUsage::process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool isStrict) {
PROFILE_RANGE(resource_parse, "process2DTextureColorFromImage");
bool validAlpha = false;
bool alphaAsMask = true;
QImage image = process2DImageColor(srcImage, validAlpha, alphaAsMask);
gpu::Texture* theTexture = nullptr;
if ((image.width() > 0) && (image.height() > 0)) {
gpu::Element formatGPU;
gpu::Element formatMip;
defineColorTexelFormats(formatGPU, formatMip, image, isLinear, doCompress);
if (isStrict) {
theTexture = (gpu::Texture::createStrict(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
} else {
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
}
theTexture->setSource(srcImageName);
auto usage = gpu::Texture::Usage::Builder().withColor();
if (validAlpha) {
usage.withAlpha();
if (alphaAsMask) {
usage.withAlphaMask();
}
}
theTexture->setUsage(usage.build());
theTexture->setStoredMipFormat(formatMip);
theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
if (generateMips) {
::generateNVTTMips(theTexture, image, nvtt::InputFormat_BGRA_8UB, nvtt::Format_RGBA);
}
theTexture->setSource(srcImageName);
}
return theTexture;
}
gpu::Texture* TextureUsage::createStrict2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, srcImageName, false, false, true, true);
}
gpu::Texture* TextureUsage::create2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, srcImageName, false, false, true);
}
gpu::Texture* TextureUsage::createAlbedoTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}
gpu::Texture* TextureUsage::createEmissiveTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}
gpu::Texture* TextureUsage::createLightmapTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return process2DTextureColorFromImage(srcImage, srcImageName, false, true, true);
}
gpu::Texture* TextureUsage::createNormalTextureFromNormalImage(const QImage& srcImage, const std::string& srcImageName) {
PROFILE_RANGE(resource_parse, "createNormalTextureFromNormalImage");
QImage image = processSourceImage(srcImage, false);
// Make sure the normal map source image is ARGB32
if (image.format() != QImage::Format_ARGB32) {
image = image.convertToFormat(QImage::Format_ARGB32);
}
gpu::Texture* theTexture = nullptr;
if ((image.width() > 0) && (image.height() > 0)) {
gpu::Element formatMip = gpu::Element::COLOR_BGRA_32;
gpu::Element formatGPU = gpu::Element::COLOR_RGBA_32;
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
theTexture->setStoredMipFormat(formatMip);
theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
generateMips(theTexture, image, true);
theTexture->setSource(srcImageName);
}
return theTexture;
}
int clampPixelCoordinate(int coordinate, int maxCoordinate) {
return coordinate - ((int)(coordinate < 0) * coordinate) + ((int)(coordinate > maxCoordinate) * (maxCoordinate - coordinate));
}
const int RGBA_MAX = 255;
// transform [-1, 1] to [0, 255] (from Sobel value to RGB)
double mapComponent(double sobelValue) {
const double factor = RGBA_MAX / 2.0;
return (sobelValue + 1.0) * factor;
}
gpu::Texture* TextureUsage::createNormalTextureFromBumpImage(const QImage& srcImage, const std::string& srcImageName) {
PROFILE_RANGE(resource_parse, "createNormalTextureFromBumpImage");
QImage image = processSourceImage(srcImage, false);
if (image.format() != QImage::Format_Grayscale8) {
image = image.convertToFormat(QImage::Format_Grayscale8);
}
// PR 5540 by AlessandroSigna integrated here as a specialized TextureLoader for bumpmaps
// The conversion is done using the Sobel Filter to calculate the derivatives from the grayscale image
const double pStrength = 2.0;
int width = image.width();
int height = image.height();
QImage result(width, height, QImage::Format_ARGB32);
for (int i = 0; i < width; i++) {
const int iNextClamped = clampPixelCoordinate(i + 1, width - 1);
const int iPrevClamped = clampPixelCoordinate(i - 1, width - 1);
for (int j = 0; j < height; j++) {
const int jNextClamped = clampPixelCoordinate(j + 1, height - 1);
const int jPrevClamped = clampPixelCoordinate(j - 1, height - 1);
// surrounding pixels
const QRgb topLeft = image.pixel(iPrevClamped, jPrevClamped);
const QRgb top = image.pixel(iPrevClamped, j);
const QRgb topRight = image.pixel(iPrevClamped, jNextClamped);
const QRgb right = image.pixel(i, jNextClamped);
const QRgb bottomRight = image.pixel(iNextClamped, jNextClamped);
const QRgb bottom = image.pixel(iNextClamped, j);
const QRgb bottomLeft = image.pixel(iNextClamped, jPrevClamped);
const QRgb left = image.pixel(i, jPrevClamped);
// take their gray intensities
// since it's a grayscale image, the value of each component RGB is the same
const double tl = qRed(topLeft);
const double t = qRed(top);
const double tr = qRed(topRight);
const double r = qRed(right);
const double br = qRed(bottomRight);
const double b = qRed(bottom);
const double bl = qRed(bottomLeft);
const double l = qRed(left);
// apply the sobel filter
const double dX = (tr + pStrength * r + br) - (tl + pStrength * l + bl);
const double dY = (bl + pStrength * b + br) - (tl + pStrength * t + tr);
const double dZ = RGBA_MAX / pStrength;
glm::vec3 v(dX, dY, dZ);
glm::normalize(v);
// convert to rgb from the value obtained computing the filter
QRgb qRgbValue = qRgba(mapComponent(v.z), mapComponent(v.y), mapComponent(v.x), 1.0);
result.setPixel(i, j, qRgbValue);
}
}
gpu::Texture* theTexture = nullptr;
if ((result.width() > 0) && (result.height() > 0)) {
gpu::Element formatMip = gpu::Element::COLOR_BGRA_32;
gpu::Element formatGPU = gpu::Element::COLOR_RGBA_32;
theTexture = (gpu::Texture::create2D(formatGPU, result.width(), result.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
theTexture->setStoredMipFormat(formatMip);
theTexture->assignStoredMip(0, result.byteCount(), result.constBits());
generateMips(theTexture, result, true);
theTexture->setSource(srcImageName);
}
return theTexture;
}
gpu::Texture* TextureUsage::createRoughnessTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
PROFILE_RANGE(resource_parse, "createRoughnessTextureFromImage");
QImage image = processSourceImage(srcImage, false);
if (!image.hasAlphaChannel()) {
if (image.format() != QImage::Format_RGB888) {
image = image.convertToFormat(QImage::Format_RGB888);
}
} else {
if (image.format() != QImage::Format_RGBA8888) {
image = image.convertToFormat(QImage::Format_RGBA8888);
}
}
image = image.convertToFormat(QImage::Format_Grayscale8);
gpu::Texture* theTexture = nullptr;
if ((image.width() > 0) && (image.height() > 0)) {
#ifdef COMPRESS_TEXTURES
gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::COMPRESSED_R);
#else
gpu::Element formatGPU = gpu::Element::COLOR_R_8;
#endif
gpu::Element formatMip = gpu::Element::COLOR_R_8;
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
theTexture->setStoredMipFormat(formatMip);
theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
generateMips(theTexture, image, true);
theTexture->setSource(srcImageName);
}
return theTexture;
}
gpu::Texture* TextureUsage::createRoughnessTextureFromGlossImage(const QImage& srcImage, const std::string& srcImageName) {
PROFILE_RANGE(resource_parse, "createRoughnessTextureFromGlossImage");
QImage image = processSourceImage(srcImage, false);
if (!image.hasAlphaChannel()) {
if (image.format() != QImage::Format_RGB888) {
image = image.convertToFormat(QImage::Format_RGB888);
}
} else {
if (image.format() != QImage::Format_RGBA8888) {
image = image.convertToFormat(QImage::Format_RGBA8888);
}
}
// Gloss turned into Rough
image.invertPixels(QImage::InvertRgba);
image = image.convertToFormat(QImage::Format_Grayscale8);
gpu::Texture* theTexture = nullptr;
if ((image.width() > 0) && (image.height() > 0)) {
#ifdef COMPRESS_TEXTURES
gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::COMPRESSED_R);
#else
gpu::Element formatGPU = gpu::Element::COLOR_R_8;
#endif
gpu::Element formatMip = gpu::Element::COLOR_R_8;
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
theTexture->setStoredMipFormat(formatMip);
theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
generateMips(theTexture, image, true);
theTexture->setSource(srcImageName);
}
return theTexture;
}
gpu::Texture* TextureUsage::createMetallicTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
PROFILE_RANGE(resource_parse, "createMetallicTextureFromImage");
QImage image = processSourceImage(srcImage, false);
if (!image.hasAlphaChannel()) {
if (image.format() != QImage::Format_RGB888) {
image = image.convertToFormat(QImage::Format_RGB888);
}
} else {
if (image.format() != QImage::Format_RGBA8888) {
image = image.convertToFormat(QImage::Format_RGBA8888);
}
}
image = image.convertToFormat(QImage::Format_Grayscale8);
gpu::Texture* theTexture = nullptr;
if ((image.width() > 0) && (image.height() > 0)) {
#ifdef COMPRESS_TEXTURES
gpu::Element formatGPU = gpu::Element(gpu::SCALAR, gpu::NUINT8, gpu::COMPRESSED_R);
#else
gpu::Element formatGPU = gpu::Element::COLOR_R_8;
#endif
gpu::Element formatMip = gpu::Element::COLOR_R_8;
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
theTexture->setSource(srcImageName);
theTexture->setStoredMipFormat(formatMip);
theTexture->assignStoredMip(0, image.byteCount(), image.constBits());
generateMips(theTexture, image, true);
theTexture->setSource(srcImageName);
}
return theTexture;
}
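// Illustrative usage only (the file name is an assumption): the roughness, gloss and metallic
// helpers above all collapse the source to a single Format_Grayscale8 channel before upload.
//     QImage metal("metallic.png");
//     gpu::Texture* metallicMap = TextureUsage::createMetallicTextureFromImage(metal, "metallic.png");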
class CubeLayout {
public:
enum SourceProjection {
FLAT = 0,
EQUIRECTANGULAR,
};
int _type = FLAT;
int _widthRatio = 1;
int _heightRatio = 1;
class Face {
public:
int _x = 0;
int _y = 0;
bool _horizontalMirror = false;
bool _verticalMirror = false;
Face() {}
Face(int x, int y, bool horizontalMirror, bool verticalMirror) : _x(x), _y(y), _horizontalMirror(horizontalMirror), _verticalMirror(verticalMirror) {}
};
Face _faceXPos;
Face _faceXNeg;
Face _faceYPos;
Face _faceYNeg;
Face _faceZPos;
Face _faceZNeg;
CubeLayout(int wr, int hr, Face fXP, Face fXN, Face fYP, Face fYN, Face fZP, Face fZN) :
_type(FLAT),
_widthRatio(wr),
_heightRatio(hr),
_faceXPos(fXP),
_faceXNeg(fXN),
_faceYPos(fYP),
_faceYNeg(fYN),
_faceZPos(fZP),
_faceZNeg(fZN) {}
CubeLayout(int wr, int hr) :
_type(EQUIRECTANGULAR),
_widthRatio(wr),
_heightRatio(hr) {}
static const CubeLayout CUBEMAP_LAYOUTS[];
static const int NUM_CUBEMAP_LAYOUTS;
static int findLayout(int width, int height) {
// Find the layout of the cubemap in the 2D image
int foundLayout = -1;
for (int i = 0; i < NUM_CUBEMAP_LAYOUTS; i++) {
if ((height * CUBEMAP_LAYOUTS[i]._widthRatio) == (width * CUBEMAP_LAYOUTS[i]._heightRatio)) {
foundLayout = i;
break;
}
}
return foundLayout;
}
static QImage extractEquirectangularFace(const QImage& source, gpu::Texture::CubeFace face, int faceWidth) {
QImage image(faceWidth, faceWidth, source.format());
glm::vec2 dstInvSize(1.0f / (float)image.width(), 1.0f / (float)image.height());
struct CubeToXYZ {
gpu::Texture::CubeFace _face;
CubeToXYZ(gpu::Texture::CubeFace face) : _face(face) {}
glm::vec3 xyzFrom(const glm::vec2& uv) {
auto faceDir = glm::normalize(glm::vec3(-1.0f + 2.0f * uv.x, -1.0f + 2.0f * uv.y, 1.0f));
switch (_face) {
case gpu::Texture::CubeFace::CUBE_FACE_BACK_POS_Z:
return glm::vec3(-faceDir.x, faceDir.y, faceDir.z);
case gpu::Texture::CubeFace::CUBE_FACE_FRONT_NEG_Z:
return glm::vec3(faceDir.x, faceDir.y, -faceDir.z);
case gpu::Texture::CubeFace::CUBE_FACE_LEFT_NEG_X:
return glm::vec3(faceDir.z, faceDir.y, faceDir.x);
case gpu::Texture::CubeFace::CUBE_FACE_RIGHT_POS_X:
return glm::vec3(-faceDir.z, faceDir.y, -faceDir.x);
case gpu::Texture::CubeFace::CUBE_FACE_BOTTOM_NEG_Y:
return glm::vec3(-faceDir.x, -faceDir.z, faceDir.y);
case gpu::Texture::CubeFace::CUBE_FACE_TOP_POS_Y:
default:
return glm::vec3(-faceDir.x, faceDir.z, -faceDir.y);
}
}
};
CubeToXYZ cubeToXYZ(face);
struct RectToXYZ {
RectToXYZ() {}
glm::vec2 uvFrom(const glm::vec3& xyz) {
auto flatDir = glm::normalize(glm::vec2(xyz.x, xyz.z));
auto uvRad = glm::vec2(atan2(flatDir.x, flatDir.y), asin(xyz.y));
const float LON_TO_RECT_U = 1.0f / (glm::pi<float>());
const float LAT_TO_RECT_V = 2.0f / glm::pi<float>();
return glm::vec2(0.5f * uvRad.x * LON_TO_RECT_U + 0.5f, 0.5f * uvRad.y * LAT_TO_RECT_V + 0.5f);
}
};
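// (uvFrom implements the usual inverse equirectangular mapping for a normalized direction xyz:
//  u = 0.5 + atan2(x, z) / (2 * pi),  v = 0.5 + asin(y) / pi.)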
RectToXYZ rectToXYZ;
int srcFaceHeight = source.height();
int srcFaceWidth = source.width();
glm::vec2 dstCoord;
glm::ivec2 srcPixel;
for (int y = 0; y < faceWidth; ++y) {
dstCoord.y = 1.0f - (y + 0.5f) * dstInvSize.y; // Fill cube face images from top to bottom
for (int x = 0; x < faceWidth; ++x) {
dstCoord.x = (x + 0.5f) * dstInvSize.x;
auto xyzDir = cubeToXYZ.xyzFrom(dstCoord);
auto srcCoord = rectToXYZ.uvFrom(xyzDir);
srcPixel.x = floor(srcCoord.x * srcFaceWidth);
// Flip the vertical axis to match QImage's top-to-bottom row order
srcPixel.y = floor((1.0f - srcCoord.y) * srcFaceHeight);
if (((uint32) srcPixel.x < (uint32) source.width()) && ((uint32) srcPixel.y < (uint32) source.height())) {
image.setPixel(x, y, source.pixel(QPoint(srcPixel.x, srcPixel.y)));
// Kept for debugging: shows the direction as a color
// glm::u8vec4 rgba((xyzDir.x + 1.0)*0.5 * 256, (xyzDir.y + 1.0)*0.5 * 256, (xyzDir.z + 1.0)*0.5 * 256, 256);
// unsigned int val = 0xff000000 | (rgba.r) | (rgba.g << 8) | (rgba.b << 16);
// image.setPixel(x, y, val);
}
}
}
return image;
}
};
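// Illustrative usage only (the panorama path and size are assumptions): extracting a single
// face from a 2:1 equirectangular panorama with the helper above.
//     QImage panorama("equirect_4096x2048.jpg");
//     int faceWidth = std::min(panorama.width() / 4, 2048); // same heuristic as processCubeTextureColorFromImage
//     QImage posX = CubeLayout::extractEquirectangularFace(panorama, gpu::Texture::CUBE_FACE_RIGHT_POS_X, faceWidth);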
const CubeLayout CubeLayout::CUBEMAP_LAYOUTS[] = {
// Here is the expected layout for the faces in an image with the 2/1 aspect ratio:
// This is detected as an Equirectangular projection
//                   WIDTH
//       <--------------------------->
//    ^  +------+------+------+------+
//    H  |      |      |      |      |
//    E  |      |      |      |      |
//    I  |      |      |      |      |
//    G  +------+------+------+------+
//    H  |      |      |      |      |
//    T  |      |      |      |      |
//    |  |      |      |      |      |
//    v  +------+------+------+------+
//
//    FaceWidth = width / 4, capped at 2048 (see the equirectangular path in processCubeTextureColorFromImage)
{ 2, 1 },
// Here is the expected layout for the faces in an image with the 1/6 aspect ratio:
//
//         WIDTH
//       <------>
//    ^  +------+
//    |  |      |
//    |  |  +X  |
//    |  |      |
//    H  +------+
//    E  |      |
//    I  |  -X  |
//    G  |      |
//    H  +------+
//    T  |      |
//    |  |  +Y  |
//    |  |      |
//    |  +------+
//    |  |      |
//    |  |  -Y  |
//    |  |      |
//    H  +------+
//    E  |      |
//    I  |  +Z  |
//    G  |      |
//    H  +------+
//    T  |      |
//    |  |  -Z  |
//    |  |      |
//    V  +------+
//
//    FaceWidth = width = height / 6
{ 1, 6,
{ 0, 0, true, false },
{ 0, 1, true, false },
{ 0, 2, false, true },
{ 0, 3, false, true },
{ 0, 4, true, false },
{ 0, 5, true, false }
},
// Here is the expected layout for the faces in an image with the 4/3 aspect ratio:
//
//       <-----------WIDTH----------->
//    ^  +------+------+------+------+
//    |  |      |      |      |      |
//    |  |      |  +Y  |      |      |
//    |  |      |      |      |      |
//    H  +------+------+------+------+
//    E  |      |      |      |      |
//    I  |  -X  |  -Z  |  +X  |  +Z  |
//    G  |      |      |      |      |
//    H  +------+------+------+------+
//    T  |      |      |      |      |
//    |  |      |  -Y  |      |      |
//    |  |      |      |      |      |
//    V  +------+------+------+------+
//
//    FaceWidth = width / 4 = height / 3
{ 4, 3,
{ 2, 1, true, false },
{ 0, 1, true, false },
{ 1, 0, false, true },
{ 1, 2, false, true },
{ 3, 1, true, false },
{ 1, 1, true, false }
},
// Here is the expected layout for the faces in an image with the 3/4 aspect ratio:
//
//       <-------WIDTH-------->
//    ^  +------+------+------+
//    |  |      |      |      |
//    |  |      |  +Y  |      |
//    |  |      |      |      |
//    H  +------+------+------+
//    E  |      |      |      |
//    I  |  -X  |  -Z  |  +X  |
//    G  |      |      |      |
//    H  +------+------+------+
//    T  |      |      |      |
//    |  |      |  -Y  |      |
//    |  |      |      |      |
//    |  +------+------+------+
//    |  |      |      |      |
//    |  |      |  +Z! |      |      <+Z is upside down!
//    |  |      |      |      |
//    V  +------+------+------+
//
//    FaceWidth = width / 3 = height / 4
{ 3, 4,
{ 2, 1, true, false },
{ 0, 1, true, false },
{ 1, 0, false, true },
{ 1, 2, false, true },
{ 1, 3, false, true },
{ 1, 1, true, false }
}
};
const int CubeLayout::NUM_CUBEMAP_LAYOUTS = sizeof(CubeLayout::CUBEMAP_LAYOUTS) / sizeof(CubeLayout);
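// Worked example: a 1024 x 6144 strip satisfies 6144 * 1 == 1024 * 6, so
// CubeLayout::findLayout(1024, 6144) picks the { 1, 6 } single-column layout (index 1).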
gpu::Texture* TextureUsage::processCubeTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool generateIrradiance) {
PROFILE_RANGE(resource_parse, "processCubeTextureColorFromImage");
gpu::Texture* theTexture = nullptr;
if ((srcImage.width() > 0) && (srcImage.height() > 0)) {
QImage image = processSourceImage(srcImage, true);
if (image.format() != QImage::Format_ARGB32) {
image = image.convertToFormat(QImage::Format_ARGB32);
}
gpu::Element formatGPU;
gpu::Element formatMip;
defineColorTexelFormats(formatGPU, formatMip, image, isLinear, doCompress);
// Find the layout of the cubemap in the 2D image
// Use the original image size since processSourceImage may have altered the size / aspect ratio
int foundLayout = CubeLayout::findLayout(srcImage.width(), srcImage.height());
std::vector<QImage> faces;
// If found, go extract the faces as separate images
if (foundLayout >= 0) {
auto& layout = CubeLayout::CUBEMAP_LAYOUTS[foundLayout];
if (layout._type == CubeLayout::FLAT) {
int faceWidth = image.width() / layout._widthRatio;
faces.push_back(image.copy(QRect(layout._faceXPos._x * faceWidth, layout._faceXPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceXPos._horizontalMirror, layout._faceXPos._verticalMirror));
faces.push_back(image.copy(QRect(layout._faceXNeg._x * faceWidth, layout._faceXNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceXNeg._horizontalMirror, layout._faceXNeg._verticalMirror));
faces.push_back(image.copy(QRect(layout._faceYPos._x * faceWidth, layout._faceYPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceYPos._horizontalMirror, layout._faceYPos._verticalMirror));
faces.push_back(image.copy(QRect(layout._faceYNeg._x * faceWidth, layout._faceYNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceYNeg._horizontalMirror, layout._faceYNeg._verticalMirror));
faces.push_back(image.copy(QRect(layout._faceZPos._x * faceWidth, layout._faceZPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceZPos._horizontalMirror, layout._faceZPos._verticalMirror));
faces.push_back(image.copy(QRect(layout._faceZNeg._x * faceWidth, layout._faceZNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceZNeg._horizontalMirror, layout._faceZNeg._verticalMirror));
} else if (layout._type == CubeLayout::EQUIRECTANGULAR) {
// The face width is estimated from the input image
const int EQUIRECT_FACE_RATIO_TO_WIDTH = 4;
const int EQUIRECT_MAX_FACE_WIDTH = 2048;
int faceWidth = std::min(image.width() / EQUIRECT_FACE_RATIO_TO_WIDTH, EQUIRECT_MAX_FACE_WIDTH);
for (int face = gpu::Texture::CUBE_FACE_RIGHT_POS_X; face < gpu::Texture::NUM_CUBE_FACES; face++) {
QImage faceImage = CubeLayout::extractEquirectangularFace(image, (gpu::Texture::CubeFace) face, faceWidth);
faces.push_back(faceImage);
}
}
} else {
qCDebug(modelLog) << "Failed to find a known cube map layout from this image:" << QString(srcImageName.c_str());
return nullptr;
}
// If all 6 faces have been extracted, go on and define the actual texture
if (faces.size() == gpu::Texture::NUM_FACES_PER_TYPE[gpu::Texture::TEX_CUBE]) {
theTexture = gpu::Texture::createCube(formatGPU, faces[0].width(), gpu::Texture::MAX_NUM_MIPS, gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
theTexture->setSource(srcImageName);
theTexture->setStoredMipFormat(formatMip);
int f = 0;
for (auto& face : faces) {
theTexture->assignStoredMipFace(0, f, face.byteCount(), face.constBits());
if (generateMips) {
generateFaceMips(theTexture, face, f);
}
f++;
}
// Generate irradiance while we are at it
if (generateIrradiance) {
PROFILE_RANGE(resource_parse, "generateIrradiance");
theTexture->generateIrradiance();
}
theTexture->setSource(srcImageName);
}
}
return theTexture;
}
gpu::Texture* TextureUsage::createCubeTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
return processCubeTextureColorFromImage(srcImage, srcImageName, false, true, true, true);
}
gpu::Texture* TextureUsage::createCubeTextureFromImageWithoutIrradiance(const QImage& srcImage, const std::string& srcImageName) {
return processCubeTextureColorFromImage(srcImage, srcImageName, false, true, true, false);
}
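// Illustrative usage only (the skybox file name is an assumption): both entry points share the
// layout detection above; the first additionally calls generateIrradiance() on the resulting cube map.
//     QImage sky("skybox_4x3.png");
//     gpu::Texture* skybox        = TextureUsage::createCubeTextureFromImage(sky, "skybox_4x3.png");
//     gpu::Texture* skyboxNoIrrad = TextureUsage::createCubeTextureFromImageWithoutIrradiance(sky, "skybox_4x3.png");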

View file

@@ -13,48 +13,10 @@
#include "gpu/Texture.h"
#include "Material.h"
#include "Transform.h"
#include <qurl.h>
class QImage;
namespace model {
typedef glm::vec3 Color;
class TextureUsage {
public:
gpu::Texture::Type _type{ gpu::Texture::TEX_2D };
Material::MapFlags _materialUsage{ MaterialKey::ALBEDO_MAP };
int _environmentUsage = 0;
static gpu::Texture* create2DTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createStrict2DTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createAlbedoTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createEmissiveTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createNormalTextureFromNormalImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createNormalTextureFromBumpImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createRoughnessTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createRoughnessTextureFromGlossImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createMetallicTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createCubeTextureFromImage(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createCubeTextureFromImageWithoutIrradiance(const QImage& image, const std::string& srcImageName);
static gpu::Texture* createLightmapTextureFromImage(const QImage& image, const std::string& srcImageName);
static const QImage process2DImageColor(const QImage& srcImage, bool& validAlpha, bool& alphaAsMask);
static void defineColorTexelFormats(gpu::Element& formatGPU, gpu::Element& formatMip,
const QImage& srcImage, bool isLinear, bool doCompress);
static gpu::Texture* process2DTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool isStrict = false);
static gpu::Texture* processCubeTextureColorFromImage(const QImage& srcImage, const std::string& srcImageName, bool isLinear, bool doCompress, bool generateMips, bool generateIrradiance);
};
class TextureMap {
public:
TextureMap() {}

View file

@@ -199,7 +199,7 @@ void RenderDeferredTask::build(JobModel& task, const render::Varying& input, ren
{
// Grab a texture map representing the different status icons and assign that to the drawStatusJob
auto iconMapPath = PathUtils::resourcesPath() + "icons/statusIconAtlas.svg";
auto statusIconMap = DependencyManager::get<TextureCache>()->getImageTexture(iconMapPath, NetworkTexture::STRICT_TEXTURE);
auto statusIconMap = DependencyManager::get<TextureCache>()->getImageTexture(iconMapPath, gpu::TextureType::STRICT_TEXTURE);
task.addJob<DrawStatus>("DrawStatus", opaques, DrawStatus(statusIconMap));
}
}

View file

@@ -3,7 +3,7 @@ AUTOSCRIBE_SHADER_LIB(gpu model render-utils)
# This is not a testcase -- just set it up as a regular hifi project
setup_hifi_project(Quick Gui OpenGL Script Widgets)
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/")
link_hifi_libraries(networking gl gpu gpu-gl procedural shared fbx model model-networking animation script-engine render render-utils octree )
link_hifi_libraries(networking gl gpu gpu-gl procedural shared fbx model model-networking animation script-engine render render-utils octree image)
package_libraries_for_deployment()
target_nsight()

View file

@@ -10,6 +10,6 @@ setup_hifi_project(Quick Gui OpenGL)
set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/")
# link in the shared libraries
link_hifi_libraries(shared octree ktx gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics)
link_hifi_libraries(shared octree ktx gl gpu gpu-gl render model model-networking networking render-utils fbx entities entities-renderer animation audio avatars script-engine physics image)
package_libraries_for_deployment()

View file

@@ -43,6 +43,7 @@
#include <gl/Config.h>
#include <model/TextureMap.h>
#include <ktx/KTX.h>
#include <image/Image.h>
QSharedPointer<FileLogger> logger;
@@ -94,7 +95,7 @@ int main(int argc, char** argv) {
QLoggingCategory::setFilterRules(LOG_FILTER_RULES);
QImage image(TEST_IMAGE);
gpu::Texture* testTexture = model::TextureUsage::process2DTextureColorFromImage(image, TEST_IMAGE.toStdString(), true, false, true);
gpu::Texture* testTexture = image::TextureUsage::process2DTextureColorFromImage(image, TEST_IMAGE.toStdString(), true, false, true);
auto ktxMemory = gpu::Texture::serialize(*testTexture);
{