diff --git a/interface/src/raypick/LaserPointer.cpp b/interface/src/raypick/LaserPointer.cpp
index bd746c9090..12daae0351 100644
--- a/interface/src/raypick/LaserPointer.cpp
+++ b/interface/src/raypick/LaserPointer.cpp
@@ -233,16 +233,19 @@ PointerEvent LaserPointer::buildPointerEvent(const PickedObject& target, const P
     // If we just started triggering and we haven't moved too much, don't update intersection and pos2D
     TriggerState& state = hover ? _latestState : _states[button];
-    float sensorToWorldScale = DependencyManager::get<AvatarManager>()->getMyAvatar()->getSensorToWorldScale();
-    float deadspotSquared = TOUCH_PRESS_TO_MOVE_DEADSPOT_SQUARED * sensorToWorldScale * sensorToWorldScale;
-    bool withinDeadspot = usecTimestampNow() - state.triggerStartTime < POINTER_MOVE_DELAY && glm::distance2(pos2D, state.triggerPos2D) < deadspotSquared;
-    if ((state.triggering || state.wasTriggering) && !state.deadspotExpired && withinDeadspot) {
-        pos2D = state.triggerPos2D;
-        intersection = state.intersection;
-        surfaceNormal = state.surfaceNormal;
-    }
-    if (!withinDeadspot) {
-        state.deadspotExpired = true;
+    auto avatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
+    if (avatar) {
+        float sensorToWorldScale = avatar->getSensorToWorldScale();
+        float deadspotSquared = TOUCH_PRESS_TO_MOVE_DEADSPOT_SQUARED * sensorToWorldScale * sensorToWorldScale;
+        bool withinDeadspot = usecTimestampNow() - state.triggerStartTime < POINTER_MOVE_DELAY && glm::distance2(pos2D, state.triggerPos2D) < deadspotSquared;
+        if ((state.triggering || state.wasTriggering) && !state.deadspotExpired && withinDeadspot) {
+            pos2D = state.triggerPos2D;
+            intersection = state.intersection;
+            surfaceNormal = state.surfaceNormal;
+        }
+        if (!withinDeadspot) {
+            state.deadspotExpired = true;
+        }
     }

     return PointerEvent(pos2D, intersection, surfaceNormal, direction);
diff --git a/libraries/baking/src/TextureBaker.cpp b/libraries/baking/src/TextureBaker.cpp
index 3756ae86de..54d304b7d8 100644
--- a/libraries/baking/src/TextureBaker.cpp
+++ b/libraries/baking/src/TextureBaker.cpp
@@ -131,7 +131,10 @@ void TextureBaker::handleTextureNetworkReply() {
 void TextureBaker::processTexture() {
     // the baked textures need to have the source hash added for cache checks in Interface
     // so we add that to the processed texture before handling it off to be serialized
-    auto hashData = QCryptographicHash::hash(_originalTexture, QCryptographicHash::Md5);
+    QCryptographicHash hasher(QCryptographicHash::Md5);
+    hasher.addData(_originalTexture);
+    hasher.addData((const char*)&_textureType, sizeof(_textureType));
+    auto hashData = hasher.result();
     std::string hash = hashData.toHex().toStdString();

     TextureMeta meta;
@@ -206,7 +209,7 @@ void TextureBaker::processTexture() {
     }

     // Uncompressed KTX
-    if (_textureType == image::TextureUsage::Type::CUBE_TEXTURE) {
+    if (_textureType == image::TextureUsage::Type::SKY_TEXTURE || _textureType == image::TextureUsage::Type::AMBIENT_TEXTURE) {
         buffer->reset();
         auto processedTexture = image::processImage(std::move(buffer), _textureURL.toString().toStdString(), image::ColorChannel::NONE, ABSOLUTE_MAX_TEXTURE_NUM_PIXELS, _textureType, false, gpu::BackendTarget::GL45, _abortProcessing);
diff --git a/libraries/entities-renderer/src/RenderableZoneEntityItem.cpp b/libraries/entities-renderer/src/RenderableZoneEntityItem.cpp
index 8a7fa3f8e7..64cca404cb 100644
--- a/libraries/entities-renderer/src/RenderableZoneEntityItem.cpp
+++ b/libraries/entities-renderer/src/RenderableZoneEntityItem.cpp
@@ -465,7 +465,7 @@ void ZoneEntityRenderer::setAmbientURL(const QString& ambientUrl) {
     } else {
         _pendingAmbientTexture = true;
         auto textureCache = DependencyManager::get<TextureCache>();
-        _ambientTexture = textureCache->getTexture(_ambientTextureURL, image::TextureUsage::CUBE_TEXTURE);
+        _ambientTexture = textureCache->getTexture(_ambientTextureURL, image::TextureUsage::AMBIENT_TEXTURE);

         // keep whatever is assigned on the ambient map/sphere until texture is loaded
     }
@@ -506,7 +506,7 @@ void ZoneEntityRenderer::setSkyboxURL(const QString& skyboxUrl) {
     } else {
         _pendingSkyboxTexture = true;
         auto textureCache = DependencyManager::get<TextureCache>();
-        _skyboxTexture = textureCache->getTexture(_skyboxTextureURL, image::TextureUsage::CUBE_TEXTURE);
+        _skyboxTexture = textureCache->getTexture(_skyboxTextureURL, image::TextureUsage::SKY_TEXTURE);
     }
 }
diff --git a/libraries/image/CMakeLists.txt b/libraries/image/CMakeLists.txt
index 0c733ae789..62f48f66e2 100644
--- a/libraries/image/CMakeLists.txt
+++ b/libraries/image/CMakeLists.txt
@@ -2,6 +2,7 @@ set(TARGET_NAME image)
 setup_hifi_library()
 link_hifi_libraries(shared gpu)
 target_nvtt()
+target_tbb()
 target_etc2comp()
 target_openexr()
diff --git a/libraries/image/src/image/CubeMap.cpp b/libraries/image/src/image/CubeMap.cpp
new file mode 100644
index 0000000000..9196377daa
--- /dev/null
+++ b/libraries/image/src/image/CubeMap.cpp
@@ -0,0 +1,660 @@
+//
+//  CubeMap.cpp
+//  image/src/image
+//
+//  Created by Olivier Prat on 03/27/2019.
+//  Copyright 2019 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+#include "CubeMap.h"
+
+#include
+#include
+
+#include "RandomAndNoise.h"
+#include "BRDF.h"
+#include "ImageLogging.h"
+
+#ifndef M_PI
+#define M_PI 3.14159265359
+#endif
+
+#include
+
+using namespace image;
+
+static const glm::vec3 FACE_NORMALS[24] = {
+    // POSITIVE X
+    glm::vec3(1.0f, 1.0f, 1.0f),
+    glm::vec3(1.0f, 1.0f, -1.0f),
+    glm::vec3(1.0f, -1.0f, 1.0f),
+    glm::vec3(1.0f, -1.0f, -1.0f),
+    // NEGATIVE X
+    glm::vec3(-1.0f, 1.0f, -1.0f),
+    glm::vec3(-1.0f, 1.0f, 1.0f),
+    glm::vec3(-1.0f, -1.0f, -1.0f),
+    glm::vec3(-1.0f, -1.0f, 1.0f),
+    // POSITIVE Y
+    glm::vec3(-1.0f, 1.0f, -1.0f),
+    glm::vec3(1.0f, 1.0f, -1.0f),
+    glm::vec3(-1.0f, 1.0f, 1.0f),
+    glm::vec3(1.0f, 1.0f, 1.0f),
+    // NEGATIVE Y
+    glm::vec3(-1.0f, -1.0f, 1.0f),
+    glm::vec3(1.0f, -1.0f, 1.0f),
+    glm::vec3(-1.0f, -1.0f, -1.0f),
+    glm::vec3(1.0f, -1.0f, -1.0f),
+    // POSITIVE Z
+    glm::vec3(-1.0f, 1.0f, 1.0f),
+    glm::vec3(1.0f, 1.0f, 1.0f),
+    glm::vec3(-1.0f, -1.0f, 1.0f),
+    glm::vec3(1.0f, -1.0f, 1.0f),
+    // NEGATIVE Z
+    glm::vec3(1.0f, 1.0f, -1.0f),
+    glm::vec3(-1.0f, 1.0f, -1.0f),
+    glm::vec3(1.0f, -1.0f, -1.0f),
+    glm::vec3(-1.0f, -1.0f, -1.0f)
+};
+
+struct CubeFaceMip {
+
+    CubeFaceMip(gpu::uint16 level, const CubeMap* cubemap) {
+        _dims = cubemap->getMipDimensions(level);
+        _lineStride = cubemap->getMipLineStride(level);
+    }
+
+    CubeFaceMip(const CubeFaceMip& other) : _dims(other._dims), _lineStride(other._lineStride) {
+
+    }
+
+    gpu::Vec2i _dims;
+    size_t _lineStride;
+};
+
+class CubeMap::ConstMip : public CubeFaceMip {
+public:
+
+    ConstMip(gpu::uint16 level, const CubeMap* cubemap) :
+        CubeFaceMip(level, cubemap), _faces(cubemap->_mips[level]) {
+    }
+
+    glm::vec4 fetch(int face, glm::vec2 uv) const {
+        glm::vec2 coordFrac = uv * glm::vec2(_dims) - 0.5f;
+        glm::vec2 coords = glm::floor(coordFrac);
+
+        coordFrac -= coords;
+
+        coords += (float)EDGE_WIDTH;
+
+        const auto& pixels = _faces[face];
+
gpu::Vec2i loCoords(coords); + gpu::Vec2i hiCoords; + + hiCoords = glm::clamp(loCoords + 1, gpu::Vec2i(0, 0), _dims - 1 + (int)EDGE_WIDTH); + loCoords = glm::clamp(loCoords, gpu::Vec2i(0, 0), _dims - 1 + (int)EDGE_WIDTH); + + const size_t offsetLL = loCoords.x + loCoords.y * _lineStride; + const size_t offsetHL = hiCoords.x + loCoords.y * _lineStride; + const size_t offsetLH = loCoords.x + hiCoords.y * _lineStride; + const size_t offsetHH = hiCoords.x + hiCoords.y * _lineStride; + assert(offsetLL >= 0 && offsetLL < _lineStride * (_dims.y + 2 * EDGE_WIDTH)); + assert(offsetHL >= 0 && offsetHL < _lineStride * (_dims.y + 2 * EDGE_WIDTH)); + assert(offsetLH >= 0 && offsetLH < _lineStride * (_dims.y + 2 * EDGE_WIDTH)); + assert(offsetHH >= 0 && offsetHH < _lineStride * (_dims.y + 2 * EDGE_WIDTH)); + glm::vec4 colorLL = pixels[offsetLL]; + glm::vec4 colorHL = pixels[offsetHL]; + glm::vec4 colorLH = pixels[offsetLH]; + glm::vec4 colorHH = pixels[offsetHH]; + + colorLL += (colorHL - colorLL) * coordFrac.x; + colorLH += (colorHH - colorLH) * coordFrac.x; + return colorLL + (colorLH - colorLL) * coordFrac.y; + } + +private: + + const Faces& _faces; + +}; + +class CubeMap::Mip : public CubeFaceMip { +public: + + explicit Mip(gpu::uint16 level, CubeMap* cubemap) : + CubeFaceMip(level, cubemap), _faces(cubemap->_mips[level]) { + } + + Mip(const Mip& other) : CubeFaceMip(other), _faces(other._faces) { + } + + void applySeams() { + if (EDGE_WIDTH == 0) { + return; + } + + // Copy edge rows and columns from neighbouring faces to fix seam filtering issues + seamColumnAndRow(gpu::Texture::CUBE_FACE_TOP_POS_Y, _dims.x, gpu::Texture::CUBE_FACE_RIGHT_POS_X, -1, -1); + seamColumnAndRow(gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, _dims.x, gpu::Texture::CUBE_FACE_RIGHT_POS_X, _dims.y, 1); + seamColumnAndColumn(gpu::Texture::CUBE_FACE_FRONT_NEG_Z, -1, gpu::Texture::CUBE_FACE_RIGHT_POS_X, _dims.x, 1); + seamColumnAndColumn(gpu::Texture::CUBE_FACE_BACK_POS_Z, _dims.x, gpu::Texture::CUBE_FACE_RIGHT_POS_X, -1, 1); + + seamRowAndRow(gpu::Texture::CUBE_FACE_BACK_POS_Z, -1, gpu::Texture::CUBE_FACE_TOP_POS_Y, _dims.y, 1); + seamRowAndRow(gpu::Texture::CUBE_FACE_BACK_POS_Z, _dims.y, gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, -1, 1); + seamColumnAndColumn(gpu::Texture::CUBE_FACE_BACK_POS_Z, -1, gpu::Texture::CUBE_FACE_LEFT_NEG_X, _dims.x, 1); + + seamRowAndRow(gpu::Texture::CUBE_FACE_TOP_POS_Y, -1, gpu::Texture::CUBE_FACE_FRONT_NEG_Z, -1, -1); + seamColumnAndRow(gpu::Texture::CUBE_FACE_TOP_POS_Y, -1, gpu::Texture::CUBE_FACE_LEFT_NEG_X, -1, 1); + + seamColumnAndColumn(gpu::Texture::CUBE_FACE_LEFT_NEG_X, -1, gpu::Texture::CUBE_FACE_FRONT_NEG_Z, _dims.x, 1); + seamColumnAndRow(gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, -1, gpu::Texture::CUBE_FACE_LEFT_NEG_X, _dims.y, -1); + + seamRowAndRow(gpu::Texture::CUBE_FACE_FRONT_NEG_Z, _dims.y, gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, _dims.y, -1); + + // Duplicate corner pixels + for (int face = 0; face < 6; face++) { + auto& pixels = _faces[face]; + + pixels[0] = pixels[1]; + pixels[_dims.x + 1] = pixels[_dims.x]; + pixels[(_dims.y + 1)*(_dims.x + 2)] = pixels[(_dims.y + 1)*(_dims.x + 2) + 1]; + pixels[(_dims.y + 2)*(_dims.x + 2) - 1] = pixels[(_dims.y + 2)*(_dims.x + 2) - 2]; + } + } + +private: + + Faces& _faces; + + inline static void copy(CubeMap::Face::const_iterator srcFirst, CubeMap::Face::const_iterator srcLast, size_t srcStride, CubeMap::Face::iterator dstBegin, size_t dstStride) { + while (srcFirst <= srcLast) { + *dstBegin = *srcFirst; + srcFirst += srcStride; + dstBegin += dstStride; + } + } 
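// Illustrative sketch (not part of the patch): each face mip is stored with a
// one-texel border (EDGE_WIDTH) on every side, so a face of N x N texels occupies
// (N + 2) x (N + 2) storage. editFace()/getFace() return a pointer already offset
// past that border, and applySeams() above fills the border with the edge texels of
// the neighbouring faces, which lets the bilinear fetch in ConstMip::fetch() read
// one texel outside the face without special casing. borderedOffset() is a
// hypothetical helper that just spells out the addressing.
static inline int borderedOffset(int x, int y, int faceWidth) {
    const int edge = 1;                          // EDGE_WIDTH
    const int lineStride = faceWidth + 2 * edge; // matches CubeMap::getMipLineStride()
    // x and y may legitimately be -1 or faceWidth when a bilinear tap falls just
    // over the edge; those taps land in the border filled by applySeams().
    return (x + edge) + (y + edge) * lineStride;
}
// Example: on a 4x4 face (lineStride 6), texel (-1, 2) maps to offset 18, i.e. the
// left border column, which holds a copy of the adjacent face's nearest column.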
+ + static std::pair getSrcAndDst(int dim, int value) { + int src; + int dst; + + if (value < 0) { + src = 1; + dst = 0; + } else if (value >= dim) { + src = dim; + dst = dim + 1; + } + return std::make_pair(src, dst); + } + + void seamColumnAndColumn(int face0, int col0, int face1, int col1, int inc) { + auto coords0 = getSrcAndDst(_dims.x, col0); + auto coords1 = getSrcAndDst(_dims.x, col1); + + copyColumnToColumn(face0, coords0.first, face1, coords1.second, inc); + copyColumnToColumn(face1, coords1.first, face0, coords0.second, inc); + } + + void seamColumnAndRow(int face0, int col0, int face1, int row1, int inc) { + auto coords0 = getSrcAndDst(_dims.x, col0); + auto coords1 = getSrcAndDst(_dims.y, row1); + + copyColumnToRow(face0, coords0.first, face1, coords1.second, inc); + copyRowToColumn(face1, coords1.first, face0, coords0.second, inc); + } + + void seamRowAndRow(int face0, int row0, int face1, int row1, int inc) { + auto coords0 = getSrcAndDst(_dims.y, row0); + auto coords1 = getSrcAndDst(_dims.y, row1); + + copyRowToRow(face0, coords0.first, face1, coords1.second, inc); + copyRowToRow(face1, coords1.first, face0, coords0.second, inc); + } + + void copyColumnToColumn(int srcFace, int srcCol, int dstFace, int dstCol, const int dstInc) { + const auto lastOffset = _lineStride * (_dims.y - 1); + auto srcFirst = _faces[srcFace].begin() + srcCol + _lineStride; + auto srcLast = srcFirst + lastOffset; + + auto dstFirst = _faces[dstFace].begin() + dstCol + _lineStride; + auto dstLast = dstFirst + lastOffset; + const auto dstStride = _lineStride * dstInc; + + assert(srcFirst < _faces[srcFace].end()); + assert(srcLast < _faces[srcFace].end()); + assert(dstFirst < _faces[dstFace].end()); + assert(dstLast < _faces[dstFace].end()); + + if (dstInc < 0) { + std::swap(dstFirst, dstLast); + } + + copy(srcFirst, srcLast, _lineStride, dstFirst, dstStride); + } + + void copyRowToRow(int srcFace, int srcRow, int dstFace, int dstRow, const int dstInc) { + const auto lastOffset =(_dims.x - 1); + auto srcFirst = _faces[srcFace].begin() + srcRow * _lineStride + 1; + auto srcLast = srcFirst + lastOffset; + + auto dstFirst = _faces[dstFace].begin() + dstRow * _lineStride + 1; + auto dstLast = dstFirst + lastOffset; + + assert(srcFirst < _faces[srcFace].end()); + assert(srcLast < _faces[srcFace].end()); + assert(dstFirst < _faces[dstFace].end()); + assert(dstLast < _faces[dstFace].end()); + + if (dstInc < 0) { + std::swap(dstFirst, dstLast); + } + + copy(srcFirst, srcLast, 1, dstFirst, dstInc); + } + + void copyColumnToRow(int srcFace, int srcCol, int dstFace, int dstRow, int dstInc) { + const auto srcLastOffset = _lineStride * (_dims.y - 1); + auto srcFirst = _faces[srcFace].begin() + srcCol + _lineStride; + auto srcLast = srcFirst + srcLastOffset; + + const auto dstLastOffset = (_dims.x - 1); + auto dstFirst = _faces[dstFace].begin() + dstRow * _lineStride + 1; + auto dstLast = dstFirst + dstLastOffset; + + assert(srcFirst < _faces[srcFace].end()); + assert(srcLast < _faces[srcFace].end()); + assert(dstFirst < _faces[dstFace].end()); + assert(dstLast < _faces[dstFace].end()); + + if (dstInc < 0) { + std::swap(dstFirst, dstLast); + } + + copy(srcFirst, srcLast, _lineStride, dstFirst, dstInc); + } + + void copyRowToColumn(int srcFace, int srcRow, int dstFace, int dstCol, int dstInc) { + const auto srcLastOffset = (_dims.x - 1); + auto srcFirst = _faces[srcFace].begin() + srcRow * _lineStride + 1; + auto srcLast = srcFirst + srcLastOffset; + + const auto dstLastOffset = _lineStride * (_dims.y - 1); + auto 
dstFirst = _faces[dstFace].begin() + dstCol + _lineStride; + auto dstLast = dstFirst + dstLastOffset; + const auto dstStride = _lineStride * dstInc; + + assert(srcFirst < _faces[srcFace].end()); + assert(srcLast < _faces[srcFace].end()); + assert(dstFirst < _faces[dstFace].end()); + assert(dstLast < _faces[dstFace].end()); + + if (dstInc < 0) { + std::swap(dstFirst, dstLast); + } + + copy(srcFirst, srcLast, 1, dstFirst, dstStride); + } +}; + +static void copySurface(const nvtt::Surface& source, glm::vec4* dest, size_t dstLineStride) { + const float* srcRedIt = source.channel(0); + const float* srcGreenIt = source.channel(1); + const float* srcBlueIt = source.channel(2); + const float* srcAlphaIt = source.channel(3); + + for (int y = 0; y < source.height(); y++) { + glm::vec4* dstColIt = dest; + for (int x = 0; x < source.width(); x++) { + *dstColIt = glm::vec4(*srcRedIt, *srcGreenIt, *srcBlueIt, *srcAlphaIt); + dstColIt++; + srcRedIt++; + srcGreenIt++; + srcBlueIt++; + srcAlphaIt++; + } + dest += dstLineStride; + } +} + +CubeMap::CubeMap(int width, int height, int mipCount) { + reset(width, height, mipCount); +} + +CubeMap::CubeMap(const std::vector& faces, int mipCount, const std::atomic& abortProcessing) { + reset(faces.front().getWidth(), faces.front().getHeight(), mipCount); + + int face; + + nvtt::Surface surface; + surface.setAlphaMode(nvtt::AlphaMode_None); + surface.setWrapMode(nvtt::WrapMode_Mirror); + + // Compute mips + for (face = 0; face < 6; face++) { + Image faceImage = faces[face].getConvertedToFormat(Image::Format_RGBAF); + + surface.setImage(nvtt::InputFormat_RGBA_32F, _width, _height, 1, faceImage.editBits()); + + auto mipLevel = 0; + copySurface(surface, editFace(0, face), getMipLineStride(0)); + + while (surface.canMakeNextMipmap() && !abortProcessing.load()) { + surface.buildNextMipmap(nvtt::MipmapFilter_Box); + mipLevel++; + + copySurface(surface, editFace(mipLevel, face), getMipLineStride(mipLevel)); + } + } + + if (abortProcessing.load()) { + return; + } + + for (gpu::uint16 mipLevel = 0; mipLevel < mipCount; ++mipLevel) { + Mip mip(mipLevel, this); + mip.applySeams(); + } +} + +void CubeMap::applyGamma(float value) { + for (auto& mip : _mips) { + for (auto& face : mip) { + for (auto& pixel : face) { + pixel.r = std::pow(pixel.r, value); + pixel.g = std::pow(pixel.g, value); + pixel.b = std::pow(pixel.b, value); + } + } + } +} + +void CubeMap::copyFace(int width, int height, const glm::vec4* source, size_t srcLineStride, glm::vec4* dest, size_t dstLineStride) { + for (int y = 0; y < height; y++) { + std::copy(source, source + width, dest); + source += srcLineStride; + dest += dstLineStride; + } +} + +Image CubeMap::getFaceImage(gpu::uint16 mipLevel, int face) const { + auto mipDims = getMipDimensions(mipLevel); + Image faceImage(mipDims.x, mipDims.y, Image::Format_RGBAF); + copyFace(mipDims.x, mipDims.y, getFace(mipLevel, face), getMipLineStride(mipLevel), (glm::vec4*)faceImage.editBits(), faceImage.getBytesPerLineCount() / sizeof(glm::vec4)); + return faceImage; +} + +void CubeMap::reset(int width, int height, int mipCount) { + assert(mipCount >0 && width > 0 && height > 0); + _width = width; + _height = height; + _mips.resize(mipCount); + for (auto mipLevel = 0; mipLevel < mipCount; mipLevel++) { + auto mipDimensions = getMipDimensions(mipLevel); + // Add extra pixels on edges to perform edge seam fixup (we will duplicate pixels from + // neighbouring faces) + auto mipPixelCount = (mipDimensions.x + 2 * EDGE_WIDTH) * (mipDimensions.y + 2 * EDGE_WIDTH); + + for 
(auto& face : _mips[mipLevel]) { + face.resize(mipPixelCount); + } + } +} + +void CubeMap::copyTo(CubeMap& other) const { + other._width = _width; + other._height = _height; + other._mips = _mips; +} + +void CubeMap::getFaceUV(const glm::vec3& dir, int* index, glm::vec2* uv) { + // Taken from https://en.wikipedia.org/wiki/Cube_mapping + float absX = std::abs(dir.x); + float absY = std::abs(dir.y); + float absZ = std::abs(dir.z); + + auto isXPositive = dir.x > 0; + auto isYPositive = dir.y > 0; + auto isZPositive = dir.z > 0; + + float maxAxis = 1.0f; + float uc = 0.0f; + float vc = 0.0f; + + // POSITIVE X + if (isXPositive && absX >= absY && absX >= absZ) { + // u (0 to 1) goes from +z to -z + // v (0 to 1) goes from -y to +y + maxAxis = absX; + uc = -dir.z; + vc = -dir.y; + *index = 0; + } + // NEGATIVE X + else if (!isXPositive && absX >= absY && absX >= absZ) { + // u (0 to 1) goes from -z to +z + // v (0 to 1) goes from -y to +y + maxAxis = absX; + uc = dir.z; + vc = -dir.y; + *index = 1; + } + // POSITIVE Y + else if (isYPositive && absY >= absX && absY >= absZ) { + // u (0 to 1) goes from -x to +x + // v (0 to 1) goes from +z to -z + maxAxis = absY; + uc = dir.x; + vc = dir.z; + *index = 2; + } + // NEGATIVE Y + else if (!isYPositive && absY >= absX && absY >= absZ) { + // u (0 to 1) goes from -x to +x + // v (0 to 1) goes from -z to +z + maxAxis = absY; + uc = dir.x; + vc = -dir.z; + *index = 3; + } + // POSITIVE Z + else if (isZPositive && absZ >= absX && absZ >= absY) { + // u (0 to 1) goes from -x to +x + // v (0 to 1) goes from -y to +y + maxAxis = absZ; + uc = dir.x; + vc = -dir.y; + *index = 4; + } + // NEGATIVE Z + else if (!isZPositive && absZ >= absX && absZ >= absY) { + // u (0 to 1) goes from +x to -x + // v (0 to 1) goes from -y to +y + maxAxis = absZ; + uc = -dir.x; + vc = -dir.y; + *index = 5; + } + + // Convert range from -1 to 1 to 0 to 1 + uv->x = 0.5f * (uc / maxAxis + 1.0f); + uv->y = 0.5f * (vc / maxAxis + 1.0f); +} + +glm::vec4 CubeMap::fetchLod(const glm::vec3& dir, float lod) const { + lod = glm::clamp(lod, 0.0f, _mips.size() - 1); + + gpu::uint16 loLevel = (gpu::uint16)std::floor(lod); + gpu::uint16 hiLevel = (gpu::uint16)std::ceil(lod); + float lodFrac = lod - (float)loLevel; + ConstMip loMip(loLevel, this); + ConstMip hiMip(hiLevel, this); + int face; + glm::vec2 uv; + glm::vec4 loColor; + glm::vec4 hiColor; + + getFaceUV(dir, &face, &uv); + + loColor = loMip.fetch(face, uv); + hiColor = hiMip.fetch(face, uv); + + return loColor + (hiColor - loColor) * lodFrac; +} + +struct CubeMap::GGXSamples { + float invTotalWeight; + std::vector points; +}; + +// All the GGX convolution code is inspired from: +// https://placeholderart.wordpress.com/2015/07/28/implementation-notes-runtime-environment-map-filtering-for-image-based-lighting/ +// Computation is done in tangent space so normal is always (0,0,1) which simplifies a lot of things + +void CubeMap::generateGGXSamples(GGXSamples& data, float roughness, const int resolution) { + glm::vec2 xi; + glm::vec3 L; + glm::vec3 H; + const float saTexel = (float)(4.0 * M_PI / (6.0 * resolution * resolution)); + const float mipBias = 3.0f; + const auto sampleCount = data.points.size(); + const auto hammersleySequenceLength = data.points.size(); + size_t sampleIndex = 0; + size_t hammersleySampleIndex = 0; + float NdotL; + + data.invTotalWeight = 0.0f; + + // Do some computation in tangent space + while (sampleIndex < sampleCount) { + if (hammersleySampleIndex < hammersleySequenceLength) { + xi = 
hammersley::evaluate((int)hammersleySampleIndex, (int)hammersleySequenceLength); + H = ggx::sample(xi, roughness); + L = H * (2.0f * H.z) - glm::vec3(0.0f, 0.0f, 1.0f); + NdotL = L.z; + hammersleySampleIndex++; + } else { + NdotL = -1.0f; + } + + while (NdotL <= 0.0f) { + // Create a purely random sample + xi.x = rand() / float(RAND_MAX); + xi.y = rand() / float(RAND_MAX); + H = ggx::sample(xi, roughness); + L = H * (2.0f * H.z) - glm::vec3(0.0f, 0.0f, 1.0f); + NdotL = L.z; + } + + float NdotH = std::max(0.0f, H.z); + float HdotV = NdotH; + float D = ggx::evaluate(NdotH, roughness); + float pdf = (D * NdotH / (4.0f * HdotV)) + 0.0001f; + float saSample = 1.0f / (float(sampleCount) * pdf + 0.0001f); + float mipLevel = std::max(0.5f * std::log2(saSample / saTexel) + mipBias, 0.0f); + + auto& sample = data.points[sampleIndex]; + sample.x = L.x; + sample.y = L.y; + sample.z = L.z; + sample.w = mipLevel; + + data.invTotalWeight += NdotL; + + sampleIndex++; + } + data.invTotalWeight = 1.0f / data.invTotalWeight; +} + +void CubeMap::convolveForGGX(CubeMap& output, const std::atomic& abortProcessing) const { + // This should match the value in the getMipLevelFromRoughness function (LightAmbient.slh) + static const float ROUGHNESS_1_MIP_RESOLUTION = 1.5f; + static const size_t MAX_SAMPLE_COUNT = 4000; + + const auto mipCount = getMipCount(); + GGXSamples params; + + params.points.reserve(MAX_SAMPLE_COUNT); + + for (gpu::uint16 mipLevel = 0; mipLevel < mipCount; ++mipLevel) { + // This is the inverse code found in LightAmbient.slh in getMipLevelFromRoughness + float levelAlpha = float(mipLevel) / (mipCount - ROUGHNESS_1_MIP_RESOLUTION); + float mipRoughness = levelAlpha * (1.0f + 2.0f * levelAlpha) / 3.0f; + + mipRoughness = std::max(1e-3f, mipRoughness); + mipRoughness = std::min(1.0f, mipRoughness); + + size_t mipTotalPixelCount = getMipWidth(mipLevel) * getMipHeight(mipLevel) * 6; + size_t sampleCount = 1U + size_t(4000 * mipRoughness * mipRoughness); + + sampleCount = std::min(sampleCount, 2 * mipTotalPixelCount); + sampleCount = std::min(MAX_SAMPLE_COUNT, sampleCount); + + params.points.resize(sampleCount); + generateGGXSamples(params, mipRoughness, _width); + + for (int face = 0; face < 6; face++) { + convolveMipFaceForGGX(params, output, mipLevel, face, abortProcessing); + if (abortProcessing.load()) { + return; + } + } + } +} + +void CubeMap::convolveMipFaceForGGX(const GGXSamples& samples, CubeMap& output, gpu::uint16 mipLevel, int face, const std::atomic& abortProcessing) const { + const glm::vec3* faceNormals = FACE_NORMALS + face * 4; + const glm::vec3 deltaYNormalLo = faceNormals[2] - faceNormals[0]; + const glm::vec3 deltaYNormalHi = faceNormals[3] - faceNormals[1]; + const auto mipDimensions = output.getMipDimensions(mipLevel); + const auto outputLineStride = output.getMipLineStride(mipLevel); + auto outputFacePixels = output.editFace(mipLevel, face); + + tbb::parallel_for(tbb::blocked_range2d(0, mipDimensions.y, 32, 0, mipDimensions.x, 32), [&](const tbb::blocked_range2d& range) { + auto rowRange = range.rows(); + auto colRange = range.cols(); + + for (auto y = rowRange.begin(); y < rowRange.end(); y++) { + if (abortProcessing.load()) { + break; + } + + const float yAlpha = (y + 0.5f) / mipDimensions.y; + const glm::vec3 normalXLo = faceNormals[0] + deltaYNormalLo * yAlpha; + const glm::vec3 normalXHi = faceNormals[1] + deltaYNormalHi * yAlpha; + const glm::vec3 deltaXNormal = normalXHi - normalXLo; + + for (auto x = colRange.begin(); x < colRange.end(); x++) { + const float xAlpha = 
(x + 0.5f) / mipDimensions.x; + // Interpolate normal for this pixel + const glm::vec3 normal = glm::normalize(normalXLo + deltaXNormal * xAlpha); + + outputFacePixels[x + y * outputLineStride] = computeConvolution(normal, samples); + } + } + }); +} + +glm::vec4 CubeMap::computeConvolution(const glm::vec3& N, const GGXSamples& samples) const { + // from tangent-space vector to world-space + glm::vec3 bitangent = std::abs(N.z) < 0.999f ? glm::vec3(0.0f, 0.0f, 1.0f) : glm::vec3(1.0f, 0.0f, 0.0f); + glm::vec3 tangent = glm::normalize(glm::cross(bitangent, N)); + bitangent = glm::cross(N, tangent); + + const size_t sampleCount = samples.points.size(); + glm::vec4 prefilteredColor = glm::vec4(0.0f); + + for (size_t i = 0; i < sampleCount; ++i) { + const auto& sample = samples.points[i]; + glm::vec3 L(sample.x, sample.y, sample.z); + float NdotL = L.z; + float mipLevel = sample.w; + // Now back to world space + L = tangent * L.x + bitangent * L.y + N * L.z; + prefilteredColor += fetchLod(L, mipLevel) * NdotL; + } + prefilteredColor = prefilteredColor * samples.invTotalWeight; + prefilteredColor.a = 1.0f; + return prefilteredColor; +} \ No newline at end of file diff --git a/libraries/image/src/image/CubeMap.h b/libraries/image/src/image/CubeMap.h new file mode 100644 index 0000000000..0745267cb6 --- /dev/null +++ b/libraries/image/src/image/CubeMap.h @@ -0,0 +1,92 @@ +// +// CubeMap.h +// image/src/image +// +// Created by Olivier Prat on 03/27/2019. +// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_image_CubeMap_h +#define hifi_image_CubeMap_h + +#include +#include +#include +#include +#include + +#include "Image.h" + +namespace image { + + class CubeMap { + + enum { + EDGE_WIDTH = 1 + }; + + public: + + CubeMap(int width, int height, int mipCount); + CubeMap(const std::vector& faces, int mipCount, const std::atomic& abortProcessing = false); + + void reset(int width, int height, int mipCount); + void copyTo(CubeMap& other) const; + + void applyGamma(float value); + + gpu::uint16 getMipCount() const { return (gpu::uint16)_mips.size(); } + int getMipWidth(gpu::uint16 mipLevel) const { + return std::max(1, _width >> mipLevel); + } + int getMipHeight(gpu::uint16 mipLevel) const { + return std::max(1, _height >> mipLevel); + } + gpu::Vec2i getMipDimensions(gpu::uint16 mipLevel) const { + return gpu::Vec2i(getMipWidth(mipLevel), getMipHeight(mipLevel)); + } + + size_t getMipLineStride(gpu::uint16 mipLevel) const { + return getMipWidth(mipLevel) + 2 * EDGE_WIDTH; + } + + glm::vec4* editFace(gpu::uint16 mipLevel, int face) { + return _mips[mipLevel][face].data() + (getMipLineStride(mipLevel) + 1)*EDGE_WIDTH; + } + + const glm::vec4* getFace(gpu::uint16 mipLevel, int face) const { + return _mips[mipLevel][face].data() + (getMipLineStride(mipLevel) + 1)*EDGE_WIDTH; + } + + Image getFaceImage(gpu::uint16 mipLevel, int face) const; + + void convolveForGGX(CubeMap& output, const std::atomic& abortProcessing) const; + glm::vec4 fetchLod(const glm::vec3& dir, float lod) const; + + private: + + struct GGXSamples; + class Mip; + class ConstMip; + + using Face = std::vector; + using Faces = std::array; + + int _width; + int _height; + std::vector _mips; + + static void getFaceUV(const glm::vec3& dir, int* index, glm::vec2* uv); + static void generateGGXSamples(GGXSamples& data, float roughness, const int resolution); + static void copyFace(int width, int 
height, const glm::vec4* source, size_t srcLineStride, glm::vec4* dest, size_t dstLineStride); + void convolveMipFaceForGGX(const GGXSamples& samples, CubeMap& output, gpu::uint16 mipLevel, int face, const std::atomic& abortProcessing) const; + glm::vec4 computeConvolution(const glm::vec3& normal, const GGXSamples& samples) const; + + }; + +} + +#endif // hifi_image_CubeMap_h diff --git a/libraries/image/src/image/Image.cpp b/libraries/image/src/image/Image.cpp index df5ed15867..2ef83e42d8 100644 --- a/libraries/image/src/image/Image.cpp +++ b/libraries/image/src/image/Image.cpp @@ -6,28 +6,91 @@ using namespace image; +Image::Image(int width, int height, Format format) : + _dims(width, height), + _format(format) { + if (_format == Format_RGBAF) { + _floatData.resize(width*height); + } else { + _packedData = QImage(width, height, (QImage::Format)format); + } +} + +size_t Image::getByteCount() const { + if (_format == Format_RGBAF) { + return sizeof(FloatPixels::value_type) * _floatData.size(); + } else { + return _packedData.byteCount(); + } +} + +size_t Image::getBytesPerLineCount() const { + if (_format == Format_RGBAF) { + return sizeof(FloatPixels::value_type) * _dims.x; + } else { + return _packedData.bytesPerLine(); + } +} + +glm::uint8* Image::editScanLine(int y) { + if (_format == Format_RGBAF) { + return reinterpret_cast(_floatData.data() + y * _dims.x); + } else { + return _packedData.scanLine(y); + } +} + +const glm::uint8* Image::getScanLine(int y) const { + if (_format == Format_RGBAF) { + return reinterpret_cast(_floatData.data() + y * _dims.x); + } else { + return _packedData.scanLine(y); + } +} + +glm::uint8* Image::editBits() { + if (_format == Format_RGBAF) { + return reinterpret_cast(_floatData.data()); + } else { + return _packedData.bits(); + } +} + +const glm::uint8* Image::getBits() const { + if (_format == Format_RGBAF) { + return reinterpret_cast(_floatData.data()); + } else { + return _packedData.bits(); + } +} + Image Image::getScaled(glm::uvec2 dstSize, AspectRatioMode ratioMode, TransformationMode transformMode) const { - if ((Image::Format)_data.format() == Image::Format_PACKED_FLOAT) { - // Start by converting to full float - glm::vec4* floatPixels = new glm::vec4[getWidth()*getHeight()]; - auto unpackFunc = getHDRUnpackingFunction(); - auto floatDataIt = floatPixels; - for (glm::uint32 lineNb = 0; lineNb < getHeight(); lineNb++) { - const glm::uint32* srcPixelIt = reinterpret_cast(getScanLine((int)lineNb)); - const glm::uint32* srcPixelEnd = srcPixelIt + getWidth(); - - while (srcPixelIt < srcPixelEnd) { - *floatDataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f); - ++srcPixelIt; - ++floatDataIt; - } - } - - // Perform filtered resize with NVTT - static_assert(sizeof(glm::vec4) == 4 * sizeof(float), "Assuming glm::vec4 holds 4 floats"); + if (_format == Format_PACKED_FLOAT || _format == Format_RGBAF) { nvtt::Surface surface; - surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, floatPixels); - delete[] floatPixels; + + if (_format == Format_RGBAF) { + surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, _floatData.data()); + } else { + // Start by converting to full float + glm::vec4* floatPixels = new glm::vec4[getWidth()*getHeight()]; + auto unpackFunc = getHDRUnpackingFunction(); + auto floatDataIt = floatPixels; + for (glm::uint32 lineNb = 0; lineNb < getHeight(); lineNb++) { + const glm::uint32* srcPixelIt = reinterpret_cast(getScanLine((int)lineNb)); + const glm::uint32* srcPixelEnd = srcPixelIt + getWidth(); + + 
while (srcPixelIt < srcPixelEnd) { + *floatDataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f); + ++srcPixelIt; + ++floatDataIt; + } + } + + // Perform filtered resize with NVTT + static_assert(sizeof(glm::vec4) == 4 * sizeof(float), "Assuming glm::vec4 holds 4 floats"); + surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, floatPixels); + delete[] floatPixels; + } nvtt::ResizeFilter filter = nvtt::ResizeFilter_Kaiser; if (transformMode == Qt::TransformationMode::FastTransformation) { @@ -35,44 +98,148 @@ Image Image::getScaled(glm::uvec2 dstSize, AspectRatioMode ratioMode, Transforma } surface.resize(dstSize.x, dstSize.y, 1, filter); - // And convert back to original format - QImage resizedImage((int)dstSize.x, (int)dstSize.y, (QImage::Format)Image::Format_PACKED_FLOAT); - - auto packFunc = getHDRPackingFunction(); auto srcRedIt = reinterpret_cast(surface.channel(0)); auto srcGreenIt = reinterpret_cast(surface.channel(1)); auto srcBlueIt = reinterpret_cast(surface.channel(2)); - for (glm::uint32 lineNb = 0; lineNb < dstSize.y; lineNb++) { - glm::uint32* dstPixelIt = reinterpret_cast(resizedImage.scanLine((int)lineNb)); - glm::uint32* dstPixelEnd = dstPixelIt + dstSize.x; + auto srcAlphaIt = reinterpret_cast(surface.channel(3)); + + if (_format == Format_RGBAF) { + Image output(_dims.x, _dims.y, _format); + auto dstPixelIt = output._floatData.begin(); + auto dstPixelEnd = output._floatData.end(); while (dstPixelIt < dstPixelEnd) { - *dstPixelIt = packFunc(glm::vec3(*srcRedIt, *srcGreenIt, *srcBlueIt)); + *dstPixelIt = glm::vec4(*srcRedIt, *srcGreenIt, *srcBlueIt, *srcAlphaIt); ++srcRedIt; ++srcGreenIt; ++srcBlueIt; + ++srcAlphaIt; + ++dstPixelIt; } + + return output; + } else { + // And convert back to original format + QImage resizedImage((int)dstSize.x, (int)dstSize.y, (QImage::Format)Image::Format_PACKED_FLOAT); + + auto packFunc = getHDRPackingFunction(); + for (glm::uint32 lineNb = 0; lineNb < dstSize.y; lineNb++) { + glm::uint32* dstPixelIt = reinterpret_cast(resizedImage.scanLine((int)lineNb)); + glm::uint32* dstPixelEnd = dstPixelIt + dstSize.x; + + while (dstPixelIt < dstPixelEnd) { + *dstPixelIt = packFunc(glm::vec3(*srcRedIt, *srcGreenIt, *srcBlueIt)); + ++srcRedIt; + ++srcGreenIt; + ++srcBlueIt; + ++dstPixelIt; + } + } + return resizedImage; } - return resizedImage; } else { - return _data.scaled(fromGlm(dstSize), ratioMode, transformMode); + return _packedData.scaled(fromGlm(dstSize), ratioMode, transformMode); } } Image Image::getConvertedToFormat(Format newFormat) const { - assert(getFormat() != Format_PACKED_FLOAT); - return _data.convertToFormat((QImage::Format)newFormat); + const float MAX_COLOR_VALUE = 255.0f; + + if (newFormat == _format) { + return *this; + } else if ((_format != Format_R11G11B10F && _format != Format_RGBAF) && (newFormat != Format_R11G11B10F && newFormat != Format_RGBAF)) { + return _packedData.convertToFormat((QImage::Format)newFormat); + } else if (_format == Format_PACKED_FLOAT) { + Image newImage(_dims.x, _dims.y, newFormat); + + switch (newFormat) { + case Format_RGBAF: + convertToFloatFromPacked(getBits(), _dims.x, _dims.y, getBytesPerLineCount(), gpu::Element::COLOR_R11G11B10, newImage._floatData.data(), _dims.x); + break; + + default: + { + auto unpackFunc = getHDRUnpackingFunction(); + const glm::uint32* srcIt = reinterpret_cast(getBits()); + + for (int y = 0; y < _dims.y; y++) { + for (int x = 0; x < _dims.x; x++) { + auto color = glm::clamp(unpackFunc(*srcIt) * MAX_COLOR_VALUE, 0.0f, 255.0f); + newImage.setPackedPixel(x, 
y, qRgb(color.r, color.g, color.b)); + srcIt++; + } + } + break; + } + } + return newImage; + } else if (_format == Format_RGBAF) { + Image newImage(_dims.x, _dims.y, newFormat); + + switch (newFormat) { + case Format_R11G11B10F: + convertToPackedFromFloat(newImage.editBits(), _dims.x, _dims.y, getBytesPerLineCount(), gpu::Element::COLOR_R11G11B10, _floatData.data(), _dims.x); + break; + + default: + { + FloatPixels::const_iterator srcIt = _floatData.begin(); + + for (int y = 0; y < _dims.y; y++) { + for (int x = 0; x < _dims.x; x++) { + auto color = glm::clamp((*srcIt) * MAX_COLOR_VALUE, 0.0f, 255.0f); + newImage.setPackedPixel(x, y, qRgba(color.r, color.g, color.b, color.a)); + srcIt++; + } + } + break; + } + } + return newImage; + } else { + Image newImage(_dims.x, _dims.y, newFormat); + assert(newImage.hasFloatFormat()); + + if (newFormat == Format_RGBAF) { + FloatPixels::iterator dstIt = newImage._floatData.begin(); + + for (int y = 0; y < _dims.y; y++) { + auto line = (const QRgb*)getScanLine(y); + for (int x = 0; x < _dims.x; x++) { + QRgb pixel = line[x]; + *dstIt = glm::vec4(qRed(pixel), qGreen(pixel), qBlue(pixel), qAlpha(pixel)) / MAX_COLOR_VALUE; + dstIt++; + } + } + } else { + auto packFunc = getHDRPackingFunction(); + glm::uint32* dstIt = reinterpret_cast( newImage.editBits() ); + + for (int y = 0; y < _dims.y; y++) { + auto line = (const QRgb*)getScanLine(y); + for (int x = 0; x < _dims.x; x++) { + QRgb pixel = line[x]; + *dstIt = packFunc(glm::vec3(qRed(pixel), qGreen(pixel), qBlue(pixel)) / MAX_COLOR_VALUE); + dstIt++; + } + } + } + return newImage; + } } void Image::invertPixels() { - _data.invertPixels(QImage::InvertRgba); + assert(_format != Format_PACKED_FLOAT && _format != Format_RGBAF); + _packedData.invertPixels(QImage::InvertRgba); } Image Image::getSubImage(QRect rect) const { - return _data.copy(rect); + assert(_format != Format_RGBAF); + return _packedData.copy(rect); } Image Image::getMirrored(bool horizontal, bool vertical) const { - return _data.mirrored(horizontal, vertical); + assert(_format != Format_RGBAF); + return _packedData.mirrored(horizontal, vertical); } diff --git a/libraries/image/src/image/Image.h b/libraries/image/src/image/Image.h index bfecf4f2a1..129061900f 100644 --- a/libraries/image/src/image/Image.h +++ b/libraries/image/src/image/Image.h @@ -48,37 +48,69 @@ namespace image { Format_RGBA8888_Premultiplied = QImage::Format_RGBA8888_Premultiplied, Format_Grayscale8 = QImage::Format_Grayscale8, Format_R11G11B10F = QImage::Format_RGB30, - Format_PACKED_FLOAT = Format_R11G11B10F + Format_PACKED_FLOAT = Format_R11G11B10F, + // RGBA 32 bit single precision float per component + Format_RGBAF = 100 }; using AspectRatioMode = Qt::AspectRatioMode; using TransformationMode = Qt::TransformationMode; - Image() {} - Image(int width, int height, Format format) : _data(width, height, (QImage::Format)format) {} - Image(const QImage& data) : _data(data) {} - void operator=(const QImage& image) { - _data = image; + Image() : _dims(0,0) {} + Image(int width, int height, Format format); + Image(const QImage& data) : _packedData(data), _dims(data.width(), data.height()), _format((Format)data.format()) {} + + void operator=(const QImage& other) { + _packedData = other; + _floatData.clear(); + _dims.x = other.width(); + _dims.y = other.height(); + _format = (Format)other.format(); } - bool isNull() const { return _data.isNull(); } - - Format getFormat() const { return (Format)_data.format(); } - bool hasAlphaChannel() const { return _data.hasAlphaChannel(); } - 
- glm::uint32 getWidth() const { return (glm::uint32)_data.width(); } - glm::uint32 getHeight() const { return (glm::uint32)_data.height(); } - glm::uvec2 getSize() const { return toGlm(_data.size()); } - size_t getByteCount() const { return _data.byteCount(); } - - QRgb getPixel(int x, int y) const { return _data.pixel(x, y); } - void setPixel(int x, int y, QRgb value) { - _data.setPixel(x, y, value); + void operator=(const Image& other) { + if (&other != this) { + _packedData = other._packedData; + _floatData = other._floatData; + _dims = other._dims; + _format = other._format; + } } - glm::uint8* editScanLine(int y) { return _data.scanLine(y); } - const glm::uint8* getScanLine(int y) const { return _data.scanLine(y); } - const glm::uint8* getBits() const { return _data.constBits(); } + bool isNull() const { return _packedData.isNull() && _floatData.empty(); } + + Format getFormat() const { return _format; } + bool hasAlphaChannel() const { return _packedData.hasAlphaChannel() || _format == Format_RGBAF; } + bool hasFloatFormat() const { return _format == Format_R11G11B10F || _format == Format_RGBAF; } + + glm::uint32 getWidth() const { return (glm::uint32)_dims.x; } + glm::uint32 getHeight() const { return (glm::uint32)_dims.y; } + glm::uvec2 getSize() const { return glm::uvec2(_dims); } + size_t getByteCount() const; + size_t getBytesPerLineCount() const; + + QRgb getPackedPixel(int x, int y) const { + assert(_format != Format_RGBAF); + return _packedData.pixel(x, y); + } + void setPackedPixel(int x, int y, QRgb value) { + assert(_format != Format_RGBAF); + _packedData.setPixel(x, y, value); + } + + glm::vec4 getFloatPixel(int x, int y) const { + assert(_format == Format_RGBAF); + return _floatData[x + y*_dims.x]; + } + void setFloatPixel(int x, int y, const glm::vec4& value) { + assert(_format == Format_RGBAF); + _floatData[x + y * _dims.x] = value; + } + + glm::uint8* editScanLine(int y); + const glm::uint8* getScanLine(int y) const; + glm::uint8* editBits(); + const glm::uint8* getBits() const; Image getScaled(glm::uvec2 newSize, AspectRatioMode ratioMode, TransformationMode transformationMode = Qt::SmoothTransformation) const; Image getConvertedToFormat(Format newFormat) const; @@ -90,7 +122,13 @@ namespace image { private: - QImage _data; + using FloatPixels = std::vector; + + // For QImage supported formats + QImage _packedData; + FloatPixels _floatData; + glm::ivec2 _dims; + Format _format; }; } // namespace image diff --git a/libraries/image/src/image/TextureProcessing.cpp b/libraries/image/src/image/TextureProcessing.cpp index 037229ace5..5b3d546f8e 100644 --- a/libraries/image/src/image/TextureProcessing.cpp +++ b/libraries/image/src/image/TextureProcessing.cpp @@ -29,10 +29,10 @@ #include "OpenEXRReader.h" #endif #include "ImageLogging.h" +#include "CubeMap.h" using namespace gpu; -#define CPU_MIPMAPS 1 #include #undef _CRT_SECURE_NO_WARNINGS @@ -111,11 +111,13 @@ TextureUsage::TextureLoader TextureUsage::getTextureLoaderForType(Type type, con return image::TextureUsage::createEmissiveTextureFromImage; case LIGHTMAP_TEXTURE: return image::TextureUsage::createLightmapTextureFromImage; - case CUBE_TEXTURE: + case SKY_TEXTURE: + return image::TextureUsage::createCubeTextureFromImage; + case AMBIENT_TEXTURE: if (options.value("generateIrradiance", true).toBool()) { - return image::TextureUsage::createCubeTextureFromImage; + return image::TextureUsage::createAmbientCubeTextureAndIrradianceFromImage; } else { - return 
image::TextureUsage::createCubeTextureFromImageWithoutIrradiance; + return image::TextureUsage::createAmbientCubeTextureFromImage; } case BUMP_TEXTURE: return image::TextureUsage::createNormalTextureFromBumpImage; @@ -186,14 +188,24 @@ gpu::TexturePointer TextureUsage::createMetallicTextureFromImage(Image&& srcImag return process2DTextureGrayscaleFromImage(std::move(srcImage), srcImageName, compress, target, false, abortProcessing); } -gpu::TexturePointer TextureUsage::createCubeTextureFromImage(Image&& srcImage, const std::string& srcImageName, +gpu::TexturePointer TextureUsage::createCubeTextureAndIrradianceFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, BackendTarget target, const std::atomic& abortProcessing) { - return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, true, abortProcessing); + return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, CUBE_GENERATE_IRRADIANCE, abortProcessing); } -gpu::TexturePointer TextureUsage::createCubeTextureFromImageWithoutIrradiance(Image&& srcImage, const std::string& srcImageName, - bool compress, BackendTarget target, const std::atomic& abortProcessing) { - return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, false, abortProcessing); +gpu::TexturePointer TextureUsage::createCubeTextureFromImage(Image&& srcImage, const std::string& srcImageName, + bool compress, BackendTarget target, const std::atomic& abortProcessing) { + return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, CUBE_DEFAULT, abortProcessing); +} + +gpu::TexturePointer TextureUsage::createAmbientCubeTextureFromImage(Image&& image, const std::string& srcImageName, + bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing) { + return processCubeTextureColorFromImage(std::move(image), srcImageName, compress, target, CUBE_GGX_CONVOLVE, abortProcessing); +} + +gpu::TexturePointer TextureUsage::createAmbientCubeTextureAndIrradianceFromImage(Image&& image, const std::string& srcImageName, + bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing) { + return processCubeTextureColorFromImage(std::move(image), srcImageName, compress, target, CUBE_GENERATE_IRRADIANCE | CUBE_GGX_CONVOLVE, abortProcessing); } static float denormalize(float value, const float minValue) { @@ -215,11 +227,17 @@ static uint32 packR11G11B10F(const glm::vec3& color) { return glm::packF2x11_1x10(ucolor); } +static uint32 packUnorm4x8(const glm::vec3& color) { + return glm::packUnorm4x8(glm::vec4(color, 1.0f)); +} + static std::function getHDRPackingFunction(const gpu::Element& format) { if (format == gpu::Element::COLOR_RGB9E5) { return glm::packF3x9_E1x5; } else if (format == gpu::Element::COLOR_R11G11B10) { return packR11G11B10F; + } else if (format == gpu::Element::COLOR_RGBA_32 || format == gpu::Element::COLOR_SRGBA_32 || format == gpu::Element::COLOR_BGRA_32 || format == gpu::Element::COLOR_SBGRA_32) { + return packUnorm4x8; } else { qCWarning(imagelogging) << "Unknown handler format"; Q_UNREACHABLE(); @@ -231,18 +249,24 @@ std::function getHDRPackingFunction() { return getHDRPackingFunction(GPU_CUBEMAP_HDR_FORMAT); } -std::function getHDRUnpackingFunction() { - if (GPU_CUBEMAP_HDR_FORMAT == gpu::Element::COLOR_RGB9E5) { +std::function getHDRUnpackingFunction(const gpu::Element& format) { + if (format == gpu::Element::COLOR_RGB9E5) { return glm::unpackF3x9_E1x5; - } else if 
(GPU_CUBEMAP_HDR_FORMAT == gpu::Element::COLOR_R11G11B10) { + } else if (format == gpu::Element::COLOR_R11G11B10) { return glm::unpackF2x11_1x10; + } else if (format == gpu::Element::COLOR_RGBA_32 || format == gpu::Element::COLOR_SRGBA_32 || format == gpu::Element::COLOR_BGRA_32 || format == gpu::Element::COLOR_SBGRA_32) { + return glm::unpackUnorm4x8; } else { - qCWarning(imagelogging) << "Unknown HDR encoding format in Image"; + qCWarning(imagelogging) << "Unknown handler format"; Q_UNREACHABLE(); return nullptr; } } +std::function getHDRUnpackingFunction() { + return getHDRUnpackingFunction(GPU_CUBEMAP_HDR_FORMAT); +} + Image processRawImageData(QIODevice& content, const std::string& filename) { // Help the Image loader by extracting the image file format from the url filename ext. // Some tga are not created properly without it. @@ -490,13 +514,15 @@ struct MyErrorHandler : public nvtt::ErrorHandler { } }; +#if defined(NVTT_API) class SequentialTaskDispatcher : public nvtt::TaskDispatcher { public: - SequentialTaskDispatcher(const std::atomic& abortProcessing) : _abortProcessing(abortProcessing) {}; + SequentialTaskDispatcher(const std::atomic& abortProcessing = false) : _abortProcessing(abortProcessing) { + } const std::atomic& _abortProcessing; - virtual void dispatch(nvtt::Task* task, void* context, int count) override { + void dispatch(nvtt::Task* task, void* context, int count) override { for (int i = 0; i < count; i++) { if (!_abortProcessing.load()) { task(context, i); @@ -506,108 +532,137 @@ public: } } }; +#endif -void generateHDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic& abortProcessing, int face) { - // Take a local copy to force move construction - // https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#f18-for-consume-parameters-pass-by-x-and-stdmove-the-parameter - Image localCopy = std::move(image); +void convertToFloatFromPacked(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat, + glm::vec4* output, size_t outputLinePixelStride) { + glm::vec4* outputIt; + auto unpackFunc = getHDRUnpackingFunction(sourceFormat); - assert(localCopy.getFormat() == Image::Format_PACKED_FLOAT); - - const int width = localCopy.getWidth(), height = localCopy.getHeight(); - std::vector data; - std::vector::iterator dataIt; - auto mipFormat = texture->getStoredMipFormat(); - std::function unpackFunc = getHDRUnpackingFunction(); - - nvtt::InputFormat inputFormat = nvtt::InputFormat_RGBA_32F; - nvtt::WrapMode wrapMode = nvtt::WrapMode_Mirror; - nvtt::AlphaMode alphaMode = nvtt::AlphaMode_None; - - nvtt::CompressionOptions compressionOptions; - compressionOptions.setQuality(nvtt::Quality_Production); - - // TODO: gles: generate ETC mips instead? 
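// Illustrative sketch (not part of the patch): the packing helpers above reduce to
// the glm codecs for the supported cube-map formats. packR11G11B10F() additionally
// flushes denormals before calling glm::packF2x11_1x10, while the RGB9E5 path uses
// the glm shared-exponent codec directly. hdrPackingRoundTrip() is a hypothetical
// function that just shows the (lossy) round trip these helpers perform.
#include <glm/glm.hpp>
#include <glm/gtc/packing.hpp>

void hdrPackingRoundTrip() {
    const glm::vec3 hdrColor(1.7f, 0.25f, 3.0f);             // unclamped linear RGB
    glm::uint32 r11g11b10 = glm::packF2x11_1x10(hdrColor);   // 11/11/10-bit float components
    glm::uint32 rgb9e5 = glm::packF3x9_E1x5(hdrColor);       // 9-bit mantissas, shared 5-bit exponent
    glm::vec3 fromR11 = glm::unpackF2x11_1x10(r11g11b10);    // what getHDRUnpackingFunction() returns
    glm::vec3 fromRgb9e5 = glm::unpackF3x9_E1x5(rgb9e5);     // for COLOR_R11G11B10 / COLOR_RGB9E5
    (void)fromR11;
    (void)fromRgb9e5;
}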
- if (mipFormat == gpu::Element::COLOR_COMPRESSED_BCX_HDR_RGB) { - compressionOptions.setFormat(nvtt::Format_BC6); - } else if (mipFormat == gpu::Element::COLOR_RGB9E5) { - compressionOptions.setFormat(nvtt::Format_RGB); - compressionOptions.setPixelType(nvtt::PixelType_Float); - compressionOptions.setPixelFormat(32, 32, 32, 0); - } else if (mipFormat == gpu::Element::COLOR_R11G11B10) { - compressionOptions.setFormat(nvtt::Format_RGB); - compressionOptions.setPixelType(nvtt::PixelType_Float); - compressionOptions.setPixelFormat(32, 32, 32, 0); - } else { - qCWarning(imagelogging) << "Unknown mip format"; - Q_UNREACHABLE(); - return; - } - - data.resize(width * height); - dataIt = data.begin(); + outputLinePixelStride -= width; + outputIt = output; for (auto lineNb = 0; lineNb < height; lineNb++) { - const uint32* srcPixelIt = reinterpret_cast(localCopy.getScanLine(lineNb)); + const uint32* srcPixelIt = reinterpret_cast(source + lineNb * srcLineByteStride); const uint32* srcPixelEnd = srcPixelIt + width; while (srcPixelIt < srcPixelEnd) { - *dataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f); + *outputIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f); ++srcPixelIt; - ++dataIt; + ++outputIt; } + outputIt += outputLinePixelStride; } - assert(dataIt == data.end()); +} - // We're done with the localCopy, free up the memory to avoid bloating the heap - localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one. +void convertToPackedFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat, + const glm::vec4* source, size_t srcLinePixelStride) { + const glm::vec4* sourceIt; + auto packFunc = getHDRPackingFunction(outputFormat); + + srcLinePixelStride -= width; + sourceIt = source; + for (auto lineNb = 0; lineNb < height; lineNb++) { + uint32* outPixelIt = reinterpret_cast(output + lineNb * outputLineByteStride); + uint32* outPixelEnd = outPixelIt + width; + + while (outPixelIt < outPixelEnd) { + *outPixelIt = packFunc(*sourceIt); + ++outPixelIt; + ++sourceIt; + } + sourceIt += srcLinePixelStride; + } +} + +nvtt::OutputHandler* getNVTTCompressionOutputHandler(gpu::Texture* outputTexture, int face, nvtt::CompressionOptions& compressionOptions) { + auto outputFormat = outputTexture->getStoredMipFormat(); + bool useNVTT = false; + + compressionOptions.setQuality(nvtt::Quality_Production); + + if (outputFormat == gpu::Element::COLOR_COMPRESSED_BCX_HDR_RGB) { + useNVTT = true; + compressionOptions.setFormat(nvtt::Format_BC6); + } else if (outputFormat == gpu::Element::COLOR_RGB9E5) { + compressionOptions.setFormat(nvtt::Format_RGB); + compressionOptions.setPixelType(nvtt::PixelType_Float); + compressionOptions.setPixelFormat(32, 32, 32, 0); + } else if (outputFormat == gpu::Element::COLOR_R11G11B10) { + compressionOptions.setFormat(nvtt::Format_RGB); + compressionOptions.setPixelType(nvtt::PixelType_Float); + compressionOptions.setPixelFormat(32, 32, 32, 0); + } else if (outputFormat == gpu::Element::COLOR_SRGBA_32) { + useNVTT = true; + compressionOptions.setFormat(nvtt::Format_RGB); + compressionOptions.setPixelType(nvtt::PixelType_UnsignedNorm); + compressionOptions.setPixelFormat(8, 8, 8, 0); + } else { + qCWarning(imagelogging) << "Unknown mip format"; + Q_UNREACHABLE(); + return nullptr; + } + + if (!useNVTT) { + // Don't use NVTT (at least version 2.1) as it outputs wrong RGB9E5 and R11G11B10F values from floats + return new PackedFloatOutputHandler(outputTexture, face, outputFormat); + } else { + return new 
OutputHandler(outputTexture, face); + } +} + +void convertImageToHDRTexture(gpu::Texture* texture, Image&& image, BackendTarget target, int baseMipLevel, bool buildMips, const std::atomic& abortProcessing, int face) { + assert(image.hasFloatFormat()); + + Image localCopy = image.getConvertedToFormat(Image::Format_RGBAF); + + const int width = localCopy.getWidth(); + const int height = localCopy.getHeight(); nvtt::OutputOptions outputOptions; outputOptions.setOutputHeader(false); - std::unique_ptr outputHandler; + + nvtt::CompressionOptions compressionOptions; + std::unique_ptr outputHandler{ getNVTTCompressionOutputHandler(texture, face, compressionOptions) }; + MyErrorHandler errorHandler; outputOptions.setErrorHandler(&errorHandler); nvtt::Context context; - int mipLevel = 0; - - if (mipFormat == gpu::Element::COLOR_RGB9E5 || mipFormat == gpu::Element::COLOR_R11G11B10) { - // Don't use NVTT (at least version 2.1) as it outputs wrong RGB9E5 and R11G11B10F values from floats - outputHandler.reset(new PackedFloatOutputHandler(texture, face, mipFormat)); - } else { - outputHandler.reset(new OutputHandler(texture, face)); - } + int mipLevel = baseMipLevel; outputOptions.setOutputHandler(outputHandler.get()); nvtt::Surface surface; - surface.setImage(inputFormat, width, height, 1, &(*data.begin())); - surface.setAlphaMode(alphaMode); - surface.setWrapMode(wrapMode); + surface.setImage(nvtt::InputFormat_RGBA_32F, width, height, 1, localCopy.getBits()); + surface.setAlphaMode(nvtt::AlphaMode_None); + surface.setWrapMode(nvtt::WrapMode_Mirror); SequentialTaskDispatcher dispatcher(abortProcessing); nvtt::Compressor compressor; context.setTaskDispatcher(&dispatcher); context.compress(surface, face, mipLevel++, compressionOptions, outputOptions); - while (surface.canMakeNextMipmap() && !abortProcessing.load()) { - surface.buildNextMipmap(nvtt::MipmapFilter_Box); - context.compress(surface, face, mipLevel++, compressionOptions, outputOptions); + if (buildMips) { + while (surface.canMakeNextMipmap() && !abortProcessing.load()) { + surface.buildNextMipmap(nvtt::MipmapFilter_Box); + context.compress(surface, face, mipLevel++, compressionOptions, outputOptions); + } } } -void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic& abortProcessing, int face) { +void convertImageToLDRTexture(gpu::Texture* texture, Image&& image, BackendTarget target, int baseMipLevel, bool buildMips, const std::atomic& abortProcessing, int face) { // Take a local copy to force move construction // https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#f18-for-consume-parameters-pass-by-x-and-stdmove-the-parameter Image localCopy = std::move(image); - assert(localCopy.getFormat() != Image::Format_PACKED_FLOAT); - if (localCopy.getFormat() != Image::Format_ARGB32) { - localCopy = localCopy.getConvertedToFormat(Image::Format_ARGB32); - } - const int width = localCopy.getWidth(), height = localCopy.getHeight(); auto mipFormat = texture->getStoredMipFormat(); + int mipLevel = baseMipLevel; if (target != BackendTarget::GLES32) { + if (localCopy.getFormat() != Image::Format_ARGB32) { + localCopy = localCopy.getConvertedToFormat(Image::Format_ARGB32); + } + const void* data = static_cast(localCopy.getBits()); nvtt::TextureType textureType = nvtt::TextureType_2D; nvtt::InputFormat inputFormat = nvtt::InputFormat_BGRA_8UB; @@ -618,23 +673,22 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, float inputGamma = 2.2f; float outputGamma = 
2.2f; - nvtt::InputOptions inputOptions; - inputOptions.setTextureLayout(textureType, width, height); + nvtt::Surface surface; + surface.setImage(inputFormat, width, height, 1, data); + surface.setAlphaMode(alphaMode); + surface.setWrapMode(wrapMode); - inputOptions.setMipmapData(data, width, height); - // setMipmapData copies the memory, so free up the memory afterward to avoid bloating the heap + // Surface copies the memory, so free up the memory afterward to avoid bloating the heap data = nullptr; localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one. + nvtt::InputOptions inputOptions; + inputOptions.setTextureLayout(textureType, width, height); + inputOptions.setFormat(inputFormat); inputOptions.setGamma(inputGamma, outputGamma); - inputOptions.setAlphaMode(alphaMode); - inputOptions.setWrapMode(wrapMode); inputOptions.setRoundMode(roundMode); - inputOptions.setMipmapGeneration(true); - inputOptions.setMipmapFilter(nvtt::MipmapFilter_Box); - nvtt::CompressionOptions compressionOptions; compressionOptions.setQuality(nvtt::Quality_Production); @@ -718,11 +772,22 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, outputOptions.setErrorHandler(&errorHandler); SequentialTaskDispatcher dispatcher(abortProcessing); - nvtt::Compressor compressor; - compressor.setTaskDispatcher(&dispatcher); - compressor.process(inputOptions, compressionOptions, outputOptions); + nvtt::Compressor context; + + context.compress(surface, face, mipLevel++, compressionOptions, outputOptions); + if (buildMips) { + while (surface.canMakeNextMipmap() && !abortProcessing.load()) { + surface.buildNextMipmap(nvtt::MipmapFilter_Box); + context.compress(surface, face, mipLevel++, compressionOptions, outputOptions); + } + } } else { - int numMips = 1 + (int)log2(std::max(width, height)); + int numMips = 1; + + if (buildMips) { + numMips += (int)log2(std::max(width, height)) - baseMipLevel; + } + assert(numMips > 0); Etc::RawImage *mipMaps = new Etc::RawImage[numMips]; Etc::Image::Format etcFormat = Etc::Image::Format::DEFAULT; @@ -756,23 +821,13 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, const float effort = 1.0f; const int numEncodeThreads = 4; int encodingTime; - const float MAX_COLOR = 255.0f; - std::vector floatData; - floatData.resize(width * height); - for (int y = 0; y < height; y++) { - QRgb *line = (QRgb *)localCopy.editScanLine(y); - for (int x = 0; x < width; x++) { - QRgb &pixel = line[x]; - floatData[x + y * width] = vec4(qRed(pixel), qGreen(pixel), qBlue(pixel), qAlpha(pixel)) / MAX_COLOR; - } + if (localCopy.getFormat() != Image::Format_RGBAF) { + localCopy = localCopy.getConvertedToFormat(Image::Format_RGBAF); } - // free up the memory afterward to avoid bloating the heap - localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one. 
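// Illustrative sketch (not part of the patch): both the HDR and LDR paths now feed
// NVTT through nvtt::Surface and compress one explicit mip level at a time, instead
// of letting nvtt::InputOptions generate the whole chain. compressMipChain() is a
// hypothetical condensation of that flow, using only calls that appear in the patch.
#include <nvtt/nvtt.h>

void compressMipChain(nvtt::Surface surface, nvtt::Context& context, int face, int baseMipLevel, bool buildMips,
                      const nvtt::CompressionOptions& compressionOptions, const nvtt::OutputOptions& outputOptions) {
    int mipLevel = baseMipLevel;
    // Compress the level that was handed in (it may be a sub-mip when baseMipLevel > 0)...
    context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
    // ...then optionally box-filter and compress the remaining levels.
    while (buildMips && surface.canMakeNextMipmap()) {
        surface.buildNextMipmap(nvtt::MipmapFilter_Box);
        context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
    }
}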
- Etc::EncodeMipmaps( - (float *)floatData.data(), width, height, + (float *)localCopy.editBits(), width, height, etcFormat, errorMetric, effort, numEncodeThreads, numEncodeThreads, numMips, Etc::FILTER_WRAP_NONE, @@ -782,9 +837,9 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, for (int i = 0; i < numMips; i++) { if (mipMaps[i].paucEncodingBits.get()) { if (face >= 0) { - texture->assignStoredMipFace(i, face, mipMaps[i].uiEncodingBitsBytes, static_cast(mipMaps[i].paucEncodingBits.get())); + texture->assignStoredMipFace(i+baseMipLevel, face, mipMaps[i].uiEncodingBitsBytes, static_cast(mipMaps[i].paucEncodingBits.get())); } else { - texture->assignStoredMip(i, mipMaps[i].uiEncodingBitsBytes, static_cast(mipMaps[i].paucEncodingBits.get())); + texture->assignStoredMip(i + baseMipLevel, mipMaps[i].uiEncodingBitsBytes, static_cast(mipMaps[i].paucEncodingBits.get())); } } } @@ -795,22 +850,27 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, #endif -void generateMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic& abortProcessing = false, int face = -1) { -#if CPU_MIPMAPS - PROFILE_RANGE(resource_parse, "generateMips"); +void convertImageToTexture(gpu::Texture* texture, Image& image, BackendTarget target, int face, int baseMipLevel, bool buildMips, const std::atomic& abortProcessing) { + PROFILE_RANGE(resource_parse, "convertToTextureWithMips"); if (target == BackendTarget::GLES32) { - generateLDRMips(texture, std::move(image), target, abortProcessing, face); + convertImageToLDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face); } else { - if (image.getFormat() == Image::Format_PACKED_FLOAT) { - generateHDRMips(texture, std::move(image), target, abortProcessing, face); + if (image.hasFloatFormat()) { + convertImageToHDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face); } else { - generateLDRMips(texture, std::move(image), target, abortProcessing, face); + convertImageToLDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face); } } -#else - texture->setAutoGenerateMips(true); -#endif +} + +void convertToTextureWithMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic& abortProcessing, int face) { + convertImageToTexture(texture, image, target, face, 0, true, abortProcessing); +} + +void convertToTexture(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic& abortProcessing, int face, int mipLevel) { + PROFILE_RANGE(resource_parse, "convertToTexture"); + convertImageToTexture(texture, image, target, face, mipLevel, false, abortProcessing); } void processTextureAlpha(const Image& srcImage, bool& validAlpha, bool& alphaAsMask) { @@ -900,7 +960,7 @@ gpu::TexturePointer TextureUsage::process2DTextureColorFromImage(Image&& srcImag theTexture->setUsage(usage.build()); theTexture->setStoredMipFormat(formatMip); theTexture->assignStoredMip(0, image.getByteCount(), image.getBits()); - generateMips(theTexture.get(), std::move(image), target, abortProcessing); + convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing); } return theTexture; @@ -944,14 +1004,14 @@ Image processBumpMap(Image&& image) { const int jPrevClamped = clampPixelCoordinate(j - 1, height - 1); // surrounding pixels - const QRgb topLeft = localCopy.getPixel(iPrevClamped, jPrevClamped); - const QRgb top = localCopy.getPixel(iPrevClamped, j); - 
const QRgb topRight = localCopy.getPixel(iPrevClamped, jNextClamped); - const QRgb right = localCopy.getPixel(i, jNextClamped); - const QRgb bottomRight = localCopy.getPixel(iNextClamped, jNextClamped); - const QRgb bottom = localCopy.getPixel(iNextClamped, j); - const QRgb bottomLeft = localCopy.getPixel(iNextClamped, jPrevClamped); - const QRgb left = localCopy.getPixel(i, jPrevClamped); + const QRgb topLeft = localCopy.getPackedPixel(iPrevClamped, jPrevClamped); + const QRgb top = localCopy.getPackedPixel(iPrevClamped, j); + const QRgb topRight = localCopy.getPackedPixel(iPrevClamped, jNextClamped); + const QRgb right = localCopy.getPackedPixel(i, jNextClamped); + const QRgb bottomRight = localCopy.getPackedPixel(iNextClamped, jNextClamped); + const QRgb bottom = localCopy.getPackedPixel(iNextClamped, j); + const QRgb bottomLeft = localCopy.getPackedPixel(iNextClamped, jPrevClamped); + const QRgb left = localCopy.getPackedPixel(i, jPrevClamped); // take their gray intensities // since it's a grayscale image, the value of each component RGB is the same @@ -974,12 +1034,13 @@ Image processBumpMap(Image&& image) { // convert to rgb from the value obtained computing the filter QRgb qRgbValue = qRgba(mapComponent(v.z), mapComponent(v.y), mapComponent(v.x), 1.0); - result.setPixel(i, j, qRgbValue); + result.setPackedPixel(i, j, qRgbValue); } } return result; } + gpu::TexturePointer TextureUsage::process2DTextureNormalMapFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, BackendTarget target, bool isBumpMap, const std::atomic& abortProcessing) { @@ -1014,7 +1075,7 @@ gpu::TexturePointer TextureUsage::process2DTextureNormalMapFromImage(Image&& src theTexture->setSource(srcImageName); theTexture->setStoredMipFormat(formatMip); theTexture->assignStoredMip(0, image.getByteCount(), image.getBits()); - generateMips(theTexture.get(), std::move(image), target, abortProcessing); + convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing); } return theTexture; @@ -1054,7 +1115,7 @@ gpu::TexturePointer TextureUsage::process2DTextureGrayscaleFromImage(Image&& src theTexture->setSource(srcImageName); theTexture->setStoredMipFormat(formatMip); theTexture->assignStoredMip(0, image.getByteCount(), image.getBits()); - generateMips(theTexture.get(), std::move(image), target, abortProcessing); + convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing); } return theTexture; @@ -1416,8 +1477,41 @@ Image convertToHDRFormat(Image&& srcImage, gpu::Element format) { return hdrImage; } +static bool isLinearTextureFormat(gpu::Element format) { + return !((format == gpu::Element::COLOR_SRGBA_32) + || (format == gpu::Element::COLOR_SBGRA_32) + || (format == gpu::Element::COLOR_SR_8) + || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGB) + || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGBA_MASK) + || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGBA) + || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGBA_HIGH) + || (format == gpu::Element::COLOR_COMPRESSED_ETC2_SRGB) + || (format == gpu::Element::COLOR_COMPRESSED_ETC2_SRGBA) + || (format == gpu::Element::COLOR_COMPRESSED_ETC2_SRGB_PUNCHTHROUGH_ALPHA)); +} + +void convolveForGGX(const std::vector& faces, gpu::Texture* texture, BackendTarget target, const std::atomic& abortProcessing = false) { + PROFILE_RANGE(resource_parse, "convolveForGGX"); + CubeMap source(faces, texture->getNumMips(), abortProcessing); + CubeMap output(texture->getWidth(), texture->getHeight(), 
texture->getNumMips()); + + if (!faces.front().hasFloatFormat()) { + source.applyGamma(2.2f); + } + source.convolveForGGX(output, abortProcessing); + if (!isLinearTextureFormat(texture->getTexelFormat())) { + output.applyGamma(1.0f/2.2f); + } + + for (int face = 0; face < 6; face++) { + for (gpu::uint16 mipLevel = 0; mipLevel < output.getMipCount(); mipLevel++) { + convertToTexture(texture, output.getFaceImage(mipLevel, face), target, abortProcessing, face, mipLevel); + } + } +} + gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, - bool compress, BackendTarget target, bool generateIrradiance, + bool compress, BackendTarget target, int options, const std::atomic& abortProcessing) { PROFILE_RANGE(resource_parse, "processCubeTextureColorFromImage"); @@ -1491,7 +1585,7 @@ gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcIm theTexture->setStoredMipFormat(formatMip); // Generate irradiance while we are at it - if (generateIrradiance) { + if (options & CUBE_GENERATE_IRRADIANCE) { PROFILE_RANGE(resource_parse, "generateIrradiance"); gpu::Element irradianceFormat; // TODO: we could locally compress the irradiance texture on Android, but we don't need to @@ -1513,9 +1607,16 @@ gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcIm auto irradiance = irradianceTexture->getIrradiance(); theTexture->overrideIrradiance(irradiance); } - - for (uint8 face = 0; face < faces.size(); ++face) { - generateMips(theTexture.get(), std::move(faces[face]), target, abortProcessing, face); + + if (options & CUBE_GGX_CONVOLVE) { + // Performs and convolution AND mip map generation + convolveForGGX(faces, theTexture.get(), target, abortProcessing); + } else { + // Create mip maps and compress to final format in one go + for (uint8 face = 0; face < faces.size(); ++face) { + // Force building the mip maps right now on CPU if we are convolving for GGX later on + convertToTextureWithMips(theTexture.get(), std::move(faces[face]), target, abortProcessing, face); + } } } diff --git a/libraries/image/src/image/TextureProcessing.h b/libraries/image/src/image/TextureProcessing.h index 72e2400721..6f93af1b29 100644 --- a/libraries/image/src/image/TextureProcessing.h +++ b/libraries/image/src/image/TextureProcessing.h @@ -17,11 +17,16 @@ #include #include "Image.h" +#include namespace image { std::function getHDRPackingFunction(); std::function getHDRUnpackingFunction(); + void convertToFloatFromPacked(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat, + glm::vec4* output, size_t outputLinePixelStride); + void convertToPackedFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat, + const glm::vec4* source, size_t srcLinePixelStride); namespace TextureUsage { @@ -62,7 +67,8 @@ enum Type { ROUGHNESS_TEXTURE, GLOSS_TEXTURE, EMISSIVE_TEXTURE, - CUBE_TEXTURE, + SKY_TEXTURE, + AMBIENT_TEXTURE, OCCLUSION_TEXTURE, SCATTERING_TEXTURE = OCCLUSION_TEXTURE, LIGHTMAP_TEXTURE, @@ -92,8 +98,12 @@ gpu::TexturePointer createMetallicTextureFromImage(Image&& image, const std::str bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); gpu::TexturePointer createCubeTextureFromImage(Image&& image, const std::string& srcImageName, bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); -gpu::TexturePointer createCubeTextureFromImageWithoutIrradiance(Image&& image, const 
std::string& srcImageName, - bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); +gpu::TexturePointer createCubeTextureAndIrradianceFromImage(Image&& image, const std::string& srcImageName, + bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); +gpu::TexturePointer createAmbientCubeTextureFromImage(Image&& image, const std::string& srcImageName, + bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); +gpu::TexturePointer createAmbientCubeTextureAndIrradianceFromImage(Image&& image, const std::string& srcImageName, + bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); gpu::TexturePointer createLightmapTextureFromImage(Image&& image, const std::string& srcImageName, bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); gpu::TexturePointer process2DTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, @@ -102,9 +112,14 @@ gpu::TexturePointer process2DTextureNormalMapFromImage(Image&& srcImage, const s gpu::BackendTarget target, bool isBumpMap, const std::atomic& abortProcessing); gpu::TexturePointer process2DTextureGrayscaleFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, gpu::BackendTarget target, bool isInvertedPixels, const std::atomic& abortProcessing); -gpu::TexturePointer processCubeTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, - gpu::BackendTarget target, bool generateIrradiance, const std::atomic& abortProcessing); +enum CubeTextureOptions { + CUBE_DEFAULT = 0x0, + CUBE_GENERATE_IRRADIANCE = 0x1, + CUBE_GGX_CONVOLVE = 0x2 +}; +gpu::TexturePointer processCubeTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, + gpu::BackendTarget target, int option, const std::atomic& abortProcessing); } // namespace TextureUsage const QStringList getSupportedFormats(); @@ -113,6 +128,9 @@ gpu::TexturePointer processImage(std::shared_ptr content, const std:: int maxNumPixels, TextureUsage::Type textureType, bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing = false); +void convertToTextureWithMips(gpu::Texture* texture, Image&& image, gpu::BackendTarget target, const std::atomic& abortProcessing = false, int face = -1); +void convertToTexture(gpu::Texture* texture, Image&& image, gpu::BackendTarget target, const std::atomic& abortProcessing = false, int face = -1, int mipLevel = 0); + } // namespace image #endif // hifi_image_TextureProcessing_h diff --git a/libraries/material-networking/src/material-networking/TextureCache.cpp b/libraries/material-networking/src/material-networking/TextureCache.cpp index 6af59930fa..6ceb5d328a 100644 --- a/libraries/material-networking/src/material-networking/TextureCache.cpp +++ b/libraries/material-networking/src/material-networking/TextureCache.cpp @@ -224,10 +224,14 @@ NetworkTexturePointer TextureCache::getTexture(const QUrl& url, image::TextureUs return getResourceTexture(url); } auto modifiedUrl = url; - if (type == image::TextureUsage::CUBE_TEXTURE) { + if (type == image::TextureUsage::SKY_TEXTURE) { QUrlQuery query { url.query() }; query.addQueryItem("skybox", ""); modifiedUrl.setQuery(query.toString()); + } else if (type == image::TextureUsage::AMBIENT_TEXTURE) { + QUrlQuery query{ url.query() }; + query.addQueryItem("ambient", ""); + modifiedUrl.setQuery(query.toString()); } TextureExtra extra = { type, content, maxNumPixels, sourceChannel }; return 
ResourceCache::getResource(modifiedUrl, QUrl(), &extra, std::hash()(extra)).staticCast(); @@ -283,7 +287,8 @@ gpu::TexturePointer getFallbackTextureForType(image::TextureUsage::Type type) { case image::TextureUsage::BUMP_TEXTURE: case image::TextureUsage::SPECULAR_TEXTURE: case image::TextureUsage::GLOSS_TEXTURE: - case image::TextureUsage::CUBE_TEXTURE: + case image::TextureUsage::SKY_TEXTURE: + case image::TextureUsage::AMBIENT_TEXTURE: case image::TextureUsage::STRICT_TEXTURE: default: break; @@ -408,7 +413,7 @@ void NetworkTexture::setExtra(void* extra) { _shouldFailOnRedirect = _currentlyLoadingResourceType != ResourceType::KTX; - if (_type == image::TextureUsage::CUBE_TEXTURE) { + if (_type == image::TextureUsage::SKY_TEXTURE) { setLoadPriority(this, SKYBOX_LOAD_PRIORITY); } else if (_currentlyLoadingResourceType == ResourceType::KTX) { setLoadPriority(this, HIGH_MIPS_LOAD_PRIORITY); diff --git a/libraries/render-utils/src/AntialiasingEffect.cpp b/libraries/render-utils/src/AntialiasingEffect.cpp index 17c13df19a..a445ea2343 100644 --- a/libraries/render-utils/src/AntialiasingEffect.cpp +++ b/libraries/render-utils/src/AntialiasingEffect.cpp @@ -26,7 +26,7 @@ #include "ViewFrustum.h" #include "GeometryCache.h" #include "FramebufferCache.h" - +#include "RandomAndNoise.h" namespace ru { using render_utils::slot::texture::Texture; @@ -359,36 +359,11 @@ int JitterSampleConfig::play() { return _state; } -template -class Halton { -public: - - float eval(int index) const { - float f = 1.0f; - float r = 0.0f; - float invB = 1.0f / (float)B; - index++; // Indices start at 1, not 0 - - while (index > 0) { - f = f * invB; - r = r + f * (float)(index % B); - index = index / B; - - } - - return r; - } - -}; - - JitterSample::SampleSequence::SampleSequence(){ // Halton sequence (2,3) - Halton<2> genX; - Halton<3> genY; for (int i = 0; i < SEQUENCE_LENGTH; i++) { - offsets[i] = glm::vec2(genX.eval(i), genY.eval(i)); + offsets[i] = glm::vec2(halton::evaluate<2>(i), halton::evaluate<3>(i)); offsets[i] -= vec2(0.5f); } offsets[SEQUENCE_LENGTH] = glm::vec2(0.0f); diff --git a/libraries/render-utils/src/DeferredLightingEffect.cpp b/libraries/render-utils/src/DeferredLightingEffect.cpp index ab9dea2325..b8c720e9ca 100644 --- a/libraries/render-utils/src/DeferredLightingEffect.cpp +++ b/libraries/render-utils/src/DeferredLightingEffect.cpp @@ -365,6 +365,7 @@ void PrepareDeferred::run(const RenderContextPointer& renderContext, const Input // For the rest of the rendering, bind the lighting model batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); }); } @@ -416,6 +417,7 @@ void RenderDeferredSetup::run(const render::RenderContextPointer& renderContext, // THe lighting model batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); // Subsurface scattering specific if (surfaceGeometryFramebuffer) { @@ -642,25 +644,37 @@ void RenderDeferred::run(const RenderContextPointer& renderContext, const Inputs config->setGPUBatchRunTime(_gpuTimer->getGPUAverage(), _gpuTimer->getBatchAverage()); } - - void DefaultLightingSetup::run(const RenderContextPointer& renderContext) { if (!_defaultLight || !_defaultBackground) { + auto defaultSkyboxURL = PathUtils::resourcesUrl() + "images/Default-Sky-9-cubemap/Default-Sky-9-cubemap.texmeta.json"; + if 
(!_defaultSkyboxNetworkTexture) { PROFILE_RANGE(render, "Process Default Skybox"); _defaultSkyboxNetworkTexture = DependencyManager::get()->getTexture( - PathUtils::resourcesUrl() + "images/Default-Sky-9-cubemap/Default-Sky-9-cubemap.texmeta.json", image::TextureUsage::CUBE_TEXTURE); + defaultSkyboxURL, image::TextureUsage::SKY_TEXTURE); + } + + if (!_defaultAmbientNetworkTexture) { + PROFILE_RANGE(render, "Process Default Ambient map"); + _defaultAmbientNetworkTexture = DependencyManager::get()->getTexture( + defaultSkyboxURL, image::TextureUsage::AMBIENT_TEXTURE); } if (_defaultSkyboxNetworkTexture && _defaultSkyboxNetworkTexture->isLoaded() && _defaultSkyboxNetworkTexture->getGPUTexture()) { - _defaultSkyboxAmbientTexture = _defaultSkyboxNetworkTexture->getGPUTexture(); - _defaultSkybox->setCubemap(_defaultSkyboxAmbientTexture); + _defaultSkybox->setCubemap(_defaultSkyboxNetworkTexture->getGPUTexture()); } else { // Don't do anything until the skybox has loaded return; } + if (_defaultAmbientNetworkTexture && _defaultAmbientNetworkTexture->isLoaded() && _defaultAmbientNetworkTexture->getGPUTexture()) { + _defaultAmbientTexture = _defaultAmbientNetworkTexture->getGPUTexture(); + } else { + // Don't do anything until the ambient box has been loaded + return; + } + auto lightStage = renderContext->_scene->getStage(); if (lightStage) { @@ -674,8 +688,8 @@ void DefaultLightingSetup::run(const RenderContextPointer& renderContext) { lp->setAmbientSpherePreset(gpu::SphericalHarmonics::Preset::OLD_TOWN_SQUARE); lp->setAmbientIntensity(0.5f); - lp->setAmbientMap(_defaultSkyboxAmbientTexture); - auto irradianceSH = _defaultSkyboxAmbientTexture->getIrradiance(); + lp->setAmbientMap(_defaultAmbientTexture); + auto irradianceSH = _defaultAmbientTexture->getIrradiance(); if (irradianceSH) { lp->setAmbientSphere((*irradianceSH)); } diff --git a/libraries/render-utils/src/DeferredLightingEffect.h b/libraries/render-utils/src/DeferredLightingEffect.h index f4935000ef..1cc6ca4767 100644 --- a/libraries/render-utils/src/DeferredLightingEffect.h +++ b/libraries/render-utils/src/DeferredLightingEffect.h @@ -212,7 +212,8 @@ protected: HazeStage::Index _defaultHazeID{ HazeStage::INVALID_INDEX }; graphics::SkyboxPointer _defaultSkybox { new ProceduralSkybox() }; NetworkTexturePointer _defaultSkyboxNetworkTexture; - gpu::TexturePointer _defaultSkyboxAmbientTexture; + NetworkTexturePointer _defaultAmbientNetworkTexture; + gpu::TexturePointer _defaultAmbientTexture; }; #endif // hifi_DeferredLightingEffect_h diff --git a/libraries/render-utils/src/LightAmbient.slh b/libraries/render-utils/src/LightAmbient.slh index 4ea9c0cd4c..cb76a8e545 100644 --- a/libraries/render-utils/src/LightAmbient.slh +++ b/libraries/render-utils/src/LightAmbient.slh @@ -17,8 +17,9 @@ vec4 evalSkyboxLight(vec3 direction, float lod) { #if !defined(GL_ES) float filterLod = textureQueryLod(skyboxMap, direction).x; - // Keep texture filtering LOD as limit to prevent aliasing on specular reflection - lod = max(lod, filterLod); + // Keep texture filtering LOD as limit to prevent aliasing on specular reflection, but add + // a bias to limit overblurring with convolved maps + lod = max(lod, filterLod-2); #endif return textureLod(skyboxMap, direction, lod); @@ -26,16 +27,30 @@ vec4 evalSkyboxLight(vec3 direction, float lod) { <@endfunc@> <@func declareEvalAmbientSpecularIrradiance(supportAmbientSphere, supportAmbientMap, supportIfAmbientMapElseAmbientSphere)@> +LAYOUT(binding=RENDER_UTILS_TEXTURE_AMBIENT_FRESNEL) uniform sampler2D 
ambientFresnelLUT; -vec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) { +vec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float roughness) { +#if RENDER_UTILS_ENABLE_AMBIENT_FRESNEL_LUT + vec2 ambientFresnel = texture(ambientFresnelLUT, vec2(roughness, ndotd)).xy; + return fresnelColor * ambientFresnel.x + vec3(ambientFresnel.y); +#else + float gloss = 1.0-roughness; float f = pow(1.0 - ndotd, 5.0); return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f; +#endif } <@if supportAmbientMap@> <$declareSkyboxMap()$> <@endif@> +float getMipLevelFromRoughness(float roughness, float lodCount) { + // This should match the value in the CubeMap::convolveForGGX method (CubeMap.cpp) + float ROUGHNESS_1_MIP_RESOLUTION = 1.5; + float deltaLod = lodCount - ROUGHNESS_1_MIP_RESOLUTION; + return deltaLod * (sqrt(1.0+24.0*roughness)-1.0) / 4.0; +} + vec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) { vec3 specularLight; <@if supportIfAmbientMapElseAmbientSphere@> @@ -43,10 +58,10 @@ vec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, ve <@endif@> <@if supportAmbientMap@> { - float levels = getLightAmbientMapNumMips(ambient); - float m = 12.0 / (1.0+11.0*surface.roughness); - float lod = levels - m; + float levelCount = getLightAmbientMapNumMips(ambient); + float lod = getMipLevelFromRoughness(surface.roughness, levelCount); lod = max(lod, 0.0); + specularLight = evalSkyboxLight(lightDir, lod).xyz; } <@endif@> @@ -87,7 +102,7 @@ void evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambie vec3 ambientSpaceLowNormal = (ambient.transform * vec4(lowNormalCurvature.xyz, 0.0)).xyz; <@endif@> - vec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness); + vec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, surface.roughness); diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz; diff --git a/libraries/render-utils/src/LightingModel.cpp b/libraries/render-utils/src/LightingModel.cpp index 2a85fcd960..5fcec1f033 100644 --- a/libraries/render-utils/src/LightingModel.cpp +++ b/libraries/render-utils/src/LightingModel.cpp @@ -9,10 +9,88 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // #include "LightingModel.h" +#include "RandomAndNoise.h" +#include "BRDF.h" + +#include "render-utils/ShaderConstants.h" + +#include + +gpu::TexturePointer LightingModel::_ambientFresnelLUT; LightingModel::LightingModel() { Parameters parameters; _parametersBuffer = gpu::BufferView(std::make_shared(sizeof(Parameters), (const gpu::Byte*) ¶meters, sizeof(Parameters))); + +#if RENDER_UTILS_ENABLE_AMBIENT_FRESNEL_LUT + if (!_ambientFresnelLUT) { + // Code taken from the IntegrateBRDF method as described in this talk : + // https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf + const auto N_roughness = 32; + const auto N_NdotV = 256; + + using LUTVector = std::vector; + using LUTValueType = LUTVector::value_type::value_type; + + LUTVector lut(N_roughness * N_NdotV); + + _ambientFresnelLUT = gpu::Texture::create2D(gpu::Element{ gpu::VEC2, gpu::NUINT16, gpu::XY }, N_roughness, N_NdotV, 1U, + gpu::Sampler(gpu::Sampler::FILTER_MIN_POINT_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP)); + + tbb::parallel_for(tbb::blocked_range2d(0, N_NdotV, 8, 0, N_roughness, 8), [&](const 
tbb::blocked_range2d& range) { + auto roughnessRange = range.cols(); + auto ndotvRange = range.rows(); + + for (auto j = ndotvRange.begin(); j < ndotvRange.end(); j++) { + const float NdotV = j / float(N_NdotV - 1); + + glm::vec3 V; + V.x = std::sqrt(1.0f - NdotV * NdotV); // sin + V.y = 0; + V.z = NdotV; // cos + + for (auto k = roughnessRange.begin(); k < roughnessRange.end(); k++) { + const float roughness = k / float(N_roughness - 1); + const float alpha = roughness * roughness; + const float alphaSquared = alpha * alpha; + + float A = 0.0f; + float B = 0.0f; + + const uint NumSamples = 1024; + for (uint i = 0; i < NumSamples; i++) { + glm::vec2 Xi = hammersley::evaluate(i, NumSamples); + glm::vec3 H = ggx::sample(Xi, roughness); + float VdotH = glm::dot(V, H); + glm::vec3 L = 2.0f * VdotH * H - V; + float NdotL = L.z; + + if (NdotL > 0.0f) { + VdotH = glm::clamp(VdotH, 0.0f, 1.0f); + + float NdotH = glm::clamp(H.z, 0.0f, 1.0f); + float G = smith::evaluateFastWithoutNdotV(alphaSquared, NdotV, NdotL); + float G_Vis = (G * VdotH) / NdotH; + float Fc = std::pow(1.0f - VdotH, 5.0f); + + A += (1.0f - Fc) * G_Vis; + B += Fc * G_Vis; + } + } + + A /= NumSamples; + B /= NumSamples; + + auto& lutValue = lut[k + j * N_roughness]; + lutValue.x = (LUTValueType)(glm::min(1.0f, A) * std::numeric_limits::max()); + lutValue.y = (LUTValueType)(glm::min(1.0f, B) * std::numeric_limits::max()); + } + } + }); + + _ambientFresnelLUT->assignStoredMip(0, N_roughness * N_NdotV * sizeof(LUTVector::value_type), (const gpu::Byte*)lut.data()); + } +#endif } void LightingModel::setUnlit(bool enable) { diff --git a/libraries/render-utils/src/LightingModel.h b/libraries/render-utils/src/LightingModel.h index f6bd6dcd46..a488abcb09 100644 --- a/libraries/render-utils/src/LightingModel.h +++ b/libraries/render-utils/src/LightingModel.h @@ -83,6 +83,7 @@ public: bool isShadowEnabled() const; UniformBufferView getParametersBuffer() const { return _parametersBuffer; } + gpu::TexturePointer getAmbientFresnelLUT() const { return _ambientFresnelLUT; } protected: @@ -126,6 +127,7 @@ protected: Parameters() {} }; UniformBufferView _parametersBuffer; + static gpu::TexturePointer _ambientFresnelLUT; }; using LightingModelPointer = std::shared_ptr; diff --git a/libraries/render-utils/src/RenderCommonTask.cpp b/libraries/render-utils/src/RenderCommonTask.cpp index b1a62625b2..18532b7a66 100644 --- a/libraries/render-utils/src/RenderCommonTask.cpp +++ b/libraries/render-utils/src/RenderCommonTask.cpp @@ -94,6 +94,7 @@ void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs& // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); if (_opaquePass) { renderStateSortShapes(renderContext, _shapePlumber, inItems, _maxDrawn); diff --git a/libraries/render-utils/src/RenderDeferredTask.cpp b/libraries/render-utils/src/RenderDeferredTask.cpp index ea2b05a6fa..d52f1da043 100644 --- a/libraries/render-utils/src/RenderDeferredTask.cpp +++ b/libraries/render-utils/src/RenderDeferredTask.cpp @@ -471,6 +471,7 @@ void RenderTransparentDeferred::run(const RenderContextPointer& renderContext, c // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); // Set the light 
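// A minimal sketch of how the data bound above is consumed, assuming the conventions visible in
// this patch. The ambient Fresnel LUT stores the split-sum terms (A, B) indexed by (roughness,
// NdotV), and the shader reconstructs the pre-integrated specular response as F0 * A + B
// (see fresnelSchlickAmbient in LightAmbient.slh). The mip selection inverts the
// roughness-to-resolution mapping used by CubeMap::convolveForGGX, so ROUGHNESS_1_MIP_RESOLUTION
// must stay in sync with that code. Function names here are illustrative only.
#include <cmath>
#include <glm/glm.hpp>

glm::vec3 applyAmbientFresnelLUT(const glm::vec3& F0, const glm::vec2& lutSample /* (A, B) */) {
    return F0 * lutSample.x + glm::vec3(lutSample.y);
}

float mipLevelFromRoughness(float roughness, float lodCount) {
    const float ROUGHNESS_1_MIP_RESOLUTION = 1.5f; // must match CubeMap::convolveForGGX
    const float deltaLod = lodCount - ROUGHNESS_1_MIP_RESOLUTION;
    return deltaLod * (std::sqrt(1.0f + 24.0f * roughness) - 1.0f) / 4.0f;
}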
deferredLightingEffect->setupKeyLightBatch(args, batch, *lightFrame); @@ -536,6 +537,7 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); // From the lighting model define a global shapeKey ORED with individiual keys ShapeKey::Builder keyBuilder; diff --git a/libraries/render-utils/src/RenderForwardTask.cpp b/libraries/render-utils/src/RenderForwardTask.cpp index 0bc117bdb9..5e30308a05 100755 --- a/libraries/render-utils/src/RenderForwardTask.cpp +++ b/libraries/render-utils/src/RenderForwardTask.cpp @@ -251,6 +251,7 @@ void DrawForward::run(const RenderContextPointer& renderContext, const Inputs& i // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); // From the lighting model define a global shapeKey ORED with individiual keys ShapeKey::Builder keyBuilder; diff --git a/libraries/render-utils/src/render-utils/ShaderConstants.h b/libraries/render-utils/src/render-utils/ShaderConstants.h index 8c289e62d1..76c8dd4981 100644 --- a/libraries/render-utils/src/render-utils/ShaderConstants.h +++ b/libraries/render-utils/src/render-utils/ShaderConstants.h @@ -14,6 +14,10 @@ #ifndef RENDER_UTILS_SHADER_CONSTANTS_H #define RENDER_UTILS_SHADER_CONSTANTS_H +// Feature enabling flags (possibly need to rebuild shaders if this changes) +#define RENDER_UTILS_ENABLE_AMBIENT_FRESNEL_LUT 1 + +// Binding slots #define RENDER_UTILS_ATTR_TEXCOORD01 0 #define RENDER_UTILS_ATTR_COLOR 1 @@ -54,6 +58,7 @@ #define RENDER_UTILS_TEXTURE_DEFERRED_DIFFUSED_CURVATURE 7 #define RENDER_UTILS_TEXTURE_DEFERRED_LIGHTING 10 #define RENDER_UTILS_TEXTURE_SKYBOX 11 +#define RENDER_UTILS_TEXTURE_AMBIENT_FRESNEL 14 #define RENDER_UTILS_BUFFER_SHADOW_PARAMS 2 #define RENDER_UTILS_TEXTURE_SHADOW 12 @@ -198,6 +203,7 @@ enum Texture { BloomColor = RENDER_UTILS_TEXTURE_BLOOM_COLOR, ToneMappingColor = RENDER_UTILS_TEXTURE_TM_COLOR, TextFont = RENDER_UTILS_TEXTURE_TEXT_FONT, + AmbientFresnel = RENDER_UTILS_TEXTURE_AMBIENT_FRESNEL, DebugTexture0 = RENDER_UTILS_DEBUG_TEXTURE0, }; } // namespace texture diff --git a/libraries/shared/src/BRDF.cpp b/libraries/shared/src/BRDF.cpp new file mode 100644 index 0000000000..fe438f12a1 --- /dev/null +++ b/libraries/shared/src/BRDF.cpp @@ -0,0 +1,45 @@ +#include "BRDF.h" + +#include +#ifndef M_PI +#define M_PI 3.14159265359 +#endif + +namespace ggx { + +float evaluate(float NdotH, float roughness) { + float alpha = roughness * roughness; + float alphaSquared = alpha * alpha; + float denom = (float)(NdotH * NdotH * (alphaSquared - 1.0f) + 1.0f); + return alphaSquared / (denom * denom); +} + +glm::vec3 sample(const glm::vec2& Xi, const float roughness) { + const float a = roughness * roughness; + + float phi = 2.0f * (float) M_PI * Xi.x; + float cosTheta = std::sqrt((1.0f - Xi.y) / (1.0f + (a*a - 1.0f) * Xi.y)); + float sinTheta = std::sqrt(1.0f - cosTheta * cosTheta); + + // from spherical coordinates to cartesian coordinates + glm::vec3 H; + H.x = std::cos(phi) * sinTheta; + H.y = std::sin(phi) * sinTheta; + H.z = cosTheta; + + return H; +} + +} + + +namespace smith { + + float evaluateFastWithoutNdotV(float alphaSquared, float NdotV, float NdotL) { + float oneMinusAlphaSquared = 1.0f - 
alphaSquared; + float G = NdotL * std::sqrt(alphaSquared + NdotV * NdotV * oneMinusAlphaSquared); + G = G + NdotV * std::sqrt(alphaSquared + NdotL * NdotL * oneMinusAlphaSquared); + return 2.0f * NdotL / G; + } + +} diff --git a/libraries/shared/src/BRDF.h b/libraries/shared/src/BRDF.h new file mode 100644 index 0000000000..4e6cdd1f38 --- /dev/null +++ b/libraries/shared/src/BRDF.h @@ -0,0 +1,36 @@ +#pragma once +// +// BRDF.h +// +// Created by Olivier Prat on 04/04/19. +// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +#ifndef SHARED_BRDF_H +#define SHARED_BRDF_H + +#include +#include + +// GGX micro-facet model +namespace ggx { + float evaluate(float NdotH, float roughness); + glm::vec3 sample(const glm::vec2& Xi, const float roughness); +} + +// Smith visibility function +namespace smith { + float evaluateFastWithoutNdotV(float alphaSquared, float NdotV, float NdotL); + + inline float evaluateFast(float alphaSquared, float NdotV, float NdotL) { + return evaluateFastWithoutNdotV(alphaSquared, NdotV, NdotL) * NdotV; + } + + inline float evaluate(float roughness, float NdotV, float NdotL) { + return evaluateFast(roughness*roughness*roughness*roughness, NdotV, NdotL); + } +} + +#endif // SHARED_BRDF_H \ No newline at end of file diff --git a/libraries/shared/src/RandomAndNoise.h b/libraries/shared/src/RandomAndNoise.h new file mode 100644 index 0000000000..7bde14a141 --- /dev/null +++ b/libraries/shared/src/RandomAndNoise.h @@ -0,0 +1,52 @@ +// +// RandomAndNoise.h +// +// Created by Olivier Prat on 05/16/18. +// Copyright 2018 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +#ifndef RANDOM_AND_NOISE_H +#define RANDOM_AND_NOISE_H + +#include + +namespace halton { + // Low discrepancy Halton sequence generator + template + float evaluate(int index) { + float f = 1.0f; + float r = 0.0f; + float invB = 1.0f / (float)B; + index++; // Indices start at 1, not 0 + + while (index > 0) { + f = f * invB; + r = r + f * (float)(index % B); + index = index / B; + + } + + return r; + } +} + +inline float getRadicalInverseVdC(uint32_t bits) { + bits = (bits << 16u) | (bits >> 16u); + bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u); + bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u); + bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u); + bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u); + return float(bits) * 2.3283064365386963e-10f; // / 0x100000000\n" +} + +namespace hammersley { + // Low discrepancy Hammersley 2D sequence generator + inline glm::vec2 evaluate(int k, const int sequenceLength) { + return glm::vec2(float(k) / float(sequenceLength), getRadicalInverseVdC(k)); + } +} + + +#endif \ No newline at end of file diff --git a/libraries/shared/src/TBBHelpers.h b/libraries/shared/src/TBBHelpers.h index 6b5c4d416b..0c4deace6a 100644 --- a/libraries/shared/src/TBBHelpers.h +++ b/libraries/shared/src/TBBHelpers.h @@ -20,6 +20,7 @@ #include #include #include +#include #ifdef _WIN32 #pragma warning( pop ) diff --git a/tools/oven/src/BakerCLI.cpp b/tools/oven/src/BakerCLI.cpp index 64462a4e37..669b821456 100644 --- a/tools/oven/src/BakerCLI.cpp +++ b/tools/oven/src/BakerCLI.cpp @@ -80,8 +80,9 @@ void BakerCLI::bakeFile(QUrl inputUrl, const QString& outputPath, const QString& 
{ "roughness", image::TextureUsage::ROUGHNESS_TEXTURE }, { "gloss", image::TextureUsage::GLOSS_TEXTURE }, { "emissive", image::TextureUsage::EMISSIVE_TEXTURE }, - { "cube", image::TextureUsage::CUBE_TEXTURE }, - { "skybox", image::TextureUsage::CUBE_TEXTURE }, + { "cube", image::TextureUsage::SKY_TEXTURE }, + { "skybox", image::TextureUsage::SKY_TEXTURE }, + { "ambient", image::TextureUsage::AMBIENT_TEXTURE }, { "occlusion", image::TextureUsage::OCCLUSION_TEXTURE }, { "scattering", image::TextureUsage::SCATTERING_TEXTURE }, { "lightmap", image::TextureUsage::LIGHTMAP_TEXTURE }, diff --git a/tools/oven/src/DomainBaker.cpp b/tools/oven/src/DomainBaker.cpp index 50a3d212c0..7d6a14d5da 100644 --- a/tools/oven/src/DomainBaker.cpp +++ b/tools/oven/src/DomainBaker.cpp @@ -387,13 +387,13 @@ void DomainBaker::enumerateEntities() { if (entity.contains(AMBIENT_LIGHT_KEY)) { auto ambientLight = entity[AMBIENT_LIGHT_KEY].toObject(); if (ambientLight.contains(AMBIENT_URL_KEY)) { - addTextureBaker(AMBIENT_LIGHT_KEY + "." + AMBIENT_URL_KEY, ambientLight[AMBIENT_URL_KEY].toString(), image::TextureUsage::CUBE_TEXTURE, *it); + addTextureBaker(AMBIENT_LIGHT_KEY + "." + AMBIENT_URL_KEY, ambientLight[AMBIENT_URL_KEY].toString(), image::TextureUsage::AMBIENT_TEXTURE, *it); } } if (entity.contains(SKYBOX_KEY)) { auto skybox = entity[SKYBOX_KEY].toObject(); if (skybox.contains(SKYBOX_URL_KEY)) { - addTextureBaker(SKYBOX_KEY + "." + SKYBOX_URL_KEY, skybox[SKYBOX_URL_KEY].toString(), image::TextureUsage::CUBE_TEXTURE, *it); + addTextureBaker(SKYBOX_KEY + "." + SKYBOX_URL_KEY, skybox[SKYBOX_URL_KEY].toString(), image::TextureUsage::SKY_TEXTURE, *it); } } diff --git a/tools/oven/src/ui/SkyboxBakeWidget.cpp b/tools/oven/src/ui/SkyboxBakeWidget.cpp index 71ae0cbab0..6c6e0340ac 100644 --- a/tools/oven/src/ui/SkyboxBakeWidget.cpp +++ b/tools/oven/src/ui/SkyboxBakeWidget.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -61,6 +62,15 @@ void SkyboxBakeWidget::setupUI() { // start a new row for next component ++rowIndex; + // setup a section to enable Ambient map baking + _ambientMapBox = new QCheckBox("Bake ambient map(s)"); + _ambientMapBox->setChecked(false); + + gridLayout->addWidget(_ambientMapBox, rowIndex, 1); + + // start a new row for next component + ++rowIndex; + // setup a section to choose the output directory QLabel* outputDirectoryLabel = new QLabel("Output Directory"); @@ -176,51 +186,67 @@ void SkyboxBakeWidget::bakeButtonClicked() { // if the URL doesn't have a scheme, assume it is a local file if (skyboxToBakeURL.scheme() != "http" && skyboxToBakeURL.scheme() != "https" && skyboxToBakeURL.scheme() != "ftp") { - skyboxToBakeURL.setScheme("file"); + skyboxToBakeURL = QUrl::fromLocalFile(fileURLString); } // everything seems to be in place, kick off a bake for this skybox now - auto baker = std::unique_ptr { - new TextureBaker(skyboxToBakeURL, image::TextureUsage::CUBE_TEXTURE, outputDirectory.absolutePath()) - }; + addBaker(new TextureBaker(skyboxToBakeURL, image::TextureUsage::SKY_TEXTURE, outputDirectory.absolutePath()), + outputDirectory); - // move the baker to a worker thread - baker->moveToThread(Oven::instance().getNextWorkerThread()); + if (_ambientMapBox->isChecked()) { + QString ambientMapBaseFilename; + QString urlPath = skyboxToBakeURL.path(); + auto urlParts = urlPath.split('.'); - // invoke the bake method on the baker thread - QMetaObject::invokeMethod(baker.get(), "bake"); + urlParts.front() += "-ambient"; + ambientMapBaseFilename = 
QUrl(urlParts.front()).fileName(); - // make sure we hear about the results of this baker when it is done - connect(baker.get(), &TextureBaker::finished, this, &SkyboxBakeWidget::handleFinishedBaker); - - // add a pending row to the results window to show that this bake is in process - auto resultsWindow = OvenGUIApplication::instance()->getMainWindow()->showResultsWindow(); - auto resultsRow = resultsWindow->addPendingResultRow(skyboxToBakeURL.fileName(), outputDirectory); - - // keep a unique_ptr to this baker - // and remember the row that represents it in the results table - _bakers.emplace_back(std::move(baker), resultsRow); + // we need to bake the corresponding ambient map too + addBaker(new TextureBaker(skyboxToBakeURL, image::TextureUsage::AMBIENT_TEXTURE, outputDirectory.absolutePath(), QString(), ambientMapBaseFilename), + outputDirectory); + } } } +void SkyboxBakeWidget::addBaker(TextureBaker* baker, const QDir& outputDirectory) { + auto textureBaker = std::unique_ptr{ baker }; + + // move the textureBaker to a worker thread + textureBaker->moveToThread(Oven::instance().getNextWorkerThread()); + + // make sure we hear about the results of this textureBaker when it is done + connect(textureBaker.get(), &TextureBaker::finished, this, &SkyboxBakeWidget::handleFinishedBaker); + + // invoke the bake method on the textureBaker thread + QMetaObject::invokeMethod(textureBaker.get(), "bake"); + + // add a pending row to the results window to show that this bake is in process + auto resultsWindow = OvenGUIApplication::instance()->getMainWindow()->showResultsWindow(); + auto resultsRow = resultsWindow->addPendingResultRow(baker->getBaseFilename(), outputDirectory); + + // keep a unique_ptr to this textureBaker + // and remember the row that represents it in the results table + _bakers.emplace_back(std::move(textureBaker), resultsRow); +} + void SkyboxBakeWidget::handleFinishedBaker() { - if (auto baker = qobject_cast(sender())) { + if (auto textureBaker = qobject_cast(sender())) { // add the results of this bake to the results window - auto it = std::find_if(_bakers.begin(), _bakers.end(), [baker](const BakerRowPair& value) { - return value.first.get() == baker; + auto it = std::find_if(_bakers.begin(), _bakers.end(), [textureBaker](const BakerRowPair& value) { + return value.first.get() == textureBaker; }); if (it != _bakers.end()) { auto resultRow = it->second; auto resultsWindow = OvenGUIApplication::instance()->getMainWindow()->showResultsWindow(); - if (baker->hasErrors()) { - resultsWindow->changeStatusForRow(resultRow, baker->getErrors().join("\n")); + if (textureBaker->hasErrors()) { + resultsWindow->changeStatusForRow(resultRow, textureBaker->getErrors().join("\n")); } else { resultsWindow->changeStatusForRow(resultRow, "Success"); } - // drop our strong pointer to the baker now that we are done with it + // drop our strong pointer to the textureBaker now that we are done with it _bakers.erase(it); } } diff --git a/tools/oven/src/ui/SkyboxBakeWidget.h b/tools/oven/src/ui/SkyboxBakeWidget.h index f00ab07f33..f560964649 100644 --- a/tools/oven/src/ui/SkyboxBakeWidget.h +++ b/tools/oven/src/ui/SkyboxBakeWidget.h @@ -21,6 +21,7 @@ #include "BakeWidget.h" class QLineEdit; +class QCheckBox; class SkyboxBakeWidget : public BakeWidget { Q_OBJECT @@ -42,9 +43,12 @@ private: QLineEdit* _selectionLineEdit; QLineEdit* _outputDirLineEdit; + QCheckBox* _ambientMapBox; Setting::Handle _exportDirectory; Setting::Handle _selectionStartDirectory; + + void addBaker(TextureBaker* baker, const QDir& 
outputDir); }; #endif // hifi_SkyboxBakeWidget_h
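// A minimal usage sketch of the new CubeTextureOptions flags declared in TextureProcessing.h.
// How SKY_TEXTURE and AMBIENT_TEXTURE map onto these flags lives in the create*FromImage
// wrappers in TextureProcessing.cpp, which are not shown in this excerpt; the pairing below is
// therefore an assumption inferred from the wrapper names (createAmbientCubeTexture*), not the
// authoritative mapping. The include path and helper name are likewise illustrative.
#include <atomic>
#include <string>
#include <utility>
#include "image/TextureProcessing.h"

gpu::TexturePointer bakeAmbientCube(image::Image&& srcImage, const std::string& name,
                                    bool compress, gpu::BackendTarget target,
                                    const std::atomic<bool>& abort) {
    using namespace image::TextureUsage;
    // Presumed ambient path: spherical-harmonic irradiance plus GGX-prefiltered mips.
    int options = CUBE_GENERATE_IRRADIANCE | CUBE_GGX_CONVOLVE;
    return processCubeTextureColorFromImage(std::move(srcImage), name, compress, target, options, abort);
}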