Trying to work uniformly with Image

Olivier Prat 2019-04-03 10:26:39 +02:00
parent 1aedfff6f7
commit e3355cd6a4
7 changed files with 383 additions and 238 deletions

View file

@@ -18,8 +18,6 @@
#include "TextureProcessing.h"
#include "ImageLogging.h"
#include <nvtt/nvtt.h>
#ifndef M_PI
#define M_PI 3.14159265359
#endif
@@ -323,31 +321,6 @@ CubeMap::CubeMap(int width, int height, int mipCount) {
reset(width, height, mipCount);
}
struct CubeMap::MipMapOutputHandler : public nvtt::OutputHandler {
MipMapOutputHandler(CubeMap* cube) : _cubemap(cube) {}
void beginImage(int size, int width, int height, int depth, int face, int miplevel) override {
_data = _cubemap->editFace(miplevel, face);
_current = _data;
}
bool writeData(const void* data, int size) override {
assert((size % sizeof(glm::vec4)) == 0);
memcpy(_current, data, size);
_current += size / sizeof(glm::vec4);
return true;
}
void endImage() override {
_data = nullptr;
_current = nullptr;
}
CubeMap* _cubemap{ nullptr };
glm::vec4* _data{ nullptr };
glm::vec4* _current{ nullptr };
};
CubeMap::CubeMap(const std::vector<Image>& faces, gpu::Element srcTextureFormat, int mipCount, const std::atomic<bool>& abortProcessing) {
reset(faces.front().getWidth(), faces.front().getHeight(), mipCount);
@@ -362,7 +335,7 @@ CubeMap::CubeMap(const std::vector<Image>& faces, gpu::Element srcTextureFormat,
// Compute mips
for (face = 0; face < 6; face++) {
convertToFloat(faces[face].getBits(), _width, _height, faces[face].getBytesPerLineCount(), srcTextureFormat, floatPixels.data(), _width);
convertToFloatFromPacked(faces[face].getBits(), _width, _height, faces[face].getBytesPerLineCount(), srcTextureFormat, floatPixels.data(), _width);
surface.setImage(nvtt::InputFormat_RGBA_32F, _width, _height, 1, &floatPixels.front().x);
auto mipLevel = 0;
@@ -394,6 +367,13 @@ void CubeMap::copyFace(int width, int height, const glm::vec4* source, size_t sr
}
}
Image CubeMap::getFaceImage(gpu::uint16 mipLevel, int face) const {
auto mipDims = getMipDimensions(mipLevel);
Image faceImage(mipDims.x, mipDims.y, Image::Format_RGBAF);
copyFace(mipDims.x, mipDims.y, getFace(mipLevel, face), getMipLineStride(mipLevel), (glm::vec4*)faceImage.editBits(), faceImage.getBytesPerLineCount() / sizeof(glm::vec4));
return faceImage;
}
void CubeMap::reset(int width, int height, int mipCount) {
assert(mipCount > 0 && width > 0 && height > 0);
_width = width;
@@ -417,52 +397,6 @@ void CubeMap::copyTo(CubeMap& other) const {
other._mips = _mips;
}
void CubeMap::copyTo(gpu::Texture* texture, const std::atomic<bool>& abortProcessing) const {
assert(_width == texture->getWidth() && _height == texture->getHeight() && texture->getNumMips() == _mips.size());
struct CompressionpErrorHandler : public nvtt::ErrorHandler {
virtual void error(nvtt::Error e) override {
qCWarning(imagelogging) << "Texture compression error:" << nvtt::errorString(e);
}
};
CompressionpErrorHandler errorHandler;
nvtt::OutputOptions outputOptions;
outputOptions.setOutputHeader(false);
outputOptions.setErrorHandler(&errorHandler);
nvtt::Surface surface;
surface.setAlphaMode(nvtt::AlphaMode_None);
surface.setWrapMode(nvtt::WrapMode_Mirror);
std::vector<glm::vec4> floatPixels;
floatPixels.resize(_width * _height);
nvtt::CompressionOptions compressionOptions;
SequentialTaskDispatcher dispatcher(abortProcessing);
nvtt::Context context;
context.setTaskDispatcher(&dispatcher);
for (int face = 0; face < 6; face++) {
for (gpu::uint16 mipLevel = 0; mipLevel < _mips.size() && !abortProcessing.load(); mipLevel++) {
auto mipDims = getMipDimensions(mipLevel);
std::unique_ptr<nvtt::OutputHandler> outputHandler{ getNVTTCompressionOutputHandler(texture, face, compressionOptions) };
outputOptions.setOutputHandler(outputHandler.get());
copyFace(mipDims.x, mipDims.y, getFace(mipLevel, face), getMipLineStride(mipLevel), &floatPixels.front(), mipDims.x);
surface.setImage(nvtt::InputFormat_RGBA_32F, mipDims.x, mipDims.y, 1, &floatPixels.front().x);
context.compress(surface, face, mipLevel, compressionOptions, outputOptions);
}
if (abortProcessing.load()) {
break;
}
}
}
void CubeMap::getFaceUV(const glm::vec3& dir, int* index, glm::vec2* uv) {
// Taken from https://en.wikipedia.org/wiki/Cube_mapping
float absX = std::abs(dir.x);
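// The cited page's convention, roughly: pick the axis with the largest absolute
// component; its sign selects the face index (+X=0, -X=1, +Y=2, -Y=3, +Z=4, -Z=5).
// The two remaining components are then projected onto that face and remapped
// from [-1, 1] to [0, 1]:
//     uv = 0.5 * (vec2(uc, vc) / maxAxis + 1.0)
// where (uc, vc) are chosen per face, e.g. (uc, vc) = (-dir.z, dir.y) for +X.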

View file

@@ -34,7 +34,6 @@ namespace image {
CubeMap(const std::vector<Image>& faces, gpu::Element faceFormat, int mipCount, const std::atomic<bool>& abortProcessing = false);
void reset(int width, int height, int mipCount);
void copyTo(gpu::Texture* texture, const std::atomic<bool>& abortProcessing = false) const;
void copyTo(CubeMap& other) const;
gpu::uint16 getMipCount() const { return (gpu::uint16)_mips.size(); }
@@ -60,13 +59,14 @@
return _mips[mipLevel][face].data() + (getMipLineStride(mipLevel) + 1)*EDGE_WIDTH;
}
Image getFaceImage(gpu::uint16 mipLevel, int face) const;
void convolveForGGX(CubeMap& output, const std::atomic<bool>& abortProcessing) const;
glm::vec4 fetchLod(const glm::vec3& dir, float lod) const;
private:
struct GGXSamples;
struct MipMapOutputHandler;
class Mip;
class ConstMip;

View file

@@ -6,28 +6,91 @@
using namespace image;
Image::Image(int width, int height, Format format) :
_dims(width, height),
_format(format) {
if (_format == Format_RGBAF) {
_floatData.resize(width*height);
} else {
_packedData = QImage(width, height, (QImage::Format)format);
}
}
size_t Image::getByteCount() const {
if (_format == Format_RGBAF) {
return sizeof(FloatPixels::value_type) * _floatData.size();
} else {
return _packedData.byteCount();
}
}
size_t Image::getBytesPerLineCount() const {
if (_format == Format_RGBAF) {
return sizeof(FloatPixels::value_type) * _dims.x;
} else {
return _packedData.bytesPerLine();
}
}
glm::uint8* Image::editScanLine(int y) {
if (_format == Format_RGBAF) {
return reinterpret_cast<glm::uint8*>(_floatData.data() + y * _dims.x);
} else {
return _packedData.scanLine(y);
}
}
const glm::uint8* Image::getScanLine(int y) const {
if (_format == Format_RGBAF) {
return reinterpret_cast<const glm::uint8*>(_floatData.data() + y * _dims.x);
} else {
return _packedData.scanLine(y);
}
}
glm::uint8* Image::editBits() {
if (_format == Format_RGBAF) {
return reinterpret_cast<glm::uint8*>(_floatData.data());
} else {
return _packedData.bits();
}
}
const glm::uint8* Image::getBits() const {
if (_format == Format_RGBAF) {
return reinterpret_cast<const glm::uint8*>(_floatData.data());
} else {
return _packedData.bits();
}
}
Image Image::getScaled(glm::uvec2 dstSize, AspectRatioMode ratioMode, TransformationMode transformMode) const {
if (_data.format() == Image::Format_PACKED_FLOAT) {
// Start by converting to full float
glm::vec4* floatPixels = new glm::vec4[getWidth()*getHeight()];
auto unpackFunc = getHDRUnpackingFunction();
auto floatDataIt = floatPixels;
for (auto lineNb = 0; lineNb < getHeight(); lineNb++) {
const glm::uint32* srcPixelIt = reinterpret_cast<const glm::uint32*>(getScanLine(lineNb));
const glm::uint32* srcPixelEnd = srcPixelIt + getWidth();
while (srcPixelIt < srcPixelEnd) {
*floatDataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f);
++srcPixelIt;
++floatDataIt;
}
}
// Perform filtered resize with NVTT
static_assert(sizeof(glm::vec4) == 4 * sizeof(float), "Assuming glm::vec4 holds 4 floats");
if (_format == Format_PACKED_FLOAT || _format == Format_RGBAF) {
nvtt::Surface surface;
surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, floatPixels);
delete[] floatPixels;
if (_format == Format_RGBAF) {
surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, _floatData.data());
} else {
// Start by converting to full float
glm::vec4* floatPixels = new glm::vec4[getWidth()*getHeight()];
auto unpackFunc = getHDRUnpackingFunction();
auto floatDataIt = floatPixels;
for (auto lineNb = 0; lineNb < getHeight(); lineNb++) {
const glm::uint32* srcPixelIt = reinterpret_cast<const glm::uint32*>(getScanLine(lineNb));
const glm::uint32* srcPixelEnd = srcPixelIt + getWidth();
while (srcPixelIt < srcPixelEnd) {
*floatDataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f);
++srcPixelIt;
++floatDataIt;
}
}
// Perform filtered resize with NVTT
static_assert(sizeof(glm::vec4) == 4 * sizeof(float), "Assuming glm::vec4 holds 4 floats");
surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, floatPixels);
delete[] floatPixels;
}
nvtt::ResizeFilter filter = nvtt::ResizeFilter_Kaiser;
if (transformMode == Qt::TransformationMode::FastTransformation) {
@@ -35,44 +98,148 @@ Image Image::getScaled(glm::uvec2 dstSize, AspectRatioMode ratioMode, Transforma
}
surface.resize(dstSize.x, dstSize.y, 1, nvtt::ResizeFilter_Box);
// And convert back to original format
QImage resizedImage((int)dstSize.x, (int)dstSize.y, (QImage::Format)Image::Format_PACKED_FLOAT);
auto packFunc = getHDRPackingFunction();
auto srcRedIt = reinterpret_cast<const float*>(surface.channel(0));
auto srcGreenIt = reinterpret_cast<const float*>(surface.channel(1));
auto srcBlueIt = reinterpret_cast<const float*>(surface.channel(2));
for (auto lineNb = 0; lineNb < dstSize.y; lineNb++) {
glm::uint32* dstPixelIt = reinterpret_cast<glm::uint32*>(resizedImage.scanLine(lineNb));
glm::uint32* dstPixelEnd = dstPixelIt + dstSize.x;
auto srcAlphaIt = reinterpret_cast<const float*>(surface.channel(3));
if (_format == Format_RGBAF) {
Image output((int)dstSize.x, (int)dstSize.y, _format);
auto dstPixelIt = output._floatData.begin();
auto dstPixelEnd = output._floatData.end();
while (dstPixelIt < dstPixelEnd) {
*dstPixelIt = packFunc(glm::vec3(*srcRedIt, *srcGreenIt, *srcBlueIt));
*dstPixelIt = glm::vec4(*srcRedIt, *srcGreenIt, *srcBlueIt, *srcAlphaIt);
++srcRedIt;
++srcGreenIt;
++srcBlueIt;
++srcAlphaIt;
++dstPixelIt;
}
return output;
} else {
// And convert back to original format
QImage resizedImage((int)dstSize.x, (int)dstSize.y, (QImage::Format)Image::Format_PACKED_FLOAT);
auto packFunc = getHDRPackingFunction();
for (auto lineNb = 0; lineNb < dstSize.y; lineNb++) {
glm::uint32* dstPixelIt = reinterpret_cast<glm::uint32*>(resizedImage.scanLine(lineNb));
glm::uint32* dstPixelEnd = dstPixelIt + dstSize.x;
while (dstPixelIt < dstPixelEnd) {
*dstPixelIt = packFunc(glm::vec3(*srcRedIt, *srcGreenIt, *srcBlueIt));
++srcRedIt;
++srcGreenIt;
++srcBlueIt;
++dstPixelIt;
}
}
return resizedImage;
}
return resizedImage;
} else {
return _data.scaled(fromGlm(dstSize), ratioMode, transformMode);
return _packedData.scaled(fromGlm(dstSize), ratioMode, transformMode);
}
}
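// Usage sketch (names and sizes hypothetical): the same call now covers packed
// HDR, RGBAF and plain QImage-backed images.
// Image half = img.getScaled(glm::uvec2(img.getWidth() / 2, img.getHeight() / 2),
//                            Qt::IgnoreAspectRatio, Qt::SmoothTransformation);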
Image Image::getConvertedToFormat(Format newFormat) const {
assert(getFormat() != Format_PACKED_FLOAT);
return _data.convertToFormat((QImage::Format)newFormat);
const float MAX_COLOR_VALUE = 255.0f;
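// Conversion cases handled below, in order:
//   - same format: return a copy
//   - QImage-backed to QImage-backed (no float format involved): delegate to QImage::convertToFormat
//   - R11G11B10F (packed float) source: unpack to RGBAF, or unpack and clamp to 8-bit
//   - RGBAF source: repack to R11G11B10F, or clamp to 8-bit
//   - 8-bit source to a float format: normalize to [0, 1] and store as RGBAF, or pack to R11G11B10F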
if (newFormat == _format) {
return *this;
} else if ((_format != Format_R11G11B10F && _format != Format_RGBAF) && (newFormat != Format_R11G11B10F && newFormat != Format_RGBAF)) {
return _packedData.convertToFormat((QImage::Format)newFormat);
} else if (_format == Format_PACKED_FLOAT) {
Image newImage(_dims.x, _dims.y, newFormat);
switch (newFormat) {
case Format_RGBAF:
convertToFloatFromPacked(getBits(), _dims.x, _dims.y, getBytesPerLineCount(), gpu::Element::COLOR_R11G11B10, newImage._floatData.data(), _dims.x);
break;
default:
{
auto unpackFunc = getHDRUnpackingFunction();
const glm::uint32* srcIt = reinterpret_cast<const glm::uint32*>(getBits());
for (int y = 0; y < _dims.y; y++) {
for (int x = 0; x < _dims.x; x++) {
auto color = glm::clamp(unpackFunc(*srcIt) * MAX_COLOR_VALUE, 0.0f, 255.0f);
newImage.setPackedPixel(x, y, qRgb(color.r, color.g, color.b));
srcIt++;
}
}
break;
}
}
return newImage;
} else if (_format == Format_RGBAF) {
Image newImage(_dims.x, _dims.y, newFormat);
switch (newFormat) {
case Format_R11G11B10F:
convertToPackedFromFloat(newImage.editBits(), _dims.x, _dims.y, newImage.getBytesPerLineCount(), gpu::Element::COLOR_R11G11B10, _floatData.data(), _dims.x);
break;
default:
{
FloatPixels::const_iterator srcIt = _floatData.begin();
for (int y = 0; y < _dims.y; y++) {
for (int x = 0; x < _dims.x; x++) {
auto color = glm::clamp((*srcIt) * MAX_COLOR_VALUE, 0.0f, 255.0f);
newImage.setPackedPixel(x, y, qRgba(color.r, color.g, color.b, color.a));
srcIt++;
}
}
break;
}
}
return newImage;
} else {
Image newImage(_dims.x, _dims.y, newFormat);
assert(newImage.hasFloatFormat());
if (newFormat == Format_RGBAF) {
FloatPixels::iterator dstIt = newImage._floatData.begin();
for (int y = 0; y < _dims.y; y++) {
auto line = (const QRgb*)getScanLine(y);
for (int x = 0; x < _dims.x; x++) {
QRgb pixel = line[x];
*dstIt = glm::vec4(qRed(pixel), qGreen(pixel), qBlue(pixel), qAlpha(pixel)) / MAX_COLOR_VALUE;
dstIt++;
}
}
} else {
auto packFunc = getHDRPackingFunction();
glm::uint32* dstIt = reinterpret_cast<glm::uint32*>( newImage.editBits() );
for (int y = 0; y < _dims.y; y++) {
auto line = (const QRgb*)getScanLine(y);
for (int x = 0; x < _dims.x; x++) {
QRgb pixel = line[x];
*dstIt = packFunc(glm::vec3(qRed(pixel), qGreen(pixel), qBlue(pixel)) / MAX_COLOR_VALUE);
dstIt++;
}
}
}
return newImage;
}
}
void Image::invertPixels() {
_data.invertPixels(QImage::InvertRgba);
assert(_format != Format_PACKED_FLOAT && _format != Format_RGBAF);
_packedData.invertPixels(QImage::InvertRgba);
}
Image Image::getSubImage(QRect rect) const {
return _data.copy(rect);
assert(_format != Format_RGBAF);
return _packedData.copy(rect);
}
Image Image::getMirrored(bool horizontal, bool vertical) const {
return _data.mirrored(horizontal, vertical);
assert(_format != Format_RGBAF);
return _packedData.mirrored(horizontal, vertical);
}

View file

@@ -48,38 +48,69 @@ namespace image {
Format_RGBA8888_Premultiplied = QImage::Format_RGBA8888_Premultiplied,
Format_Grayscale8 = QImage::Format_Grayscale8,
Format_R11G11B10F = QImage::Format_RGB30,
Format_PACKED_FLOAT = Format_R11G11B10F
Format_PACKED_FLOAT = Format_R11G11B10F,
// RGBA with a 32-bit single-precision float per component
Format_RGBAF = 100
};
using AspectRatioMode = Qt::AspectRatioMode;
using TransformationMode = Qt::TransformationMode;
Image() {}
Image(int width, int height, Format format) : _data(width, height, (QImage::Format)format) {}
Image(const QImage& data) : _data(data) {}
void operator=(const QImage& image) {
_data = image;
Image() : _dims(0,0) {}
Image(int width, int height, Format format);
Image(const QImage& data) : _packedData(data), _dims(data.width(), data.height()), _format((Format)data.format()) {}
void operator=(const QImage& other) {
_packedData = other;
_floatData.clear();
_dims.x = other.width();
_dims.y = other.height();
_format = (Format)other.format();
}
bool isNull() const { return _data.isNull(); }
Format getFormat() const { return (Format)_data.format(); }
bool hasAlphaChannel() const { return _data.hasAlphaChannel(); }
glm::uint32 getWidth() const { return (glm::uint32)_data.width(); }
glm::uint32 getHeight() const { return (glm::uint32)_data.height(); }
glm::uvec2 getSize() const { return toGlm(_data.size()); }
size_t getByteCount() const { return _data.byteCount(); }
size_t getBytesPerLineCount() const { return _data.bytesPerLine(); }
QRgb getPixel(int x, int y) const { return _data.pixel(x, y); }
void setPixel(int x, int y, QRgb value) {
_data.setPixel(x, y, value);
void operator=(const Image& other) {
if (&other != this) {
_packedData = other._packedData;
_floatData = other._floatData;
_dims = other._dims;
_format = other._format;
}
}
glm::uint8* editScanLine(int y) { return _data.scanLine(y); }
const glm::uint8* getScanLine(int y) const { return _data.scanLine(y); }
const glm::uint8* getBits() const { return _data.constBits(); }
bool isNull() const { return _packedData.isNull() && _floatData.empty(); }
Format getFormat() const { return _format; }
bool hasAlphaChannel() const { return _packedData.hasAlphaChannel() || _format == Format_RGBAF; }
bool hasFloatFormat() const { return _format == Format_R11G11B10F || _format == Format_RGBAF; }
glm::uint32 getWidth() const { return (glm::uint32)_dims.x; }
glm::uint32 getHeight() const { return (glm::uint32)_dims.y; }
glm::uvec2 getSize() const { return glm::uvec2(_dims); }
size_t getByteCount() const;
size_t getBytesPerLineCount() const;
QRgb getPackedPixel(int x, int y) const {
assert(_format != Format_RGBAF);
return _packedData.pixel(x, y);
}
void setPackedPixel(int x, int y, QRgb value) {
assert(_format != Format_RGBAF);
_packedData.setPixel(x, y, value);
}
glm::vec4 getFloatPixel(int x, int y) const {
assert(_format == Format_RGBAF);
return _floatData[x + y*_dims.x];
}
void setFloatPixel(int x, int y, const glm::vec4& value) {
assert(_format == Format_RGBAF);
_floatData[x + y * _dims.x] = value;
}
glm::uint8* editScanLine(int y);
const glm::uint8* getScanLine(int y) const;
glm::uint8* editBits();
const glm::uint8* getBits() const;
Image getScaled(glm::uvec2 newSize, AspectRatioMode ratioMode, TransformationMode transformationMode = Qt::SmoothTransformation) const;
Image getConvertedToFormat(Format newFormat) const;
@@ -91,7 +122,13 @@ namespace image {
private:
QImage _data;
using FloatPixels = std::vector<glm::vec4>;
// For QImage-supported (packed) formats
QImage _packedData;
FloatPixels _floatData;
glm::ivec2 _dims;
Format _format;
};
} // namespace image
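// Minimal usage sketch (hypothetical helper, not part of this header): with
// Format_RGBAF the pixels live in the vec4 buffer instead of a QImage, but the
// accessors above keep both storages behind one interface.
inline image::Image makeSolidHDRTile(int size, const glm::vec4& color) {
    image::Image tile(size, size, image::Image::Format_RGBAF);
    for (int y = 0; y < size; y++) {
        for (int x = 0; x < size; x++) {
            tile.setFloatPixel(x, y, color);    // unclamped float pixel
        }
    }
    return tile;
}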

View file

@@ -33,7 +33,6 @@
using namespace gpu;
#define CPU_MIPMAPS 1
#include <nvtt/nvtt.h>
#undef _CRT_SECURE_NO_WARNINGS
@@ -515,21 +514,28 @@ struct MyErrorHandler : public nvtt::ErrorHandler {
}
};
SequentialTaskDispatcher::SequentialTaskDispatcher(const std::atomic<bool>& abortProcessing) : _abortProcessing(abortProcessing) {
}
#if defined(NVTT_API)
class SequentialTaskDispatcher : public nvtt::TaskDispatcher {
public:
SequentialTaskDispatcher(const std::atomic<bool>& abortProcessing = false) : _abortProcessing(abortProcessing) {
}
void SequentialTaskDispatcher::dispatch(nvtt::Task* task, void* context, int count) {
for (int i = 0; i < count; i++) {
if (!_abortProcessing.load()) {
task(context, i);
} else {
break;
const std::atomic<bool>& _abortProcessing;
void dispatch(nvtt::Task* task, void* context, int count) override {
for (int i = 0; i < count; i++) {
if (!_abortProcessing.load()) {
task(context, i);
} else {
break;
}
}
}
}
};
#endif
void image::convertToFloat(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat,
glm::vec4* output, size_t outputLinePixelStride) {
void image::convertToFloatFromPacked(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat,
glm::vec4* output, size_t outputLinePixelStride) {
glm::vec4* outputIt;
auto unpackFunc = getHDRUnpackingFunction(sourceFormat);
@@ -548,8 +554,8 @@ void image::convertToFloat(const unsigned char* source, int width, int height, s
}
}
void image::convertFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat,
const glm::vec4* source, size_t srcLinePixelStride) {
void image::convertToPackedFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat,
const glm::vec4* source, size_t srcLinePixelStride) {
const glm::vec4* sourceIt;
auto packFunc = getHDRPackingFunction(outputFormat);
@@ -574,11 +580,13 @@ nvtt::OutputHandler* getNVTTCompressionOutputHandler(gpu::Texture* outputTexture
nvtt::InputFormat inputFormat = nvtt::InputFormat_RGBA_32F;
nvtt::WrapMode wrapMode = nvtt::WrapMode_Mirror;
nvtt::AlphaMode alphaMode = nvtt::AlphaMode_None;
bool useNVTT = false;
compressionOptions.setQuality(nvtt::Quality_Production);
// TODO: gles: generate ETC mips instead?
if (outputFormat == gpu::Element::COLOR_COMPRESSED_BCX_HDR_RGB) {
useNVTT = true;
compressionOptions.setFormat(nvtt::Format_BC6);
} else if (outputFormat == gpu::Element::COLOR_RGB9E5) {
compressionOptions.setFormat(nvtt::Format_RGB);
@@ -588,13 +596,18 @@ nvtt::OutputHandler* getNVTTCompressionOutputHandler(gpu::Texture* outputTexture
compressionOptions.setFormat(nvtt::Format_RGB);
compressionOptions.setPixelType(nvtt::PixelType_Float);
compressionOptions.setPixelFormat(32, 32, 32, 0);
} else if (outputFormat == gpu::Element::COLOR_SRGBA_32) {
useNVTT = true;
compressionOptions.setFormat(nvtt::Format_RGB);
compressionOptions.setPixelType(nvtt::PixelType_UnsignedNorm);
compressionOptions.setPixelFormat(8, 8, 8, 0);
} else {
qCWarning(imagelogging) << "Unknown mip format";
Q_UNREACHABLE();
return nullptr;
}
if (outputFormat == gpu::Element::COLOR_RGB9E5 || outputFormat == gpu::Element::COLOR_R11G11B10) {
if (!useNVTT) {
// Don't use NVTT (at least version 2.1) as it outputs wrong RGB9E5 and R11G11B10F values from floats
return new PackedFloatOutputHandler(outputTexture, face, outputFormat);
} else {
@@ -602,28 +615,18 @@ nvtt::OutputHandler* getNVTTCompressionOutputHandler(gpu::Texture* outputTexture
}
}
void generateHDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic<bool>& abortProcessing, int face) {
// Take a local copy to force move construction
// https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#f18-for-consume-parameters-pass-by-x-and-stdmove-the-parameter
Image localCopy = std::move(image);
void convertToHDRTexture(gpu::Texture* texture, Image&& image, BackendTarget target, int baseMipLevel, bool buildMips, const std::atomic<bool>& abortProcessing, int face) {
assert(image.hasFloatFormat());
assert(localCopy.getFormat() == Image::Format_PACKED_FLOAT);
Image localCopy = image.getConvertedToFormat(Image::Format_RGBAF);
const int width = localCopy.getWidth(), height = localCopy.getHeight();
std::vector<glm::vec4> data;
std::vector<glm::vec4>::iterator dataIt;
auto mipFormat = texture->getStoredMipFormat();
nvtt::InputFormat inputFormat = nvtt::InputFormat_RGBA_32F;
nvtt::WrapMode wrapMode = nvtt::WrapMode_Mirror;
nvtt::AlphaMode alphaMode = nvtt::AlphaMode_None;
data.resize(width * height);
convertToFloat(localCopy.getBits(), width, height, localCopy.getBytesPerLineCount(), GPU_CUBEMAP_HDR_FORMAT, data.data(), width);
// We're done with the localCopy, free up the memory to avoid bloating the heap
localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one.
nvtt::OutputOptions outputOptions;
outputOptions.setOutputHeader(false);
@@ -633,12 +636,12 @@ void generateHDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
MyErrorHandler errorHandler;
outputOptions.setErrorHandler(&errorHandler);
nvtt::Context context;
int mipLevel = 0;
int mipLevel = baseMipLevel;
outputOptions.setOutputHandler(outputHandler.get());
nvtt::Surface surface;
surface.setImage(inputFormat, width, height, 1, &(*data.begin()));
surface.setImage(inputFormat, width, height, 1, localCopy.getBits());
surface.setAlphaMode(alphaMode);
surface.setWrapMode(wrapMode);
@@ -647,13 +650,15 @@ void generateHDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
context.setTaskDispatcher(&dispatcher);
context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
while (surface.canMakeNextMipmap() && !abortProcessing.load()) {
surface.buildNextMipmap(nvtt::MipmapFilter_Box);
context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
if (buildMips) {
while (surface.canMakeNextMipmap() && !abortProcessing.load()) {
surface.buildNextMipmap(nvtt::MipmapFilter_Box);
context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
}
}
}
void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic<bool>& abortProcessing, int face) {
void convertToLDRTexture(gpu::Texture* texture, Image&& image, BackendTarget target, int baseMipLevel, bool buildMips, const std::atomic<bool>& abortProcessing, int face) {
// Take a local copy to force move construction
// https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#f18-for-consume-parameters-pass-by-x-and-stdmove-the-parameter
Image localCopy = std::move(image);
@@ -665,6 +670,7 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
const int width = localCopy.getWidth(), height = localCopy.getHeight();
auto mipFormat = texture->getStoredMipFormat();
int mipLevel = baseMipLevel;
if (target != BackendTarget::GLES32) {
const void* data = static_cast<const void*>(localCopy.getBits());
@@ -677,23 +683,22 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
float inputGamma = 2.2f;
float outputGamma = 2.2f;
nvtt::InputOptions inputOptions;
inputOptions.setTextureLayout(textureType, width, height);
nvtt::Surface surface;
surface.setImage(inputFormat, width, height, 1, data);
surface.setAlphaMode(alphaMode);
surface.setWrapMode(wrapMode);
inputOptions.setMipmapData(data, width, height);
// setMipmapData copies the memory, so free up the memory afterward to avoid bloating the heap
// Surface copies the memory, so free up the memory afterward to avoid bloating the heap
data = nullptr;
localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one.
nvtt::InputOptions inputOptions;
inputOptions.setTextureLayout(textureType, width, height);
inputOptions.setFormat(inputFormat);
inputOptions.setGamma(inputGamma, outputGamma);
inputOptions.setAlphaMode(alphaMode);
inputOptions.setWrapMode(wrapMode);
inputOptions.setRoundMode(roundMode);
inputOptions.setMipmapGeneration(true);
inputOptions.setMipmapFilter(nvtt::MipmapFilter_Box);
nvtt::CompressionOptions compressionOptions;
compressionOptions.setQuality(nvtt::Quality_Production);
@@ -777,11 +782,22 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
outputOptions.setErrorHandler(&errorHandler);
SequentialTaskDispatcher dispatcher(abortProcessing);
nvtt::Compressor compressor;
compressor.setTaskDispatcher(&dispatcher);
compressor.process(inputOptions, compressionOptions, outputOptions);
nvtt::Compressor context;
context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
if (buildMips) {
while (surface.canMakeNextMipmap() && !abortProcessing.load()) {
surface.buildNextMipmap(nvtt::MipmapFilter_Box);
context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
}
}
} else {
int numMips = 1 + (int)log2(std::max(width, height));
int numMips = 1;
if (buildMips) {
numMips += (int)log2(std::max(width, height)) - baseMipLevel;
}
assert(numMips > 0);
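// e.g. a full chain for a 512x512 source with baseMipLevel 0: 1 + log2(512) = 10 mips (512 down to 1)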
Etc::RawImage *mipMaps = new Etc::RawImage[numMips];
Etc::Image::Format etcFormat = Etc::Image::Format::DEFAULT;
@@ -815,23 +831,13 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
const float effort = 1.0f;
const int numEncodeThreads = 4;
int encodingTime;
const float MAX_COLOR = 255.0f;
std::vector<vec4> floatData;
floatData.resize(width * height);
for (int y = 0; y < height; y++) {
QRgb *line = (QRgb *)localCopy.editScanLine(y);
for (int x = 0; x < width; x++) {
QRgb &pixel = line[x];
floatData[x + y * width] = vec4(qRed(pixel), qGreen(pixel), qBlue(pixel), qAlpha(pixel)) / MAX_COLOR;
}
if (localCopy.getFormat() != Image::Format_RGBAF) {
localCopy = localCopy.getConvertedToFormat(Image::Format_RGBAF);
}
// free up the memory afterward to avoid bloating the heap
localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one.
Etc::EncodeMipmaps(
(float *)floatData.data(), width, height,
(float *)localCopy.editBits(), width, height,
etcFormat, errorMetric, effort,
numEncodeThreads, numEncodeThreads,
numMips, Etc::FILTER_WRAP_NONE,
@@ -841,9 +847,9 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
for (int i = 0; i < numMips; i++) {
if (mipMaps[i].paucEncodingBits.get()) {
if (face >= 0) {
texture->assignStoredMipFace(i, face, mipMaps[i].uiEncodingBitsBytes, static_cast<const gpu::Byte*>(mipMaps[i].paucEncodingBits.get()));
texture->assignStoredMipFace(i+baseMipLevel, face, mipMaps[i].uiEncodingBitsBytes, static_cast<const gpu::Byte*>(mipMaps[i].paucEncodingBits.get()));
} else {
texture->assignStoredMip(i, mipMaps[i].uiEncodingBitsBytes, static_cast<const gpu::Byte*>(mipMaps[i].paucEncodingBits.get()));
texture->assignStoredMip(i + baseMipLevel, mipMaps[i].uiEncodingBitsBytes, static_cast<const gpu::Byte*>(mipMaps[i].paucEncodingBits.get()));
}
}
}
@@ -854,22 +860,27 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
#endif
void generateMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic<bool>& abortProcessing = false, int face = -1) {
#if CPU_MIPMAPS
PROFILE_RANGE(resource_parse, "generateMips");
void convertImageToTexture(gpu::Texture* texture, Image& image, BackendTarget target, int face, int baseMipLevel, bool buildMips, const std::atomic<bool>& abortProcessing) {
PROFILE_RANGE(resource_parse, "convertToTextureWithMips");
if (target == BackendTarget::GLES32) {
generateLDRMips(texture, std::move(image), target, abortProcessing, face);
convertToLDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face);
} else {
if (image.getFormat() == Image::Format_PACKED_FLOAT) {
generateHDRMips(texture, std::move(image), target, abortProcessing, face);
if (image.hasFloatFormat()) {
convertToHDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face);
} else {
generateLDRMips(texture, std::move(image), target, abortProcessing, face);
convertToLDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face);
}
}
#else
texture->setAutoGenerateMips(true);
#endif
}
void convertToTextureWithMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic<bool>& abortProcessing, int face) {
convertImageToTexture(texture, image, target, face, 0, true, abortProcessing);
}
void convertToTexture(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic<bool>& abortProcessing, int face, int mipLevel) {
PROFILE_RANGE(resource_parse, "convertToTexture");
convertImageToTexture(texture, image, target, face, mipLevel, false, abortProcessing);
}
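// Call sketch (texture/image variables hypothetical): the first form builds the
// whole mip chain starting at level 0, the second uploads a single pre-built mip
// of one cube face through the same conversion path.
// convertToTextureWithMips(texture.get(), std::move(albedoImage), target, abortFlag);
// convertToTexture(texture.get(), cubeMap.getFaceImage(mip, face), target, abortFlag, face, mip);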
void processTextureAlpha(const Image& srcImage, bool& validAlpha, bool& alphaAsMask) {
@@ -959,7 +970,7 @@ gpu::TexturePointer TextureUsage::process2DTextureColorFromImage(Image&& srcImag
theTexture->setUsage(usage.build());
theTexture->setStoredMipFormat(formatMip);
theTexture->assignStoredMip(0, image.getByteCount(), image.getBits());
generateMips(theTexture.get(), std::move(image), target, abortProcessing);
convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing);
}
return theTexture;
@@ -1003,14 +1014,14 @@ Image processBumpMap(Image&& image) {
const int jPrevClamped = clampPixelCoordinate(j - 1, height - 1);
// surrounding pixels
const QRgb topLeft = localCopy.getPixel(iPrevClamped, jPrevClamped);
const QRgb top = localCopy.getPixel(iPrevClamped, j);
const QRgb topRight = localCopy.getPixel(iPrevClamped, jNextClamped);
const QRgb right = localCopy.getPixel(i, jNextClamped);
const QRgb bottomRight = localCopy.getPixel(iNextClamped, jNextClamped);
const QRgb bottom = localCopy.getPixel(iNextClamped, j);
const QRgb bottomLeft = localCopy.getPixel(iNextClamped, jPrevClamped);
const QRgb left = localCopy.getPixel(i, jPrevClamped);
const QRgb topLeft = localCopy.getPackedPixel(iPrevClamped, jPrevClamped);
const QRgb top = localCopy.getPackedPixel(iPrevClamped, j);
const QRgb topRight = localCopy.getPackedPixel(iPrevClamped, jNextClamped);
const QRgb right = localCopy.getPackedPixel(i, jNextClamped);
const QRgb bottomRight = localCopy.getPackedPixel(iNextClamped, jNextClamped);
const QRgb bottom = localCopy.getPackedPixel(iNextClamped, j);
const QRgb bottomLeft = localCopy.getPackedPixel(iNextClamped, jPrevClamped);
const QRgb left = localCopy.getPackedPixel(i, jPrevClamped);
// take their gray intensities
// since it's a grayscale image, the value of each component RGB is the same
@@ -1033,12 +1044,13 @@ Image processBumpMap(Image&& image) {
// convert to rgb from the value obtained computing the filter
QRgb qRgbValue = qRgba(mapComponent(v.z), mapComponent(v.y), mapComponent(v.x), 1.0);
result.setPixel(i, j, qRgbValue);
result.setPackedPixel(i, j, qRgbValue);
}
}
return result;
}
gpu::TexturePointer TextureUsage::process2DTextureNormalMapFromImage(Image&& srcImage, const std::string& srcImageName,
bool compress, BackendTarget target, bool isBumpMap,
const std::atomic<bool>& abortProcessing) {
@@ -1073,7 +1085,7 @@ gpu::TexturePointer TextureUsage::process2DTextureNormalMapFromImage(Image&& src
theTexture->setSource(srcImageName);
theTexture->setStoredMipFormat(formatMip);
theTexture->assignStoredMip(0, image.getByteCount(), image.getBits());
generateMips(theTexture.get(), std::move(image), target, abortProcessing);
convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing);
}
return theTexture;
@@ -1113,7 +1125,7 @@ gpu::TexturePointer TextureUsage::process2DTextureGrayscaleFromImage(Image&& src
theTexture->setSource(srcImageName);
theTexture->setStoredMipFormat(formatMip);
theTexture->assignStoredMip(0, image.getByteCount(), image.getBits());
generateMips(theTexture.get(), std::move(image), target, abortProcessing);
convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing);
}
return theTexture;
@@ -1475,13 +1487,18 @@ Image convertToHDRFormat(Image&& srcImage, gpu::Element format) {
return hdrImage;
}
void convolveForGGX(const std::vector<Image>& faces, gpu::Element faceFormat, gpu::Texture* texture, const std::atomic<bool>& abortProcessing = false) {
void convolveForGGX(const std::vector<Image>& faces, gpu::Element faceFormat, gpu::Texture* texture, BackendTarget target, const std::atomic<bool>& abortProcessing = false) {
PROFILE_RANGE(resource_parse, "convolveForGGX");
CubeMap source(faces, faceFormat, texture->getNumMips(), abortProcessing);
CubeMap output(texture->getWidth(), texture->getHeight(), texture->getNumMips());
source.convolveForGGX(output, abortProcessing);
output.copyTo(texture, abortProcessing);
for (int face = 0; face < 6; face++) {
for (gpu::uint16 mipLevel = 0; mipLevel < output.getMipCount(); mipLevel++) {
convertToTexture(texture, output.getFaceImage(mipLevel, face), target, abortProcessing, face, mipLevel);
}
}
}
gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcImage, const std::string& srcImageName,
@@ -1584,12 +1601,12 @@ gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcIm
if (options & CUBE_GGX_CONVOLVE) {
// Performs convolution AND mip map generation
convolveForGGX(faces, GPU_CUBEMAP_HDR_FORMAT, theTexture.get(), abortProcessing);
convolveForGGX(faces, GPU_CUBEMAP_HDR_FORMAT, theTexture.get(), target, abortProcessing);
} else {
// Create mip maps and compress to final format in one go
for (uint8 face = 0; face < faces.size(); ++face) {
// Force building the mip maps right now on CPU if we are convolving for GGX later on
generateMips(theTexture.get(), std::move(faces[face]), target, abortProcessing, face);
convertToTextureWithMips(theTexture.get(), std::move(faces[face]), target, abortProcessing, face);
}
}
}

View file

@@ -23,9 +23,9 @@ namespace image {
std::function<gpu::uint32(const glm::vec3&)> getHDRPackingFunction();
std::function<glm::vec3(gpu::uint32)> getHDRUnpackingFunction();
void convertToFloat(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat,
void convertToFloatFromPacked(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat,
glm::vec4* output, size_t outputLinePixelStride);
void convertFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat,
void convertToPackedFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat,
const glm::vec4* source, size_t srcLinePixelStride);
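// Round-trip sketch (buffers and strides illustrative): unpack a R11G11B10
// scanline image to linear vec4 pixels, process, then repack.
// std::vector<glm::vec4> linear(width * height);
// image::convertToFloatFromPacked(srcBytes, width, height, srcBytesPerLine,
//                                 gpu::Element::COLOR_R11G11B10, linear.data(), width);
// image::convertToPackedFromFloat(dstBytes, width, height, dstBytesPerLine,
//                                 gpu::Element::COLOR_R11G11B10, linear.data(), width);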
namespace TextureUsage {
@@ -102,18 +102,8 @@ gpu::TexturePointer processImage(std::shared_ptr<QIODevice> content, const std::
int maxNumPixels, TextureUsage::Type textureType,
bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing = false);
#if defined(NVTT_API)
class SequentialTaskDispatcher : public nvtt::TaskDispatcher {
public:
SequentialTaskDispatcher(const std::atomic<bool>& abortProcessing = false);
const std::atomic<bool>& _abortProcessing;
void dispatch(nvtt::Task* task, void* context, int count) override;
};
nvtt::OutputHandler* getNVTTCompressionOutputHandler(gpu::Texture* outputTexture, int face, nvtt::CompressionOptions& compressOptions);
#endif
void convertToTextureWithMips(gpu::Texture* texture, Image&& image, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing = false, int face = -1);
void convertToTexture(gpu::Texture* texture, Image&& image, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing = false, int face = -1, int mipLevel = 0);
} // namespace image

View file

@@ -41,7 +41,7 @@ float getMipLevelFromRoughness(float roughness, float lodCount) {
// This should match the value in the CubeMap::convolveForGGX method (CubeMap.cpp)
float ROUGHNESS_1_MIP_RESOLUTION = 1.5;
float deltaLod = lodCount - ROUGHNESS_1_MIP_RESOLUTION;
return (sqrt(6.0*roughness+0.25)-0.5)*deltaLod*0.5;
return deltaLod * (sqrt(1.0+24.0*roughness)-1.0) / 4.0;
}
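// Note: the rewritten mapping above is the same curve as before, since
//     deltaLod * (sqrt(1.0 + 24.0*roughness) - 1.0) / 4.0
//         == (sqrt(6.0*roughness + 0.25) - 0.5) * deltaLod * 0.5
// (because sqrt(1 + 24r) = 2*sqrt(0.25 + 6r)); roughness 0 maps to mip 0 and
// roughness 1 maps to deltaLod = lodCount - ROUGHNESS_1_MIP_RESOLUTION.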
vec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {