Commit 7aaf3da11e (parent 745d41e679): Before merge with HDR
4 changed files with 176 additions and 1723 deletions
libraries/image/src/image/Image.cpp (new file, +78)
@@ -0,0 +1,78 @@
#include "Image.h"

#include "ImageLogging.h"
#include "TextureProcessing.h"

#include <nvtt/nvtt.h>

using namespace image;

Image Image::getScaled(glm::uvec2 dstSize, AspectRatioMode ratioMode, TransformationMode transformMode) const {
    if (_data.format() == Image::Format_PACKED_FLOAT) {
        // Start by converting to full float
        glm::vec4* floatPixels = new glm::vec4[getWidth()*getHeight()];
        auto unpackFunc = getHDRUnpackingFunction();
        auto floatDataIt = floatPixels;
        for (auto lineNb = 0; lineNb < getHeight(); lineNb++) {
            const glm::uint32* srcPixelIt = reinterpret_cast<const glm::uint32*>(getScanLine(lineNb));
            const glm::uint32* srcPixelEnd = srcPixelIt + getWidth();

            while (srcPixelIt < srcPixelEnd) {
                *floatDataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f);
                ++srcPixelIt;
                ++floatDataIt;
            }
        }

        // Perform filtered resize with NVTT
        static_assert(sizeof(glm::vec4) == 4 * sizeof(float), "Assuming glm::vec4 holds 4 floats");
        nvtt::Surface surface;
        surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, floatPixels);
        delete[] floatPixels;

        nvtt::ResizeFilter filter = nvtt::ResizeFilter_Kaiser;
        if (transformMode == Qt::TransformationMode::FastTransformation) {
            filter = nvtt::ResizeFilter_Box;
        }
        surface.resize(dstSize.x, dstSize.y, 1, filter);

        // And convert back to original format
        QImage resizedImage((int)dstSize.x, (int)dstSize.y, (QImage::Format)Image::Format_PACKED_FLOAT);

        auto packFunc = getHDRPackingFunction();
        auto srcRedIt = reinterpret_cast<const float*>(surface.channel(0));
        auto srcGreenIt = reinterpret_cast<const float*>(surface.channel(1));
        auto srcBlueIt = reinterpret_cast<const float*>(surface.channel(2));
        for (auto lineNb = 0; lineNb < dstSize.y; lineNb++) {
            glm::uint32* dstPixelIt = reinterpret_cast<glm::uint32*>(resizedImage.scanLine(lineNb));
            glm::uint32* dstPixelEnd = dstPixelIt + dstSize.x;

            while (dstPixelIt < dstPixelEnd) {
                *dstPixelIt = packFunc(glm::vec3(*srcRedIt, *srcGreenIt, *srcBlueIt));
                ++srcRedIt;
                ++srcGreenIt;
                ++srcBlueIt;
                ++dstPixelIt;
            }
        }
        return resizedImage;
    } else {
        return _data.scaled(fromGlm(dstSize), ratioMode, transformMode);
    }
}

Image Image::getConvertedToFormat(Format newFormat) const {
    assert(getFormat() != Format_PACKED_FLOAT);
    return _data.convertToFormat((QImage::Format)newFormat);
}

void Image::invertPixels() {
    _data.invertPixels(QImage::InvertRgba);
}

Image Image::getSubImage(QRect rect) const {
    return _data.copy(rect);
}

Image Image::getMirrored(bool horizontal, bool vertical) const {
    return _data.mirrored(horizontal, vertical);
}
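For orientation, a minimal caller-side sketch of the HDR path in getScaled above; the halfSizeHDR helper, the half-size target, and the include path are illustrative assumptions, not code from this commit:

#include <image/Image.h>

// Illustrative helper: downscale a packed-float (R11G11B10F) image to half size.
// Qt::SmoothTransformation selects the Kaiser filter in the HDR branch above;
// Qt::FastTransformation would select the box filter instead.
image::Image halfSizeHDR(const image::Image& source) {
    glm::uvec2 halfSize(source.getWidth() / 2, source.getHeight() / 2);
    return source.getScaled(halfSize, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
}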
libraries/image/src/image/Image.h (new file, +98)
@@ -0,0 +1,98 @@
#pragma once
//
//  Image.h
//  image/src/Image
//
//  Created by Olivier Prat on 29/3/2019.
//  Copyright 2019 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_image_Image_h
#define hifi_image_Image_h

#include <QImage>

#include "ColorChannel.h"

#include <glm/fwd.hpp>
#include <glm/vec2.hpp>
#include <GLMHelpers.h>

namespace image {

class Image {
public:

    enum Format {
        Format_Invalid = QImage::Format_Invalid,
        Format_Mono = QImage::Format_Mono,
        Format_MonoLSB = QImage::Format_MonoLSB,
        Format_Indexed8 = QImage::Format_Indexed8,
        Format_RGB32 = QImage::Format_RGB32,
        Format_ARGB32 = QImage::Format_ARGB32,
        Format_ARGB32_Premultiplied = QImage::Format_ARGB32_Premultiplied,
        Format_RGB16 = QImage::Format_RGB16,
        Format_ARGB8565_Premultiplied = QImage::Format_ARGB8565_Premultiplied,
        Format_RGB666 = QImage::Format_RGB666,
        Format_ARGB6666_Premultiplied = QImage::Format_ARGB6666_Premultiplied,
        Format_RGB555 = QImage::Format_RGB555,
        Format_ARGB8555_Premultiplied = QImage::Format_ARGB8555_Premultiplied,
        Format_RGB888 = QImage::Format_RGB888,
        Format_RGB444 = QImage::Format_RGB444,
        Format_ARGB4444_Premultiplied = QImage::Format_ARGB4444_Premultiplied,
        Format_RGBX8888 = QImage::Format_RGBX8888,
        Format_RGBA8888 = QImage::Format_RGBA8888,
        Format_RGBA8888_Premultiplied = QImage::Format_RGBA8888_Premultiplied,
        Format_Grayscale8 = QImage::Format_Grayscale8,
        Format_R11G11B10F = QImage::Format_RGB30,
        Format_PACKED_FLOAT = Format_R11G11B10F
    };

    using AspectRatioMode = Qt::AspectRatioMode;
    using TransformationMode = Qt::TransformationMode;

    Image() {}
    Image(int width, int height, Format format) : _data(width, height, (QImage::Format)format) {}
    Image(const QImage& data) : _data(data) {}
    void operator=(const QImage& image) {
        _data = image;
    }

    bool isNull() const { return _data.isNull(); }

    Format getFormat() const { return (Format)_data.format(); }
    bool hasAlphaChannel() const { return _data.hasAlphaChannel(); }

    glm::uint32 getWidth() const { return (glm::uint32)_data.width(); }
    glm::uint32 getHeight() const { return (glm::uint32)_data.height(); }
    glm::uvec2 getSize() const { return toGlm(_data.size()); }
    size_t getByteCount() const { return _data.byteCount(); }

    QRgb getPixel(int x, int y) const { return _data.pixel(x, y); }
    void setPixel(int x, int y, QRgb value) {
        _data.setPixel(x, y, value);
    }

    glm::uint8* editScanLine(int y) { return _data.scanLine(y); }
    const glm::uint8* getScanLine(int y) const { return _data.scanLine(y); }
    const glm::uint8* getBits() const { return _data.constBits(); }

    Image getScaled(glm::uvec2 newSize, AspectRatioMode ratioMode, TransformationMode transformationMode = Qt::SmoothTransformation) const;
    Image getConvertedToFormat(Format newFormat) const;
    Image getSubImage(QRect rect) const;
    Image getMirrored(bool horizontal, bool vertical) const;

    // Inplace transformations
    void invertPixels();

private:

    QImage _data;

};

} // namespace image

#endif // hifi_image_Image_h
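The class is a thin wrapper that stores a QImage and forwards most calls to it. The sketch below exercises the non-HDR operations; the function name, the 64x64 crop, and the include paths are illustrative assumptions rather than code from this commit:

#include <QImage>
#include <QRect>
#include <image/Image.h>

// Illustrative use of the wrapper on an ordinary 8-bit image.
image::Image exampleLdrUse(const QImage& loaded) {
    image::Image img(loaded);                                                   // wrap the QImage
    image::Image rgba = img.getConvertedToFormat(image::Image::Format_RGBA8888);
    image::Image flipped = rgba.getMirrored(false, true);                       // vertical flip only
    flipped.invertPixels();                                                     // in-place RGBA invert
    return flipped.getSubImage(QRect(0, 0, 64, 64));                            // 64x64 crop
}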
File diff suppressed because it is too large
@@ -1,125 +0,0 @@
//
//  Image.h
//  image/src/image
//
//  Created by Clement Brisset on 4/5/2017.
//  Copyright 2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_image_Image_h
#define hifi_image_Image_h

#include <QVariant>
#include <QImage>
#include <nvtt/nvtt.h>

#include <gpu/Texture.h>

#include "ColorChannel.h"

class QByteArray;

namespace image {

extern const QImage::Format QIMAGE_HDRFORMAT;

std::function<gpu::uint32(const glm::vec3&)> getHDRPackingFunction();
std::function<glm::vec3(gpu::uint32)> getHDRUnpackingFunction();
void convertToFloat(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat,
                    glm::vec4* output, size_t outputLinePixelStride);
void convertFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat,
                      const glm::vec4* source, size_t srcLinePixelStride);

namespace TextureUsage {

enum Type {
    DEFAULT_TEXTURE,
    STRICT_TEXTURE,
    ALBEDO_TEXTURE,
    NORMAL_TEXTURE,
    BUMP_TEXTURE,
    SPECULAR_TEXTURE,
    METALLIC_TEXTURE = SPECULAR_TEXTURE, // for now spec and metallic texture are the same, converted to grey
    ROUGHNESS_TEXTURE,
    GLOSS_TEXTURE,
    EMISSIVE_TEXTURE,
    SKY_TEXTURE,
    AMBIENT_TEXTURE,
    OCCLUSION_TEXTURE,
    SCATTERING_TEXTURE = OCCLUSION_TEXTURE,
    LIGHTMAP_TEXTURE,
    UNUSED_TEXTURE
};

using TextureLoader = std::function<gpu::TexturePointer(QImage&&, const std::string&, bool, gpu::BackendTarget, const std::atomic<bool>&)>;
TextureLoader getTextureLoaderForType(Type type, const QVariantMap& options = QVariantMap());

gpu::TexturePointer create2DTextureFromImage(QImage&& image, const std::string& srcImageName,
                                             bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createStrict2DTextureFromImage(QImage&& image, const std::string& srcImageName,
                                                   bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createAlbedoTextureFromImage(QImage&& image, const std::string& srcImageName,
                                                 bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createEmissiveTextureFromImage(QImage&& image, const std::string& srcImageName,
                                                   bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createNormalTextureFromNormalImage(QImage&& image, const std::string& srcImageName,
                                                        bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createNormalTextureFromBumpImage(QImage&& image, const std::string& srcImageName,
                                                      bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createRoughnessTextureFromImage(QImage&& image, const std::string& srcImageName,
                                                    bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createRoughnessTextureFromGlossImage(QImage&& image, const std::string& srcImageName,
                                                          bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createMetallicTextureFromImage(QImage&& image, const std::string& srcImageName,
                                                   bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createCubeTextureFromImage(QImage&& image, const std::string& srcImageName,
                                               bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createCubeTextureAndIrradianceFromImage(QImage&& image, const std::string& srcImageName,
                                                            bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createAmbientCubeTextureFromImage(QImage&& image, const std::string& srcImageName,
                                                      bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createAmbientCubeTextureAndIrradianceFromImage(QImage&& image, const std::string& srcImageName,
                                                                   bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer createLightmapTextureFromImage(QImage&& image, const std::string& srcImageName,
                                                   bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer process2DTextureColorFromImage(QImage&& srcImage, const std::string& srcImageName, bool compress,
                                                   gpu::BackendTarget target, bool isStrict, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer process2DTextureNormalMapFromImage(QImage&& srcImage, const std::string& srcImageName, bool compress,
                                                       gpu::BackendTarget target, bool isBumpMap, const std::atomic<bool>& abortProcessing);
gpu::TexturePointer process2DTextureGrayscaleFromImage(QImage&& srcImage, const std::string& srcImageName, bool compress,
                                                       gpu::BackendTarget target, bool isInvertedPixels, const std::atomic<bool>& abortProcessing);

enum CubeTextureOptions {
    CUBE_DEFAULT = 0x0,
    CUBE_GENERATE_IRRADIANCE = 0x1,
    CUBE_GGX_CONVOLVE = 0x2
};
gpu::TexturePointer processCubeTextureColorFromImage(QImage&& srcImage, const std::string& srcImageName, bool compress,
                                                     gpu::BackendTarget target, int option, const std::atomic<bool>& abortProcessing);

} // namespace TextureUsage

const QStringList getSupportedFormats();

gpu::TexturePointer processImage(std::shared_ptr<QIODevice> content, const std::string& url, ColorChannel sourceChannel,
                                 int maxNumPixels, TextureUsage::Type textureType,
                                 bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing = false);

#if defined(NVTT_API)
class SequentialTaskDispatcher : public nvtt::TaskDispatcher {
public:
    SequentialTaskDispatcher(const std::atomic<bool>& abortProcessing);

    const std::atomic<bool>& _abortProcessing;

    void dispatch(nvtt::Task* task, void* context, int count) override;
};

nvtt::OutputHandler* getNVTTCompressionOutputHandler(gpu::Texture* outputTexture, int face, nvtt::CompressionOptions& compressOptions);
#endif

} // namespace image

#endif // hifi_image_Image_h
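The declarations removed here are the texture-processing side of the old header. The new Image.cpp above includes "TextureProcessing.h" and calls getHDRPackingFunction()/getHDRUnpackingFunction(), which suggests those helpers now live there; the include path and function name below are assumptions, and the sketch only illustrates the round trip through the packed R11G11B10F representation:

#include <glm/vec3.hpp>
#include <image/TextureProcessing.h>   // assumed new home of the HDR pack/unpack helpers

// Round-trip one HDR color through the packed R11G11B10F representation.
glm::vec3 roundTripHDR(const glm::vec3& hdrColor) {
    auto pack = image::getHDRPackingFunction();      // glm::vec3 -> packed 32-bit value
    auto unpack = image::getHDRUnpackingFunction();  // packed 32-bit value -> glm::vec3
    auto packed = pack(hdrColor);                    // 11+11+10 unsigned float bits, no alpha
    return unpack(packed);                           // lossy, but close to the input
}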