First take at using gpu::Texture in the Model rendering instead of the previous path with raw GL textures

Sam Gateau 2015-01-20 16:02:26 -08:00
parent 4a2a9f92a7
commit 4302db109f
15 changed files with 911 additions and 126 deletions

View file

@ -23,6 +23,7 @@ Batch::Batch() :
_resources(),
_data(),
_buffers(),
_textures(),
_streamFormats(),
_transforms()
{
@ -38,6 +39,7 @@ void Batch::clear() {
_resources.clear();
_data.clear();
_buffers.clear();
_textures.clear();
_streamFormats.clear();
_transforms.clear();
}
@ -171,3 +173,14 @@ void Batch::setUniformBuffer(uint32 slot, const BufferView& view) {
}
void Batch::setUniformTexture(uint32 slot, const TexturePointer& texture) {
ADD_COMMAND(setUniformTexture);
_params.push_back(_textures.cache(texture));
_params.push_back(slot);
}
void Batch::setUniformTexture(uint32 slot, const TextureView& view) {
setUniformTexture(slot, view._texture);
}
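For orientation, a minimal sketch of how render code can now record a texture bind through the batch instead of issuing a raw glBindTexture; it assumes the gpu::Batch and gpu::TexturePointer types from this commit, and the function name and slot value are illustrative only:

    #include "Batch.h"
    #include "Texture.h"

    // Hypothetical caller: nothing touches GL here; the actual bind is deferred
    // until the backend replays COMMAND_setUniformTexture.
    void recordDiffuseBind(gpu::Batch& batch, const gpu::TexturePointer& diffuse) {
        batch.setUniformTexture(0, diffuse); // slot 0; the texture is cached inside the batch
    }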

View file

@ -19,6 +19,7 @@
#include <vector>
#include "Stream.h"
#include "Texture.h"
#if defined(NSIGHT_FOUND)
#include "nvToolsExt.h"
@ -92,6 +93,10 @@ public:
void setUniformBuffer(uint32 slot, const BufferPointer& buffer, Offset offset, Offset size);
void setUniformBuffer(uint32 slot, const BufferView& view); // not a command, just a shortcut from a BufferView
void setUniformTexture(uint32 slot, const TexturePointer& view);
void setUniformTexture(uint32 slot, const TextureView& view); // not a command, just a shortcut from a TextureView
// TODO: As long as we have gl calls explicitly issued from interface
// code, we need to be able to record and batch these calls. The long
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API
@ -148,10 +153,6 @@ public:
void _glColor4f(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
void _glMaterialf(GLenum face, GLenum pname, GLfloat param);
void _glMaterialfv(GLenum face, GLenum pname, const GLfloat *params);
enum Command {
COMMAND_draw = 0,
COMMAND_drawIndexed,
@ -167,6 +168,7 @@ public:
COMMAND_setProjectionTransform,
COMMAND_setUniformBuffer,
COMMAND_setUniformTexture,
// TODO: As long as we have gl calls explicitly issued from interface
// code, we need to be able to record and batch these calls. The long
@ -219,10 +221,7 @@ public:
COMMAND_glEnableVertexAttribArray,
COMMAND_glDisableVertexAttribArray,
COMMAND_glColor4f,
COMMAND_glMaterialf,
COMMAND_glMaterialfv,
COMMAND_glColor4f,
NUM_COMMANDS,
};
@ -292,6 +291,7 @@ public:
};
typedef Cache<BufferPointer>::Vector BufferCaches;
typedef Cache<TexturePointer>::Vector TextureCaches;
typedef Cache<Stream::FormatPointer>::Vector StreamFormatCaches;
typedef Cache<Transform>::Vector TransformCaches;
@ -330,6 +330,7 @@ public:
Bytes _data;
BufferCaches _buffers;
TextureCaches _textures;
StreamFormatCaches _streamFormats;
TransformCaches _transforms;

View file

@ -14,6 +14,7 @@
#include <assert.h>
#include "Resource.h"
#include "Texture.h"
namespace gpu {
@ -39,6 +40,17 @@ public:
void syncGPUObject(const Buffer& buffer);
template< typename T >
static void setGPUObject(const Texture& texture, T* to) {
texture.setGPUObject(reinterpret_cast<GPUObject*>(to));
}
template< typename T >
static T* getGPUObject(const Texture& texture) {
return reinterpret_cast<T*>(texture.getGPUObject());
}
void syncGPUObject(const Texture& texture);
protected:
};
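These helpers are how a backend attaches its own per-resource state to a gpu::Texture: a concrete object (for instance the GLBackend::GLTexture introduced later in this commit) is stored on the texture with setGPUObject and retrieved with getGPUObject. A hedged sketch of that round trip; the fetchOrCreateGLTexture helper is purely illustrative:

    gpu::GLBackend::GLTexture* fetchOrCreateGLTexture(const gpu::Texture& texture) {
        // Look for a backend object already attached to this texture...
        gpu::GLBackend::GLTexture* object = gpu::Backend::getGPUObject<gpu::GLBackend::GLTexture>(texture);
        if (!object) {
            // ...and attach a fresh one the first time the texture is seen.
            object = new gpu::GLBackend::GLTexture();
            gpu::Backend::setGPUObject(texture, object);
        }
        return object;
    }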

View file

@ -95,6 +95,7 @@ enum Semantic {
RAW = 0, // used as RAW memory
RGB,
RGBA,
BGRA,
XYZ,
XYZW,
POS_XYZ,
@ -102,10 +103,12 @@ enum Semantic {
QUAT,
DIR_XYZ,
UV,
R8,
INDEX, //used by index buffer of a mesh
PART, // used by part buffer of a mesh
DEPTH, // Depth buffer
DEPTH_STENCIL, // Depth Stencil buffer
NUM_SEMANTICS,
};
@ -124,7 +127,7 @@ public:
_dimension(SCALAR),
_type(INT8)
{}
Semantic getSemantic() const { return (Semantic)_semantic; }
Dimension getDimension() const { return (Dimension)_dimension; }
@ -135,11 +138,21 @@ public:
uint32 getSize() const { return DIMENSION_COUNT[_dimension] * TYPE_SIZE[_type]; }
protected:
uint16 getRaw() const { return *((uint16*) (this)); }
protected:
uint8 _semantic;
uint8 _dimension : 4;
uint8 _type : 4;
};
static bool operator ==(const Element& left, const Element& right) {
return left.getRaw() == right.getRaw();
}
static bool operator !=(const Element& left, const Element& right) {
return left.getRaw() != right.getRaw();
}
};
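As a quick illustration of the additions above: an Element describes a texel as a (dimension, type, semantic) triple packed into 16 bits, and the new operators compare that packed value. A small sketch, assuming the three-argument Element constructor used elsewhere in this commit; the function is illustrative only:

    static void elementExample() {
        gpu::Element rgba(gpu::VEC4, gpu::UINT8, gpu::RGBA);
        gpu::Element bgra(gpu::VEC4, gpu::UINT8, gpu::BGRA);
        bool same = (rgba == bgra);   // false: the semantics differ, even though both are 4-byte texels
        auto size = rgba.getSize();   // 4 = DIMENSION_COUNT[VEC4] * TYPE_SIZE[UINT8]
        (void) same; (void) size;
    }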

View file

@ -1,6 +1,6 @@
//
// GLBackend.cpp
// interface/src/gpu
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 10/27/2014.
// Copyright 2014 High Fidelity, Inc.
@ -32,6 +32,7 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
(&::gpu::GLBackend::do_setProjectionTransform),
(&::gpu::GLBackend::do_setUniformBuffer),
(&::gpu::GLBackend::do_setUniformTexture),
(&::gpu::GLBackend::do_glEnable),
(&::gpu::GLBackend::do_glDisable),
@ -82,9 +83,6 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
(&::gpu::GLBackend::do_glDisableVertexAttribArray),
(&::gpu::GLBackend::do_glColor4f),
(&::gpu::GLBackend::do_glMaterialf),
(&::gpu::GLBackend::do_glMaterialfv),
};
static const GLenum _primitiveToGLmode[NUM_PRIMITIVES] = {
@ -174,8 +172,8 @@ void GLBackend::checkGLError() {
}
}
//#define CHECK_GL_ERROR() ::gpu::GLBackend::checkGLError()
#define CHECK_GL_ERROR()
#define CHECK_GL_ERROR() ::gpu::GLBackend::checkGLError()
//#define CHECK_GL_ERROR()
void GLBackend::do_draw(Batch& batch, uint32 paramOffset) {
updateInput();
@ -508,6 +506,20 @@ void GLBackend::do_setUniformBuffer(Batch& batch, uint32 paramOffset) {
CHECK_GL_ERROR();
}
void GLBackend::do_setUniformTexture(Batch& batch, uint32 paramOffset) {
GLuint slot = batch._params[paramOffset + 1]._uint;
TexturePointer uniformTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);
#if defined(Q_OS_MAC)
#elif defined(Q_OS_WIN)
GLuint to = getTextureID(*uniformTexture);
glActiveTexture(GL_TEXTURE0 + slot);
glBindTexture(GL_TEXTURE_2D, to);
#else
#endif
CHECK_GL_ERROR();
}
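One detail worth keeping in mind when reading this function: Batch::setUniformTexture pushes its parameters in reverse of the order they are consumed here, so on replay the texture cache index sits at paramOffset + 0 and the slot at paramOffset + 1. A comment-only sketch of the round trip, taken from the record side in Batch.cpp and the replay side above:

    // record (Batch::setUniformTexture):
    //     _params.push_back(_textures.cache(texture));   // -> read back at paramOffset + 0
    //     _params.push_back(slot);                        // -> read back at paramOffset + 1
    // replay (GLBackend::do_setUniformTexture):
    //     GLuint slot = batch._params[paramOffset + 1]._uint;
    //     TexturePointer tex = batch._textures.get(batch._params[paramOffset + 0]._uint);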
// TODO: As long as we have gl calls explicitly issued from interface
// code, we need to be able to record and batch these calls. The long
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API
@ -1077,40 +1089,6 @@ void GLBackend::do_glColor4f(Batch& batch, uint32 paramOffset) {
CHECK_GL_ERROR();
}
void Batch::_glMaterialf(GLenum face, GLenum pname, GLfloat param) {
ADD_COMMAND_GL(glMaterialf);
_params.push_back(param);
_params.push_back(pname);
_params.push_back(face);
DO_IT_NOW(_glMaterialf, 3);
}
void GLBackend::do_glMaterialf(Batch& batch, uint32 paramOffset) {
glMaterialf(
batch._params[paramOffset + 2]._uint,
batch._params[paramOffset + 1]._uint,
batch._params[paramOffset + 0]._float);
CHECK_GL_ERROR();
}
void Batch::_glMaterialfv(GLenum face, GLenum pname, const GLfloat *params) {
ADD_COMMAND_GL(glMaterialfv);
_params.push_back(cacheData(4 * sizeof(float), params));
_params.push_back(pname);
_params.push_back(face);
DO_IT_NOW(_glMaterialfv, 3);
}
void GLBackend::do_glMaterialfv(Batch& batch, uint32 paramOffset) {
glMaterialfv(
batch._params[paramOffset + 2]._uint,
batch._params[paramOffset + 1]._uint,
(const GLfloat*)batch.editData(batch._params[paramOffset + 0]._uint));
CHECK_GL_ERROR();
}
GLBackend::GLBuffer::GLBuffer() :
_stamp(0),
_buffer(0),
@ -1156,3 +1134,4 @@ GLuint GLBackend::getBufferID(const Buffer& buffer) {
GLBackend::syncGPUObject(buffer);
return Backend::getGPUObject<GLBackend::GLBuffer>(buffer)->_buffer;
}

View file

@ -1,6 +1,6 @@
//
// GLBackend.h
// interface/src/gpu
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 10/27/2014.
// Copyright 2014 High Fidelity, Inc.
@ -42,9 +42,21 @@ public:
~GLBuffer();
};
static void syncGPUObject(const Buffer& buffer);
static GLuint getBufferID(const Buffer& buffer);
class GLTexture {
public:
Stamp _storageStamp;
Stamp _contentStamp;
GLuint _texture;
GLuint _size;
GLTexture();
~GLTexture();
};
static void syncGPUObject(const Texture& texture);
static GLuint getTextureID(const Texture& texture);
static const int MAX_NUM_ATTRIBUTES = Stream::NUM_INPUT_SLOTS;
static const int MAX_NUM_INPUT_BUFFERS = 16;
@ -124,6 +136,7 @@ protected:
// Shader Stage
void do_setUniformBuffer(Batch& batch, uint32 paramOffset);
void do_setUniformTexture(Batch& batch, uint32 paramOffset);
void updateShader();
struct ShaderStageState {
@ -188,9 +201,6 @@ protected:
void do_glColor4f(Batch& batch, uint32 paramOffset);
void do_glMaterialf(Batch& batch, uint32 paramOffset);
void do_glMaterialfv(Batch& batch, uint32 paramOffset);
typedef void (GLBackend::*CommandCall)(Batch&, uint32);
static CommandCall _commandCalls[Batch::NUM_COMMANDS];

View file

@ -0,0 +1,194 @@
//
// GLBackendTexture.cpp
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 1/19/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.cpp"
GLBackend::GLTexture::GLTexture() :
_storageStamp(0),
_contentStamp(0),
_texture(0),
_size(0)
{}
GLBackend::GLTexture::~GLTexture() {
if (_texture != 0) {
glDeleteTextures(1, &_texture);
}
}
class GLTexelFormat {
public:
GLenum internalFormat;
GLenum format;
GLenum type;
static GLTexelFormat evalGLTexelFormat(const Element& pixel) {
GLTexelFormat texel = {GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE};
switch(pixel.getDimension()) {
case gpu::SCALAR: {
texel.format = GL_RED;
texel.type = _elementTypeToGLType[pixel.getType()];
switch(pixel.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RED;
break;
case gpu::DEPTH:
texel.internalFormat = GL_DEPTH_COMPONENT;
break;
default:
qDebug() << "Unknown combination of texel format";
}
}
break;
case gpu::VEC2: {
texel.format = GL_RG;
texel.type = _elementTypeToGLType[pixel.getType()];
switch(pixel.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RG;
break;
case gpu::DEPTH_STENCIL:
texel.internalFormat = GL_DEPTH_STENCIL;
break;
default:
qDebug() << "Unknown combination of texel format";
}
}
break;
case gpu::VEC3: {
texel.format = GL_RGB;
texel.type = _elementTypeToGLType[pixel.getType()];
switch(pixel.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
texel.internalFormat = GL_RGB;
break;
default:
qDebug() << "Unknown combination of texel format";
}
}
break;
case gpu::VEC4: {
texel.format = GL_RGBA;
texel.type = _elementTypeToGLType[pixel.getType()];
switch(pixel.getSemantic()) {
case gpu::RGB:
texel.internalFormat = GL_RGB;
break;
case gpu::RGBA:
texel.internalFormat = GL_RGBA;
break;
default:
qDebug() << "Unknown combination of texel format";
}
}
break;
default:
qDebug() << "Unknown combination of texel format";
}
return texel;
}
};
void GLBackend::syncGPUObject(const Texture& texture) {
GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(texture);
// If GPU object already created and in sync
bool needUpdate = false;
if (object && (object->_storageStamp == texture.getStamp())) {
// If gpu object info is in sync with sysmem version
if (object->_contentStamp >= texture.getDataStamp()) {
// Then all good, GPU object is ready to be used
return;
} else {
// Need to update the content of the GPU object from the source sysmem of the texture
needUpdate = true;
}
}
// need to have a gpu object?
if (!object) {
object = new GLTexture();
glGenTextures(1, &object->_texture);
CHECK_GL_ERROR();
Backend::setGPUObject(texture, object);
}
// Go through the process of allocating the correct storage and/or updating the content
switch (texture.getType()) {
case Texture::TEX_2D: {
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(texture.getTexelFormat());
if (needUpdate) {
if (texture.isSysmemMipAvailable(0)) {
GLint boundTex = -1;
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
glBindTexture(GL_TEXTURE_2D, object->_texture);
glTexSubImage2D(GL_TEXTURE_2D, 0,
0, 0, texture.getWidth(), texture.getHeight(),
texelFormat.format, texelFormat.type, texture.readMip<Resource::Byte>(0));
if (texture.isAutogenerateMips()) {
glGenerateMipmap(GL_TEXTURE_2D);
}
glBindTexture(GL_TEXTURE_2D, boundTex);
object->_contentStamp = texture.getDataStamp();
}
} else {
const GLvoid* bytes = 0;
if (texture.isSysmemMipAvailable(0)) {
bytes = texture.readMip<Resource::Byte>(0);
object->_contentStamp = texture.getDataStamp();
}
GLint boundTex = -1;
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
glBindTexture(GL_TEXTURE_2D, object->_texture);
glTexImage2D(GL_TEXTURE_2D, 0,
texelFormat.internalFormat, texture.getWidth(), texture.getHeight(), 0,
texelFormat.format, texelFormat.type, bytes);
if (bytes && texture.isAutogenerateMips()) {
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
}
glBindTexture(GL_TEXTURE_2D, boundTex);
object->_storageStamp = texture.getStamp();
object->_size = texture.getSize();
}
}
break;
default:
qDebug() << "GLBackend::syncGPUObject(const Texture&) case for Texture Type " << texture.getType() << " not supported";
}
CHECK_GL_ERROR();
}
GLuint GLBackend::getTextureID(const Texture& texture) {
GLBackend::syncGPUObject(texture);
return Backend::getGPUObject<GLBackend::GLTexture>(texture)->_texture;
}
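To make the GLTexelFormat mapping concrete, here is what evalGLTexelFormat yields for the two formats the texture cache uses later in this commit, read off the switch statements above (the GL type for UINT8 is presumably GL_UNSIGNED_BYTE via _elementTypeToGLType):

    // 4 x uint8, RGBA semantic -> internalFormat GL_RGBA, format GL_RGBA, type GL_UNSIGNED_BYTE
    GLTexelFormat rgba = GLTexelFormat::evalGLTexelFormat(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));

    // 3 x uint8, RGB semantic  -> internalFormat GL_RGB,  format GL_RGB,  type GL_UNSIGNED_BYTE
    GLTexelFormat rgb = GLTexelFormat::evalGLTexelFormat(gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB));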

View file

@ -1,6 +1,6 @@
//
// Resource.cpp
// interface/src/gpu
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 10/8/2014.
// Copyright 2014 High Fidelity, Inc.

View file

@ -1,6 +1,6 @@
//
// Resource.h
// interface/src/gpu
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 10/8/2014.
// Copyright 2014 High Fidelity, Inc.
@ -79,18 +79,20 @@ protected:
// Access the byte array.
// The edit version allows the data to be modified (and bumps the stamp).
inline const Byte* readData() const { return _data; }
inline Byte* editData() { _stamp++; return _data; }
const Byte* readData() const { return _data; }
Byte* editData() { _stamp++; return _data; }
template< typename T > const T* read() const { return reinterpret_cast< T* > ( _data ); }
template< typename T > T* edit() { _stamp++; return reinterpret_cast< T* > ( _data ); }
// Access the current version of the sysmem, used to compare if copies are in sync
inline Stamp getStamp() const { return _stamp; }
Stamp getStamp() const { return _stamp; }
static Size allocateMemory(Byte** memAllocated, Size size);
static void deallocateMemory(Byte* memDeallocated, Size size);
bool isAvailable() const { return (_data != 0); }
private:
Stamp _stamp;
Size _size;
@ -136,9 +138,9 @@ public:
protected:
Sysmem* _sysmem;
Sysmem* _sysmem = NULL;
mutable GPUObject* _gpuObject;
mutable GPUObject* _gpuObject = NULL;
// This shouldn't be used by anything other than the Backend class, with the proper casting.
void setGPUObject(GPUObject* gpuObject) const { _gpuObject = gpuObject; }
@ -363,64 +365,6 @@ public:
}
};
// TODO: For now TextureView works with Buffer as a place holder for the Texture.
// The overall logic should be about the same except that the Texture will be a real GL Texture under the hood
class TextureView {
public:
typedef Resource::Size Size;
typedef int Index;
BufferPointer _buffer;
Size _offset;
Size _size;
Element _element;
uint16 _stride;
TextureView() :
_buffer(NULL),
_offset(0),
_size(0),
_element(gpu::VEC3, gpu::UINT8, gpu::RGB),
_stride(1)
{};
TextureView(const Element& element) :
_buffer(NULL),
_offset(0),
_size(0),
_element(element),
_stride(uint16(element.getSize()))
{};
// create the BufferView and own the Buffer
TextureView(Buffer* newBuffer, const Element& element) :
_buffer(newBuffer),
_offset(0),
_size(newBuffer->getSize()),
_element(element),
_stride(uint16(element.getSize()))
{};
TextureView(const BufferPointer& buffer, const Element& element) :
_buffer(buffer),
_offset(0),
_size(buffer->getSize()),
_element(element),
_stride(uint16(element.getSize()))
{};
TextureView(const BufferPointer& buffer, Size offset, Size size, const Element& element) :
_buffer(buffer),
_offset(offset),
_size(size),
_element(element),
_stride(uint16(element.getSize()))
{};
~TextureView() {}
TextureView(const TextureView& view) = default;
TextureView& operator=(const TextureView& view) = default;
};
};
#endif

libraries/gpu/src/gpu/Texture.cpp (new executable file, 280 lines added)
View file

@ -0,0 +1,280 @@
//
// Texture.cpp
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 1/17/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "Texture.h"
#include <QDebug>
using namespace gpu;
Texture::Pixels::Pixels(const Element& format, Size size, const Byte* bytes) :
_sysmem(size, bytes),
_format(format) {
}
Texture::Pixels::~Pixels() {
}
Texture* Texture::create1D(const Element& texelFormat, uint16 width) {
return create(TEX_1D, texelFormat, width, 1, 1, 1, 1);
}
Texture* Texture::create2D(const Element& texelFormat, uint16 width, uint16 height) {
return create(TEX_2D, texelFormat, width, height, 1, 1, 1);
}
Texture* Texture::create3D(const Element& texelFormat, uint16 width, uint16 height, uint16 depth) {
return create(TEX_3D, texelFormat, width, height, depth, 1, 1);
}
Texture* Texture::createCube(const Element& texelFormat, uint16 width) {
return create(TEX_CUBE, texelFormat, width, width, 1, 1, 1);
}
Texture* Texture::create(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices)
{
Texture* tex = new Texture();
tex->_type = type;
tex->_texelFormat = texelFormat;
tex->_maxMip = 0;
tex->resize(type, width, height, depth, numSamples, numSlices);
return tex;
}
Texture::Texture():
Resource(),
_stamp(0),
_size(0),
_width(1),
_height(1),
_depth(1),
_numSamples(1),
_numSlices(1),
_maxMip(0),
_autoGenerateMips(false)
{
}
Texture::~Texture()
{
}
Texture::Size Texture::resize(Type type, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices) {
if (width && height && depth && numSamples && numSlices) {
bool changed = false;
if (_numSlices != numSlices) {
_numSlices = numSlices;
changed = true;
}
numSamples = evalNumSamplesUsed(numSamples);
if ((_type >= TEX_2D) && (_numSamples != numSamples)) {
_numSamples = numSamples;
changed = true;
}
if (_width != width) {
_width = width;
changed = true;
}
if ((_type >= TEX_2D) && (_height != height)) {
_height = height;
changed = true;
}
if ((_type >= TEX_3D) && (_depth != depth)) {
_depth = depth;
changed = true;
}
const int DIM_SIZE[] = {1, 1, 1, 6};
int size = DIM_SIZE[_type] *_width * _height * _depth * _numSamples * _texelFormat.getSize();
if (changed || (size != getSize())) {
_size = size;
_mips.clear();
_stamp++;
}
}
return _size;
}
Texture::Size Texture::resize1D(uint16 width, uint16 numSamples) {
return resize(TEX_1D, width, 1, 1, numSamples, 1);
}
Texture::Size Texture::resize2D(uint16 width, uint16 height, uint16 numSamples) {
return resize(TEX_2D, width, height, 1, numSamples, 1);
}
Texture::Size Texture::resize3D(uint16 width, uint16 height, uint16 depth, uint16 numSamples) {
return resize(TEX_3D, width, height, depth, numSamples, 1);
}
Texture::Size Texture::resizeCube(uint16 width, uint16 numSamples) {
return resize(TEX_CUBE, width, 1, 1, numSamples, 1);
}
// Reformat, unless auto mips mode would destroy all the sub mips
Texture::Size Texture::reformat(const Element& texelFormat) {
if (texelFormat != _texelFormat) {
_texelFormat = texelFormat;
const int DIM_SIZE[] = {1, 1, 1, 6};
int size = DIM_SIZE[_type] * _width * _height * _depth * _numSamples * _texelFormat.getSize();
if (size != getSize()) {
_size = size;
_mips.clear();
}
_stamp++;
}
return _size;
}
bool Texture::isColorRenderTarget() const {
return (_texelFormat.getSemantic() == gpu::RGBA);
}
bool Texture::isDepthStencilRenderTarget() const {
return (_texelFormat.getSemantic() == gpu::DEPTH) || (_texelFormat.getSemantic() == gpu::DEPTH_STENCIL);
}
uint16 Texture::evalDimNumMips(uint16 size) {
double largerDim = size;
double val = log(largerDim)/log(2.0);
return 1 + (uint16) val;
}
// The number of mips that the texture could have if all existed
// = 1 + log2(max(width, height, depth))
uint16 Texture::evalNumMips() const {
double largerDim = std::max(std::max(_width, _height), _depth);
double val = log(largerDim)/log(2.0);
return 1 + (uint16) val;
}
uint16 Texture::maxMip() const {
return _maxMip;
}
void Texture::allocateStoredMip(uint16 level) {
if (level >= _mips.size()) {
_mips.resize(level+1, 0);
_maxMip = level;
_stamp++;
}
if (!_mips[level]) {
_mips[level] = PixelsPointer(new Pixels());
_stamp++;
}
}
bool Texture::assignStoredMip(uint16 level, const Element& format, Size size, const Byte* bytes) {
// Check that the level accessed makes sense
if (level != 0) {
if (_autoGenerateMips) {
return false;
}
if (level >= evalNumMips()) {
return false;
}
}
// Then check that the memory buffer passed makes sense with its format
if (size == evalStoredMipSize(level, format)) {
// Ok we should be able to do that...
allocateStoredMip(level);
_mips[level]->_format = format;
_mips[level]->_sysmem.setData(size, bytes);
_stamp++;
return true;
}
return false;
}
uint16 Texture::autoGenerateMips(uint16 maxMip) {
_autoGenerateMips = true;
_maxMip = std::min((uint16) (evalNumMips() - 1), maxMip);
_stamp++;
return _maxMip;
}
uint16 Texture::getStoredMipWidth(uint16 level) const {
const Pixels* mip = accessStoredMip(level);
if (mip && mip->_sysmem.getSize()) {
return evalMipWidth(level);
} else {
return 0;
}
}
uint16 Texture::getStoredMipHeight(uint16 level) const {
const Pixels* mip = accessStoredMip(level);
if (mip && mip->_sysmem.getSize()) {
return evalMipHeight(level);
} else {
return 0;
}
}
uint16 Texture::getStoredMipDepth(uint16 level) const {
const Pixels* mip = accessStoredMip(level);
if (mip && mip->_sysmem.getSize()) {
return evalMipDepth(level);
} else {
return 0;
}
}
uint32 Texture::getStoredMipNumTexels(uint16 level) const {
const Pixels* mip = accessStoredMip(level);
if (mip && mip->_sysmem.getSize()) {
return evalMipWidth(level) * evalMipHeight(level) * evalMipDepth(level);
} else {
return 0;
}
}
uint32 Texture::getStoredMipSize(uint16 level) const {
const Pixels* mip = accessStoredMip(level);
if (mip && mip->_sysmem.getSize()) {
return evalMipWidth(level) * evalMipHeight(level) * evalMipDepth(level) * getTexelFormat().getSize();
} else {
return 0;
}
}
uint16 Texture::evalNumSamplesUsed(uint16 numSamplesTried)
{
uint16 sample = numSamplesTried;
if (numSamplesTried <= 1)
sample = 1;
else if (numSamplesTried < 4)
sample = 2;
else if (numSamplesTried < 8)
sample = 4;
else if (numSamplesTried < 16)
sample = 8;
else
sample = 8;
return sample;
}
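Putting the new API together, a hedged sketch of the typical create / assign / auto-mip sequence; it mirrors how TextureCache uses the class later in this commit, and the pixel buffer here is illustrative:

    const gpu::Element RGBA8(gpu::VEC4, gpu::UINT8, gpu::RGBA);
    gpu::TexturePointer tex(gpu::Texture::create2D(RGBA8, 2, 2));

    const gpu::Resource::Byte pixels[2 * 2 * 4] = {};        // 4 RGBA texels, zeroed for the example
    tex->assignStoredMip(0, RGBA8, sizeof(pixels), pixels);  // size must equal evalStoredMipSize(0, RGBA8)
    tex->autoGenerateMips(-1);                                // let the backend build the remaining mip levels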

libraries/gpu/src/gpu/Texture.h (new executable file, 278 lines added)
View file

@ -0,0 +1,278 @@
//
// Texture.h
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 1/16/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_Texture_h
#define hifi_gpu_Texture_h
#include "Resource.h"
#include <memory>
namespace gpu {
class Texture : public Resource {
public:
class Pixels {
public:
Pixels() {}
Pixels(const Pixels& pixels) = default;
Pixels(const Element& format, Size size, const Byte* bytes);
~Pixels();
Sysmem _sysmem;
Element _format;
};
typedef std::shared_ptr<Pixels> PixelsPointer;
enum Type {
TEX_1D = 0,
TEX_2D,
TEX_3D,
TEX_CUBE,
};
static Texture* create1D(const Element& texelFormat, uint16 width);
static Texture* create2D(const Element& texelFormat, uint16 width, uint16 height);
static Texture* create3D(const Element& texelFormat, uint16 width, uint16 height, uint16 depth);
static Texture* createCube(const Element& texelFormat, uint16 width);
Texture(const Texture& buf); // deep copy of the sysmem texture
Texture& operator=(const Texture& buf); // deep copy of the sysmem texture
~Texture();
const Stamp getStamp() const { return _stamp; }
const Stamp getDataStamp(uint16 level = 0) const {
const Pixels* mip = accessStoredMip(level);
if (mip) {
return mip->_sysmem.getStamp();
}
return getStamp();
}
// The size in bytes of data stored in the texture
Size getSize() const { return _size; }
// Resize, unless auto mips mode would destroy all the sub mips
Size resize1D(uint16 width, uint16 numSamples);
Size resize2D(uint16 width, uint16 height, uint16 numSamples);
Size resize3D(uint16 width, uint16 height, uint16 depth, uint16 numSamples);
Size resizeCube(uint16 width, uint16 numSamples);
// Reformat, unless auto mips mode would destroy all the sub mips
Size reformat(const Element& texelFormat);
// Size and format
Type getType() const { return _type; }
bool isColorRenderTarget() const;
bool isDepthStencilRenderTarget() const;
const Element& getTexelFormat() const { return _texelFormat; }
bool hasBorder() const { return false; }
uint16 getWidth() const { return _width; }
uint16 getHeight() const { return _height; }
uint16 getDepth() const { return _depth; }
uint32 getRowPitch() const { return getWidth() * getTexelFormat().getSize(); }
uint32 getNumTexels() const { return _width * _height * _depth; }
uint16 getNumSlices() const { return _numSlices; }
uint16 getNumSamples() const { return _numSamples; }
//---------------------------------------------------------------------
// Sub Mips manipulation
// The number of mips that a dimension could have
// = 1 + log2(size)
static uint16 evalDimNumMips(uint16 size);
// The number of mips that the texture could have if all existed
// = 1 + log2(max(width, height, depth))
uint16 evalNumMips() const;
// Eval the size that the mips level SHOULD have
// not the one stored in the Texture
uint16 evalMipWidth(uint16 level) const { return std::max(_width >> level, 1); }
uint16 evalMipHeight(uint16 level) const { return std::max(_height >> level, 1); }
uint16 evalMipDepth(uint16 level) const { return std::max(_depth >> level, 1); }
uint32 evalMipNumTexels(uint16 level) const { return evalMipWidth(level) * evalMipHeight(level) * evalMipDepth(level); }
uint32 evalMipSize(uint16 level) const { return evalMipNumTexels(level) * getTexelFormat().getSize(); }
uint32 evalStoredMipSize(uint16 level, const Element& format) const { return evalMipNumTexels(level) * format.getSize(); }
uint32 evalTotalSize() const {
uint32 size = 0;
uint16 minMipLevel = 0;
uint16 maxMipLevel = maxMip();
for (uint16 l = minMipLevel; l <= maxMipLevel; l++) {
size += evalMipSize(l);
}
return size * getNumSlices();
}
// maxMip is in the range [ 0 if no sub mips, log2(max(width, height, depth)) ]
// if autoGenerateMips is on => returns the maxMip level specified
// else returns the deepest mip level provided through assignStoredMip
uint16 maxMip() const;
// Generate the mips automatically
// But the sysmem version is not available
// Only works for the standard formats
// Specify the maximum Mip level available
// 0 is the default one
// 1 is the first level
// ...
// nbMips - 1 is the last mip level
//
// If -1 then all the mips are generated
//
// Return the total number of mips that will be available
uint16 autoGenerateMips(uint16 maxMip);
bool isAutogenerateMips() const { return _autoGenerateMips; }
// Manually allocate the mips down until the specified maxMip
// this is just allocating the sysmem version of it
// in case autoGen is on, this doesn't allocate
// Explicitly assign mip data for a certain level
// If Bytes is NULL then simply allocate the space so mip sysmem can be accessed
bool assignStoredMip(uint16 level, const Element& format, Size size, const Byte* bytes);
template< typename T > T* editMip(uint16 level) {
Pixels* mip = accessStoredMip(level);
if (mip) {
return mip->_sysmem.edit<T>();
}
return 0;
}
template< typename T > const T* readMip(uint16 level) const {
const Pixels* mip = accessStoredMip(level);
if (mip) {
return mip->_sysmem.read<T>();
}
return 0;
}
bool isSysmemMipAvailable(uint16 level) const {
const Pixels* mip = accessStoredMip(level);
if (mip) {
return mip->_sysmem.isAvailable();
}
return false;
}
// access sizes for the stored mips
uint16 getStoredMipWidth(uint16 level) const;
uint16 getStoredMipHeight(uint16 level) const;
uint16 getStoredMipDepth(uint16 level) const;
uint32 getStoredMipNumTexels(uint16 level) const;
uint32 getStoredMipSize(uint16 level) const;
static uint16 evalNumSamplesUsed(uint16 numSamplesTried);
protected:
std::vector<PixelsPointer> _mips;
Stamp _stamp;
uint32 _size;
Element _texelFormat;
uint16 _width;
uint16 _height;
uint16 _depth;
uint16 _numSlices;
uint16 _numSamples;
uint16 _maxMip;
Type _type;
bool _autoGenerateMips;
static Texture* create(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices);
Texture();
Size resize(Type type, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices);
// Access the sub mips
const Pixels* accessStoredMip(uint16 level) const {
if (level >= _mips.size()) {
return 0;
} else {
return _mips[level].get();
}
}
// Access the sub mips
Pixels* accessStoredMip(uint16 level) {
if (level >= _mips.size()) {
return 0;
} else {
return _mips[level].get();
}
}
void allocateStoredMip(uint16 level);
mutable GPUObject* _gpuObject = NULL;
// This shouldn't be used by anything other than the Backend class, with the proper casting.
void setGPUObject(GPUObject* gpuObject) const { _gpuObject = gpuObject; }
GPUObject* getGPUObject() const { return _gpuObject; }
friend class Backend;
};
typedef QSharedPointer<Texture> TexturePointer;
typedef std::vector< TexturePointer > Textures;
// A view on a Texture (texture pointer, subresource and element format), mirroring the BufferView pattern;
// the Texture underneath is now a real GPU resource rather than a Buffer placeholder.
class TextureView {
public:
typedef Resource::Size Size;
TexturePointer _texture = TexturePointer(NULL);
uint16 _subresource = 0;
Element _element = Element(gpu::VEC4, gpu::UINT8, gpu::RGBA);
TextureView() {};
TextureView(const Element& element) :
_element(element)
{};
// create the TextureView and own the Texture
TextureView(Texture* newTexture, const Element& element) :
_texture(newTexture),
_subresource(0),
_element(element)
{};
TextureView(const TexturePointer& texture, const Element& element) :
_texture(texture),
_subresource(0),
_element(element)
{};
~TextureView() {}
TextureView(const TextureView& view) = default;
TextureView& operator=(const TextureView& view) = default;
};
};
#endif
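A small worked example of the mip helpers declared above, assuming a 256 x 256 RGBA8 texture (4 bytes per texel):

    gpu::TexturePointer t(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 256, 256));
    // t->evalNumMips()   == 9     (1 + log2(256))
    // t->evalMipWidth(3) == 32    (256 >> 3)
    // t->evalMipSize(3)  == 4096  (32 * 32 * 4 bytes)
    // t->evalTotalSize() sums evalMipSize(l) for l in [0, maxMip()] and multiplies by getNumSlices()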

View file

@ -17,6 +17,7 @@
#include <glm/glm.hpp>
#include "gpu/Resource.h"
#include "gpu/Texture.h"
namespace model {
typedef gpu::BufferView UniformBufferView;

View file

@ -2224,7 +2224,10 @@ void Model::pickPrograms(gpu::Batch& batch, RenderMode mode, bool translucent, f
}
GLBATCH(glUseProgram)(activeProgram->programId());
GLBATCH(glUniform1f)(activeLocations->alphaThreshold, alphaThreshold);
if ((activeLocations->alphaThreshold > -1) && (mode != SHADOW_RENDER_MODE)) {
GLBATCH(glUniform1f)(activeLocations->alphaThreshold, alphaThreshold);
}
}
int Model::renderMeshesForModelsInScene(gpu::Batch& batch, RenderMode mode, bool translucent, float alphaThreshold,
@ -2414,9 +2417,12 @@ int Model::renderMeshesFromList(QVector<int>& list, gpu::Batch& batch, RenderMod
}
static bool showDiffuse = true;
if (showDiffuse && diffuseMap) {
GLBATCH(glBindTexture)(GL_TEXTURE_2D, diffuseMap->getID());
// GLBATCH(glBindTexture)(GL_TEXTURE_2D, diffuseMap->getID());
batch.setUniformTexture(0, diffuseMap->getGPUTexture());
} else {
GLBATCH(glBindTexture)(GL_TEXTURE_2D, textureCache->getWhiteTextureID());
// GLBATCH(glBindTexture)(GL_TEXTURE_2D, textureCache->getWhiteTextureID());
batch.setUniformTexture(0, textureCache->getWhiteTexture());
}
if (locations->texcoordMatrices >= 0) {

View file

@ -29,6 +29,8 @@ TextureCache::TextureCache() :
_permutationNormalTextureID(0),
_whiteTextureID(0),
_blueTextureID(0),
_whiteTexture(0),
_blueTexture(0),
_primaryDepthTextureID(0),
_primaryNormalTextureID(0),
_primarySpecularTextureID(0),
@ -176,6 +178,14 @@ GLuint TextureCache::getWhiteTextureID() {
return _whiteTextureID;
}
const gpu::TexturePointer& TextureCache::getWhiteTexture() {
if (_whiteTexture.isNull()) {
_whiteTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 1, 1));
_whiteTexture->assignStoredMip(0, _whiteTexture->getTexelFormat(), sizeof(OPAQUE_WHITE), OPAQUE_WHITE);
}
return _whiteTexture;
}
GLuint TextureCache::getBlueTextureID() {
if (_blueTextureID == 0) {
glGenTextures(1, &_blueTextureID);
@ -186,6 +196,15 @@ GLuint TextureCache::getBlueTextureID() {
return _blueTextureID;
}
const gpu::TexturePointer& TextureCache::getBlueTexture() {
if (_blueTexture.isNull()) {
_blueTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 1, 1));
_blueTexture->assignStoredMip(0, _blueTexture->getTexelFormat(), sizeof(OPAQUE_BLUE), OPAQUE_BLUE);
}
return _blueTexture;
}
/// Extra data for creating textures.
class TextureExtra {
public:
@ -547,6 +566,16 @@ void NetworkTexture::setImage(const QImage& image, bool translucent, const QColo
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
if (image.hasAlphaChannel()) {
_gpuTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), image.width(), image.height()));
_gpuTexture->assignStoredMip(0, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::BGRA), image.byteCount(), image.constBits());
_gpuTexture->autoGenerateMips(-1);
} else {
_gpuTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB), image.width(), image.height()));
_gpuTexture->assignStoredMip(0, gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB), image.byteCount(), image.constBits());
_gpuTexture->autoGenerateMips(-1);
}
}
void NetworkTexture::imageLoaded(const QImage& image) {
@ -586,6 +615,16 @@ QSharedPointer<Texture> DilatableNetworkTexture::getDilatedTexture(float dilatio
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
if (dilatedImage.hasAlphaChannel()) {
texture->_gpuTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), dilatedImage.width(), dilatedImage.height()));
texture->_gpuTexture->assignStoredMip(0, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::BGRA), dilatedImage.byteCount(), dilatedImage.constBits());
texture->_gpuTexture->autoGenerateMips(-1);
} else {
texture->_gpuTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB), dilatedImage.width(), dilatedImage.height()));
texture->_gpuTexture->assignStoredMip(0, gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB), dilatedImage.byteCount(), dilatedImage.constBits());
texture->_gpuTexture->autoGenerateMips(-1);
}
}
_dilatedTextures.insert(dilation, texture);
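Both blocks above follow the same recipe for wrapping a QImage in a gpu::Texture, condensed here as a hedged sketch (the helper name is illustrative, and only the alpha-channel branch is shown); Qt's 32-bit images are byte-ordered B,G,R,A in memory on little-endian hosts, which is why the stored mip is tagged gpu::BGRA while the texture itself is declared gpu::RGBA:

    gpu::TexturePointer makeGpuTexture(const QImage& image) {
        gpu::TexturePointer tex(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA),
                                                       image.width(), image.height()));
        // The mip's element describes the bytes as they sit in the QImage (BGRA order).
        tex->assignStoredMip(0, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::BGRA), image.byteCount(), image.constBits());
        tex->autoGenerateMips(-1);
        return tex;
    }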

View file

@ -13,6 +13,7 @@
#define hifi_TextureCache_h
#include <gpu/GPUConfig.h>
#include <gpu/Texture.h>
#include <QImage>
#include <QMap>
@ -50,9 +51,16 @@ public:
/// Returns the ID of an opaque white texture (useful for a default).
GLuint getWhiteTextureID();
/// Returns an opaque white texture (useful for a default).
const gpu::TexturePointer& getWhiteTexture();
/// Returns the ID of a pale blue texture (useful for a normal map).
GLuint getBlueTextureID();
/// Returns a pale blue texture (useful for a normal map).
const gpu::TexturePointer& getBlueTexture();
/// Loads a texture from the specified URL.
NetworkTexturePointer getTexture(const QUrl& url, TextureType type = DEFAULT_TEXTURE, bool dilatable = false,
const QByteArray& content = QByteArray());
@ -104,6 +112,8 @@ private:
GLuint _permutationNormalTextureID;
GLuint _whiteTextureID;
GLuint _blueTextureID;
gpu::TexturePointer _whiteTexture;
gpu::TexturePointer _blueTexture;
QHash<QUrl, QWeakPointer<NetworkTexture> > _dilatableNetworkTextures;
@ -124,14 +134,19 @@ private:
/// A simple object wrapper for an OpenGL texture.
class Texture {
public:
friend class TextureCache;
friend class DilatableNetworkTexture;
Texture();
~Texture();
GLuint getID() const { return _id; }
const gpu::TexturePointer& getGPUTexture() const { return _gpuTexture; }
protected:
gpu::TexturePointer _gpuTexture;
private:
GLuint _id;
};