Merge pull request #7688 from jherico/texture_derez
Reducing GPU texture memory footprint
This commit is contained in commit c410242fb2.
22 changed files with 794 additions and 257 deletions.
cmake/externals/glew/CMakeLists.txt (vendored, 6 lines changed)
@@ -7,9 +7,9 @@ endif ()
include(ExternalProject)
ExternalProject_Add(
${EXTERNAL_NAME}
URL http://hifi-public.s3.amazonaws.com/dependencies/glew_simple2.zip
URL_MD5 f05d858e8203c32b689da208ad8b39db
CONFIGURE_COMMAND CMAKE_ARGS ${ANDROID_CMAKE_ARGS} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
URL http://hifi-public.s3.amazonaws.com/dependencies/glew_simple_1.13.0.zip
URL_MD5 73f833649e904257b35bf4e84f8bdfb5
CONFIGURE_COMMAND CMAKE_ARGS ${ANDROID_CMAKE_ARGS} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
LOG_DOWNLOAD 1
LOG_CONFIGURE 1
LOG_BUILD 1

@@ -3,35 +3,18 @@ float aspect(vec2 v) {
}

vec3 aspectCorrectedTexture() {
vec2 uv = _position.xy;
vec2 uv;
if (abs(_position.y) > 0.4999) {
uv = _position.xz;
} else if (abs(_position.z) > 0.4999) {
uv = _position.xy;
} else {
uv = _position.yz;
}
uv += 0.5;
uv.y = 1.0 - uv.y;

float targetAspect = iWorldScale.x / iWorldScale.y;
float sourceAspect = aspect(iChannelResolution[0].xy);
float aspectCorrection = sourceAspect / targetAspect;
if (aspectCorrection > 1.0) {
float offset = aspectCorrection - 1.0;
float halfOffset = offset / 2.0;
uv.y -= halfOffset;
uv.y *= aspectCorrection;
} else {
float offset = 1.0 - aspectCorrection;
float halfOffset = offset / 2.0;
uv.x -= halfOffset;
uv.x /= aspectCorrection;
}

if (any(lessThan(uv, vec2(0.0)))) {
return vec3(0.0);
}

if (any(greaterThan(uv, vec2(1.0)))) {
return vec3(0.0);
}

vec4 color = texture(iChannel0, uv);
return color.rgb * max(0.5, sourceAspect) * max(0.9, fract(iWorldPosition.x));
return texture(iChannel0, uv).rgb;
}

float getProceduralColors(inout vec3 diffuse, inout vec3 specular, inout float shininess) {

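The aspect-correction branch in this shader rescales the UV so the sampled channel keeps its own aspect ratio instead of stretching across the entity face. A standalone sketch of the same remap, written in C++ purely for illustration (the function and parameter names are not from the codebase):

    #include <glm/glm.hpp>

    // Remap a [0,1]^2 uv so an image with aspect sourceAspect (width/height) is shown
    // undistorted on a quad with aspect targetAspect; uv outside [0,1] means "no color".
    glm::vec2 aspectCorrectUv(glm::vec2 uv, float sourceAspect, float targetAspect) {
        float aspectCorrection = sourceAspect / targetAspect;
        if (aspectCorrection > 1.0f) {
            // Source is relatively wider: letterbox by compressing vertically.
            float halfOffset = (aspectCorrection - 1.0f) / 2.0f;
            uv.y = (uv.y - halfOffset) * aspectCorrection;
        } else {
            // Source is relatively taller: pillarbox by compressing horizontally.
            float halfOffset = (1.0f - aspectCorrection) / 2.0f;
            uv.x = (uv.x - halfOffset) / aspectCorrection;
        }
        return uv;
    }
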
@@ -42,7 +42,7 @@ function createItems(count) {
name: TEST_ENTITY_NAME,
position: AUSTIN.avatarRelativePosition(AUSTIN.randomPositionXZ({ x: 0, y: 0, z: -2 }, RADIUS)),
color: { r: 255, g: 255, b: 255 },
dimensions: AUSTIN.randomDimensions(),
dimensions: { x: 0.5, y: 0.5, z: 0.5 }, //AUSTIN.randomDimensions(),
lifetime: ENTITY_LIFETIME,
userData: JSON.stringify({
ProceduralEntity: {

@@ -14,8 +14,8 @@ var qml = Script.resolvePath('stats.qml');
var window = new OverlayWindow({
title: 'Render Stats',
source: qml,
width: 300,
height: 200
width: 320,
height: 720
});
window.setPosition(500, 50);
window.closed.connect(function() { Script.stop(); });

@@ -360,6 +360,41 @@ Menu::Menu() {
resolutionGroup->addAction(addCheckableActionToQMenuAndActionHash(resolutionMenu, MenuOption::RenderResolutionThird, 0, false));
resolutionGroup->addAction(addCheckableActionToQMenuAndActionHash(resolutionMenu, MenuOption::RenderResolutionQuarter, 0, false));

//const QString = "Automatic Texture Memory";
//const QString = "64 MB";
//const QString = "256 MB";
//const QString = "512 MB";
//const QString = "1024 MB";
//const QString = "2048 MB";

// Developer > Render > Maximum Texture Memory
MenuWrapper* textureMenu = renderOptionsMenu->addMenu(MenuOption::RenderMaxTextureMemory);
QActionGroup* textureGroup = new QActionGroup(textureMenu);
textureGroup->setExclusive(true);
textureGroup->addAction(addCheckableActionToQMenuAndActionHash(textureMenu, MenuOption::RenderMaxTextureAutomatic, 0, true));
textureGroup->addAction(addCheckableActionToQMenuAndActionHash(textureMenu, MenuOption::RenderMaxTexture64MB, 0, false));
textureGroup->addAction(addCheckableActionToQMenuAndActionHash(textureMenu, MenuOption::RenderMaxTexture256MB, 0, false));
textureGroup->addAction(addCheckableActionToQMenuAndActionHash(textureMenu, MenuOption::RenderMaxTexture512MB, 0, false));
textureGroup->addAction(addCheckableActionToQMenuAndActionHash(textureMenu, MenuOption::RenderMaxTexture1024MB, 0, false));
textureGroup->addAction(addCheckableActionToQMenuAndActionHash(textureMenu, MenuOption::RenderMaxTexture2048MB, 0, false));
connect(textureGroup, &QActionGroup::triggered, [textureGroup] {
auto checked = textureGroup->checkedAction();
auto text = checked->text();
gpu::Context::Size newMaxTextureMemory { 0 };
if (MenuOption::RenderMaxTexture64MB == text) {
newMaxTextureMemory = MB_TO_BYTES(64);
} else if (MenuOption::RenderMaxTexture256MB == text) {
newMaxTextureMemory = MB_TO_BYTES(256);
} else if (MenuOption::RenderMaxTexture512MB == text) {
newMaxTextureMemory = MB_TO_BYTES(512);
} else if (MenuOption::RenderMaxTexture1024MB == text) {
newMaxTextureMemory = MB_TO_BYTES(1024);
} else if (MenuOption::RenderMaxTexture2048MB == text) {
newMaxTextureMemory = MB_TO_BYTES(2048);
}
gpu::Texture::setAllowedGPUMemoryUsage(newMaxTextureMemory);
});

// Developer > Render > LOD Tools
addActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::LodTools, 0, dialogsManager.data(), SLOT(lodTools()));

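The new menu entries feed the selected cap straight into the setter added to gpu::Texture. A minimal sketch, assuming application setup code outside the menu (the include paths and wrapper function are illustrative, not part of this PR):

    #include <NumericalConstants.h>
    #include <gpu/Texture.h>   // illustrative include path

    void applyFixedTextureBudget() {
        // Cap GPU texture memory at 512 MB; passing 0 restores the automatic budget
        // derived from the card's dedicated memory.
        gpu::Texture::setAllowedGPUMemoryUsage(MB_TO_BYTES(512));
    }
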
@@ -150,6 +150,13 @@ namespace MenuOption {
const QString RenderFocusIndicator = "Show Eye Focus";
const QString RenderLookAtTargets = "Show Look-at Targets";
const QString RenderLookAtVectors = "Show Look-at Vectors";
const QString RenderMaxTextureMemory = "Maximum Texture Memory";
const QString RenderMaxTextureAutomatic = "Automatic Texture Memory";
const QString RenderMaxTexture64MB = "64 MB";
const QString RenderMaxTexture256MB = "256 MB";
const QString RenderMaxTexture512MB = "512 MB";
const QString RenderMaxTexture1024MB = "1024 MB";
const QString RenderMaxTexture2048MB = "2048 MB";
const QString RenderResolution = "Scale Resolution";
const QString RenderResolutionOne = "1";
const QString RenderResolutionTwoThird = "2/3";

@@ -14,7 +14,7 @@
#include <assert.h>
#include <memory>

#include <glm/glm.hpp>
#include "Forward.h"

namespace gpu {

@@ -37,28 +37,6 @@ private:
friend class Backend;
};

typedef int Stamp;

typedef unsigned int uint32;
typedef int int32;
typedef unsigned short uint16;
typedef short int16;
typedef unsigned char uint8;
typedef char int8;

typedef unsigned char Byte;

typedef size_t Offset;

typedef glm::mat4 Mat4;
typedef glm::mat3 Mat3;
typedef glm::vec4 Vec4;
typedef glm::ivec4 Vec4i;
typedef glm::vec3 Vec3;
typedef glm::vec2 Vec2;
typedef glm::ivec2 Vec2i;
typedef glm::uvec2 Vec2u;

// Description of a scalar type
enum Type {

@@ -9,69 +9,77 @@
#ifndef hifi_gpu_Forward_h
#define hifi_gpu_Forward_h

#include <stdint.h>
#include <memory>
#include <vector>

#include <glm/glm.hpp>

namespace gpu {
class Batch;
class Backend;
class Context;
typedef std::shared_ptr<Context> ContextPointer;
using ContextPointer = std::shared_ptr<Context>;
class GPUObject;

typedef int Stamp;
typedef uint32_t uint32;
typedef int32_t int32;
typedef uint16_t uint16;
typedef int16_t int16;
typedef uint8_t uint8;
typedef int8_t int8;
using Stamp = int;
using uint32 = uint32_t;
using int32 = int32_t;
using uint16 = uint16_t;
using int16 = int16_t;
using uint8 = uint8_t;
using int8 = int8_t;

typedef uint8 Byte;
typedef uint32 Offset;
typedef std::vector<Offset> Offsets;
using Byte = uint8;
using Offset = size_t;
using Offsets = std::vector<Offset>;

typedef glm::mat4 Mat4;
typedef glm::mat3 Mat3;
typedef glm::vec4 Vec4;
typedef glm::ivec4 Vec4i;
typedef glm::vec3 Vec3;
typedef glm::vec2 Vec2;
typedef glm::ivec2 Vec2i;
typedef glm::uvec2 Vec2u;
using Mat4 = glm::mat4;
using Mat3 = glm::mat3;
using Vec4 = glm::vec4;
using Vec4i = glm::ivec4;
using Vec3 = glm::vec3;
using Vec2 = glm::vec2;
using Vec2i = glm::ivec2;
using Vec2u = glm::uvec2;
using Vec3u = glm::uvec3;
using Vec3u = glm::uvec3;

class Element;
typedef Element Format;
using Format = Element;
class Swapchain;
typedef std::shared_ptr<Swapchain> SwapchainPointer;
using SwapchainPointer = std::shared_ptr<Swapchain>;
class Framebuffer;
typedef std::shared_ptr<Framebuffer> FramebufferPointer;
using FramebufferPointer = std::shared_ptr<Framebuffer>;
class Pipeline;
typedef std::shared_ptr<Pipeline> PipelinePointer;
typedef std::vector<PipelinePointer> Pipelines;
using PipelinePointer = std::shared_ptr<Pipeline>;
using Pipelines = std::vector<PipelinePointer>;
class Query;
typedef std::shared_ptr<Query> QueryPointer;
typedef std::vector<QueryPointer> Queries;
using QueryPointer = std::shared_ptr<Query>;
using Queries = std::vector<QueryPointer>;
class Resource;
class Buffer;
typedef std::shared_ptr<Buffer> BufferPointer;
typedef std::vector<BufferPointer> Buffers;
using BufferPointer = std::shared_ptr<Buffer>;
using Buffers = std::vector<BufferPointer>;
class BufferView;
class Shader;
typedef Shader::Pointer ShaderPointer;
typedef std::vector<ShaderPointer> Shaders;
using ShaderPointer = std::shared_ptr<Shader>;
using Shaders = std::vector<ShaderPointer>;
class State;
typedef std::shared_ptr<State> StatePointer;
typedef std::vector<StatePointer> States;
using StatePointer = std::shared_ptr<State>;
using States = std::vector<StatePointer>;
class Stream;
class BufferStream;
typedef std::shared_ptr<BufferStream> BufferStreamPointer;
using BufferStreamPointer = std::shared_ptr<BufferStream>;
class Texture;
class SphericalHarmonics;
typedef std::shared_ptr<SphericalHarmonics> SHPointer;
using SHPointer = std::shared_ptr<SphericalHarmonics>;
class Sampler;
class Texture;
typedef std::shared_ptr<Texture> TexturePointer;
typedef std::vector<TexturePointer> Textures;
using TexturePointer = std::shared_ptr<Texture>;
using Textures = std::vector<TexturePointer>;
class TextureView;
typedef std::vector<TextureView> TextureViews;
using TextureViews = std::vector<TextureView>;
}

#endif

@@ -14,6 +14,8 @@
#include <queue>
#include <list>
#include <glm/gtc/type_ptr.hpp>
#include <GPUIdent.h>
#include <NumericalConstants.h>

#if defined(NSIGHT_FOUND)
#include "nvToolsExt.h"

@@ -86,10 +88,18 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
void GLBackend::init() {
static std::once_flag once;
std::call_once(once, [] {
QString vendor{ (const char*)glGetString(GL_VENDOR) };
QString renderer{ (const char*)glGetString(GL_RENDERER) };
qCDebug(gpulogging) << "GL Version: " << QString((const char*) glGetString(GL_VERSION));
qCDebug(gpulogging) << "GL Shader Language Version: " << QString((const char*) glGetString(GL_SHADING_LANGUAGE_VERSION));
qCDebug(gpulogging) << "GL Vendor: " << QString((const char*) glGetString(GL_VENDOR));
qCDebug(gpulogging) << "GL Renderer: " << QString((const char*) glGetString(GL_RENDERER));
qCDebug(gpulogging) << "GL Vendor: " << vendor;
qCDebug(gpulogging) << "GL Renderer: " << renderer;
GPUIdent* gpu = GPUIdent::getInstance(vendor, renderer);
// From here on, GPUIdent::getInstance()->getMumble() should efficiently give the same answers.
qCDebug(gpulogging) << "GPU:";
qCDebug(gpulogging) << "\tcard:" << gpu->getName();
qCDebug(gpulogging) << "\tdriver:" << gpu->getDriver();
qCDebug(gpulogging) << "\tdedicated memory:" << gpu->getMemory() << "MB";

glewExperimental = true;
GLenum err = glewInit();

@@ -117,6 +127,50 @@ void GLBackend::init() {
});
}

Context::Size GLBackend::getDedicatedMemory() {
static Context::Size dedicatedMemory { 0 };
static std::once_flag once;
std::call_once(once, [&] {
#ifdef Q_OS_WIN
if (!dedicatedMemory && wglGetGPUIDsAMD && wglGetGPUInfoAMD) {
UINT maxCount = wglGetGPUIDsAMD(0, 0);
std::vector<UINT> ids;
ids.resize(maxCount);
wglGetGPUIDsAMD(maxCount, &ids[0]);
GLuint memTotal;
wglGetGPUInfoAMD(ids[0], WGL_GPU_RAM_AMD, GL_UNSIGNED_INT, sizeof(GLuint), &memTotal);
dedicatedMemory = MB_TO_BYTES(memTotal);
}
#endif

if (!dedicatedMemory) {
GLint atiGpuMemory[4];
// not really total memory, but close enough if called early enough in the application lifecycle
glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, atiGpuMemory);
if (GL_NO_ERROR == glGetError()) {
dedicatedMemory = KB_TO_BYTES(atiGpuMemory[0]);
}
}

if (!dedicatedMemory) {
GLint nvGpuMemory { 0 };
glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &nvGpuMemory);
if (GL_NO_ERROR == glGetError()) {
dedicatedMemory = KB_TO_BYTES(nvGpuMemory);
}
}

if (!dedicatedMemory) {
auto gpuIdent = GPUIdent::getInstance();
if (gpuIdent && gpuIdent->isValid()) {
dedicatedMemory = MB_TO_BYTES(gpuIdent->getMemory());
}
}
});

return dedicatedMemory;
}

Backend* GLBackend::createBackend() {
return new GLBackend();
}

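GLBackend::getDedicatedMemory() tries WGL_AMD_gpu_association, then GL_TEXTURE_FREE_MEMORY_ATI, then GL_NVX_gpu_memory_info, and finally the WMI/CGL-based GPUIdent value. A small sketch of how a caller might log the result at startup, assuming the gpulogging category and the BYTES_TO_MB macro introduced by this change set (the wrapper function is illustrative):

    void logGpuMemoryBudget() {
        auto dedicated = gpu::GLBackend::getDedicatedMemory();   // bytes, 0 if it could not be determined
        qCDebug(gpulogging) << "Dedicated GPU memory:" << BYTES_TO_MB(dedicated) << "MB";
        qCDebug(gpulogging) << "Texture memory pressure:" << gpu::GLBackend::GLTexture::getMemoryPressure();
    }
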
@@ -37,6 +37,8 @@ class GLBackend : public Backend {
explicit GLBackend(bool syncCache);
GLBackend();
public:
static Context::Size getDedicatedMemory();

virtual ~GLBackend();

virtual void render(Batch& batch);

@@ -82,11 +84,35 @@ public:
const Stamp _storageStamp;
Stamp _contentStamp { 0 };
const GLenum _target;
const uint16 _maxMip;
const uint16 _minMip;
const bool _transferrable;

GLTexture(const gpu::Texture& gpuTexture);
struct DownsampleSource {
using Pointer = std::shared_ptr<DownsampleSource>;
DownsampleSource(GLTexture& oldTexture);
~DownsampleSource();
const GLuint _texture;
const uint16 _minMip;
const uint16 _maxMip;
};

DownsampleSource::Pointer _downsampleSource;

GLTexture(bool transferrable, const gpu::Texture& gpuTexture);
GLTexture(GLTexture& originalTexture, const gpu::Texture& gpuTexture);
~GLTexture();

// Return a floating point value indicating how much of the allowed
// texture memory we are currently consuming. A value of 0 indicates
// no texture memory usage, while a value of 1 indicates all available / allowed memory
// is consumed. A value above 1 indicates that there is a problem.
static float getMemoryPressure();

void withPreservedTexture(std::function<void()> f);

void createTexture();
void allocateStorage();

GLuint size() const { return _size; }
GLuint virtualSize() const { return _virtualSize; }

@@ -118,26 +144,34 @@ public:
// Is the texture in a state where it can be rendered with no work?
bool isReady() const;

// Is this texture pushing us over the memory limit?
bool isOverMaxMemory() const;

// Move the image bits from the CPU to the GPU
void transfer() const;

// Execute any post-move operations that must occur only on the main thread
void postTransfer();

uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }

static const size_t CUBE_NUM_FACES = 6;
static const GLenum CUBE_FACE_LAYOUT[6];

private:
friend class GLTextureTransferHelper;

GLTexture(bool transferrable, const gpu::Texture& gpuTexture, bool init);
// at creation the true texture is created in GL
// it becomes public only when ready.
GLuint _privateTexture{ 0 };

void setSize(GLuint size);
void setVirtualSize(GLuint size);
const std::vector<GLenum>& getFaceTargets() const;

GLuint _size; // true size as reported by the gl api
GLuint _virtualSize; // theoretical size as expected
GLuint _numLevels{ 0 };
void setSize(GLuint size);

const GLuint _virtualSize; // theoretical size as expected
GLuint _size { 0 }; // true size as reported by the gl api

void transferMip(uint16_t mipLevel, uint8_t face = 0) const;

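getMemoryPressure() is the ratio of consumed to allowed texture memory, so values above 1.0 mean the budget is exceeded. A worked example under the 75%-of-dedicated-memory default used when no explicit cap is set (numbers illustrative, not from the PR):

    // Assuming a 2048 MB card and no explicit cap:
    //   budget   = (2048 MB >> 2) * 3 = 1536 MB     (75% of dedicated memory)
    //   consumed = 1800 MB of resident textures
    //   pressure = 1800 / 1536 ~= 1.17
    // A pressure above 1.0 lets isOverMaxMemory() return true for the textures that
    // currently use the deepest mip chains, and syncGPUObject() then rebuilds them
    // with a higher minimum mip.
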
@@ -10,6 +10,8 @@
//
#include "GPULogging.h"

#include <unordered_set>
#include <unordered_map>
#include <QtCore/QThread>

#include "GLBackendShared.h"

@@ -35,44 +37,166 @@ GLenum gpuToGLTextureType(const Texture& texture) {
}

GLuint allocateSingleTexture() {
Backend::incrementTextureGPUCount();
GLuint result;
glGenTextures(1, &result);
return result;
}

// FIXME placeholder for texture memory over-use
#define DEFAULT_MAX_MEMORY_MB 256

float GLBackend::GLTexture::getMemoryPressure() {
// Check for an explicit memory limit
auto availableTextureMemory = Texture::getAllowedGPUMemoryUsage();

// If no memory limit has been set, use a percentage of the total dedicated memory
if (!availableTextureMemory) {
auto totalGpuMemory = GLBackend::getDedicatedMemory();

// If no limit has been explicitly set, and the dedicated memory can't be determined,
// just use a fallback fixed value of 256 MB
if (!totalGpuMemory) {
totalGpuMemory = MB_TO_BYTES(DEFAULT_MAX_MEMORY_MB);
}

// Allow 75% of all available GPU memory to be consumed by textures
// FIXME overly conservative?
availableTextureMemory = (totalGpuMemory >> 2) * 3;
}

// Return the consumed texture memory divided by the available texture memory.
auto consumedGpuMemory = Context::getTextureGPUMemoryUsage();
return (float)consumedGpuMemory / (float)availableTextureMemory;
}

GLBackend::GLTexture::DownsampleSource::DownsampleSource(GLTexture& oldTexture) :
_texture(oldTexture._privateTexture),
_minMip(oldTexture._minMip),
_maxMip(oldTexture._maxMip)
{
// Take ownership of the GL texture
oldTexture._texture = oldTexture._privateTexture = 0;
}

GLBackend::GLTexture::DownsampleSource::~DownsampleSource() {
if (_texture) {
Backend::decrementTextureGPUCount();
glDeleteTextures(1, &_texture);
}
}

const GLenum GLBackend::GLTexture::CUBE_FACE_LAYOUT[6] = {
GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};

// Create the texture and allocate storage
GLBackend::GLTexture::GLTexture(const Texture& texture) :
static std::map<uint16, size_t> _textureCountByMips;
static uint16 _currentMaxMipCount { 0 };

GLBackend::GLTexture::GLTexture(bool transferrable, const Texture& texture, bool init) :
_storageStamp(texture.getStamp()),
_target(gpuToGLTextureType(texture)),
_size(0),
_virtualSize(0),
_numLevels(texture.maxMip() + 1),
_maxMip(texture.maxMip()),
_minMip(texture.minMip()),
_transferrable(transferrable),
_virtualSize(texture.evalTotalSize()),
_size(_virtualSize),
_gpuTexture(texture)
{
Backend::incrementTextureGPUCount();
Backend::setGPUObject(texture, this);
{
Q_UNUSED(init);

// updateSize();
GLuint virtualSize = _gpuTexture.evalTotalSize();
setVirtualSize(virtualSize);
setSize(virtualSize);
if (_transferrable) {
uint16 mipCount = usedMipLevels();
_currentMaxMipCount = std::max(_currentMaxMipCount, mipCount);
if (!_textureCountByMips.count(mipCount)) {
_textureCountByMips[mipCount] = 1;
} else {
++_textureCountByMips[mipCount];
}
} else {
withPreservedTexture([&] {
createTexture();
});
_contentStamp = _gpuTexture.getDataStamp();
postTransfer();
}

Backend::updateTextureGPUMemoryUsage(0, _size);
Backend::updateTextureGPUVirtualMemoryUsage(0, _virtualSize);
}

void GLBackend::GLTexture::createTexture() {
_privateTexture = allocateSingleTexture();
// Create the texture and allocate storage
GLBackend::GLTexture::GLTexture(bool transferrable, const Texture& texture) :
GLTexture(transferrable, texture, true)
{
Backend::setGPUObject(texture, this);
}

GLsizei width = _gpuTexture.getWidth();
GLsizei height = _gpuTexture.getHeight();
// Create the texture and copy from the original higher resolution version
GLBackend::GLTexture::GLTexture(GLTexture& originalTexture, const gpu::Texture& texture) :
GLTexture(originalTexture._transferrable, texture, true)
{
if (!originalTexture._texture) {
qFatal("Invalid original texture");
}
Q_ASSERT(_minMip >= originalTexture._minMip);
// Our downsampler takes ownership of the texture
_downsampleSource = std::make_shared<DownsampleSource>(originalTexture);
_texture = _downsampleSource->_texture;

GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuTexture.getTexelFormat());
// Set the GPU object last because that implicitly destroys the originalTexture object
Backend::setGPUObject(texture, this);
}

GLBackend::GLTexture::~GLTexture() {
if (_privateTexture != 0) {
Backend::decrementTextureGPUCount();
glDeleteTextures(1, &_privateTexture);
}

if (_transferrable) {
uint16 mipCount = usedMipLevels();
Q_ASSERT(_textureCountByMips.count(mipCount));
auto& numTexturesForMipCount = _textureCountByMips[mipCount];
--numTexturesForMipCount;
if (0 == numTexturesForMipCount) {
_textureCountByMips.erase(mipCount);
if (mipCount == _currentMaxMipCount) {
_currentMaxMipCount = _textureCountByMips.rbegin()->first;
}
}
}

Backend::updateTextureGPUMemoryUsage(_size, 0);
Backend::updateTextureGPUVirtualMemoryUsage(_virtualSize, 0);
}

const std::vector<GLenum>& GLBackend::GLTexture::getFaceTargets() const {
static std::vector<GLenum> cubeFaceTargets {
GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
static std::vector<GLenum> faceTargets {
GL_TEXTURE_2D
};
switch (_target) {
case GL_TEXTURE_2D:
return faceTargets;
case GL_TEXTURE_CUBE_MAP:
return cubeFaceTargets;
default:
Q_UNREACHABLE();
break;
}
Q_UNREACHABLE();
return faceTargets;
}

void GLBackend::GLTexture::withPreservedTexture(std::function<void()> f) {
GLint boundTex = -1;
switch (_target) {
case GL_TEXTURE_2D:

@@ -88,47 +212,46 @@ void GLBackend::GLTexture::createTexture() {
}
(void)CHECK_GL_ERROR();

glBindTexture(_target, _privateTexture);

(void)CHECK_GL_ERROR();
// FIXME: this usage of TexStorage doesn't work with compressed textures, although it should.
// Go through the process of allocating the correct storage
if (GLEW_VERSION_4_2 && !_gpuTexture.getTexelFormat().isCompressed()) {
glTexStorage2D(_target, _numLevels, texelFormat.internalFormat, width, height);
(void)CHECK_GL_ERROR();
} else {
glTexParameteri(_target, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(_target, GL_TEXTURE_MAX_LEVEL, _numLevels - 1);
for (uint16_t l = 0; l < _numLevels; l++) {
if (_gpuTexture.getType() == gpu::Texture::TEX_CUBE) {
for (size_t face = 0; face < CUBE_NUM_FACES; face++) {
glTexImage2D(CUBE_FACE_LAYOUT[face], l, texelFormat.internalFormat, width, height, 0, texelFormat.format, texelFormat.type, NULL);
}
} else {
glTexImage2D(_target, l, texelFormat.internalFormat, width, height, 0, texelFormat.format, texelFormat.type, NULL);
}
width = std::max(1, (width / 2));
height = std::max(1, (height / 2));
}
(void)CHECK_GL_ERROR();
}

syncSampler(_gpuTexture.getSampler(), _gpuTexture.getType(), this);
(void)CHECK_GL_ERROR();

f();

glBindTexture(_target, boundTex);
(void)CHECK_GL_ERROR();
}

GLBackend::GLTexture::~GLTexture() {
if (_privateTexture != 0) {
glDeleteTextures(1, &_privateTexture);
void GLBackend::GLTexture::createTexture() {
_privateTexture = allocateSingleTexture();

glBindTexture(_target, _privateTexture);
(void)CHECK_GL_ERROR();

allocateStorage();
(void)CHECK_GL_ERROR();

syncSampler(_gpuTexture.getSampler(), _gpuTexture.getType(), this);
(void)CHECK_GL_ERROR();
}

void GLBackend::GLTexture::allocateStorage() {
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuTexture.getTexelFormat());
glTexParameteri(_target, GL_TEXTURE_BASE_LEVEL, 0);
(void)CHECK_GL_ERROR();
glTexParameteri(_target, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
(void)CHECK_GL_ERROR();
if (GLEW_VERSION_4_2 && !_gpuTexture.getTexelFormat().isCompressed()) {
// Get the dimensions, accounting for the downgrade level
Vec3u dimensions = _gpuTexture.evalMipDimensions(_minMip);
glTexStorage2D(_target, usedMipLevels(), texelFormat.internalFormat, dimensions.x, dimensions.y);
(void)CHECK_GL_ERROR();
} else {
for (uint16_t l = _minMip; l < _maxMip; l++) {
// Get the mip level dimensions, accounting for the downgrade level
Vec3u dimensions = _gpuTexture.evalMipDimensions(l);
for (GLenum target : getFaceTargets()) {
glTexImage2D(target, l - _minMip, texelFormat.internalFormat, dimensions.x, dimensions.y, 0, texelFormat.format, texelFormat.type, NULL);
(void)CHECK_GL_ERROR();
}
}
}

Backend::updateTextureGPUMemoryUsage(_size, 0);
Backend::updateTextureGPUVirtualMemoryUsage(_virtualSize, 0);
Backend::decrementTextureGPUCount();
}

@@ -137,16 +260,10 @@ void GLBackend::GLTexture::setSize(GLuint size) {
_size = size;
}

void GLBackend::GLTexture::setVirtualSize(GLuint size) {
Backend::updateTextureGPUVirtualMemoryUsage(_virtualSize, size);
_virtualSize = size;
}

void GLBackend::GLTexture::updateSize() {
GLuint virtualSize = _gpuTexture.evalTotalSize();
setVirtualSize(virtualSize);
setSize(_virtualSize);
if (!_texture) {
setSize(virtualSize);
return;
}

if (_gpuTexture.getTexelFormat().isCompressed()) {

@@ -161,7 +278,7 @@ void GLBackend::GLTexture::updateSize() {
(void)CHECK_GL_ERROR();

if (gpuSize) {
for (GLuint level = 0; level < _numLevels; level++) {
for (GLuint level = _minMip; level < _maxMip; level++) {
GLint levelSize{ 0 };
glGetTexLevelParameteriv(proxyType, level, GL_TEXTURE_COMPRESSED_IMAGE_SIZE, &levelSize);
levelSize *= numFaces;

@@ -172,24 +289,32 @@ void GLBackend::GLTexture::updateSize() {
gpuSize += levelSize;
}
(void)CHECK_GL_ERROR();

setSize(gpuSize);
} else {
setSize(virtualSize);
}

} else {
setSize(virtualSize);
}
return;
}
}
}

bool GLBackend::GLTexture::isInvalid() const {
return _storageStamp < _gpuTexture.getStamp();
}

bool GLBackend::GLTexture::isOutdated() const {
return _contentStamp < _gpuTexture.getDataStamp();
return GLTexture::Idle == _syncState && _contentStamp < _gpuTexture.getDataStamp();
}

bool GLBackend::GLTexture::isOverMaxMemory() const {
// FIXME switch to using the max mip count used from the previous frame
if (usedMipLevels() < _currentMaxMipCount) {
return false;
}
Q_ASSERT(usedMipLevels() == _currentMaxMipCount);

if (getMemoryPressure() < 1.0f) {
return false;
}

return true;
}

bool GLBackend::GLTexture::isReady() const {

@@ -203,23 +328,28 @@ bool GLBackend::GLTexture::isReady() const {
auto syncState = _syncState.load();

if (isOutdated()) {
return Pending == syncState;
return Idle != syncState;
}

return Idle == syncState;
if (Idle != syncState) {
return false;
}

return true;
}

// Move content bits from the CPU to the GPU for a given mip / face
void GLBackend::GLTexture::transferMip(uint16_t mipLevel, uint8_t face) const {
auto mip = _gpuTexture.accessStoredMipFace(mipLevel, face);
GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuTexture.getTexelFormat(), mip->getFormat());
//GLenum target = getFaceTargets()[face];
GLenum target = _target == GL_TEXTURE_2D ? GL_TEXTURE_2D : CUBE_FACE_LAYOUT[face];
uvec2 size = uvec2(_gpuTexture.getWidth(), _gpuTexture.getHeight());
size >>= mipLevel;
auto size = _gpuTexture.evalMipDimensions(mipLevel);
glTexSubImage2D(target, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
(void)CHECK_GL_ERROR();
}

// This should never happen on the main thread
// Move content bits from the CPU to the GPU
void GLBackend::GLTexture::transfer() const {
PROFILE_RANGE(__FUNCTION__);

@@ -229,15 +359,39 @@ void GLBackend::GLTexture::transfer() const {
return;
}

//_secretTexture
glBindTexture(_target, _privateTexture);
// glBindTexture(_target, _texture);
// Go through the process of allocating the correct storage and/or update the content
switch (_gpuTexture.getType()) {
case Texture::TEX_2D:
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuTexture.isStoredMipFaceAvailable(i)) {
transferMip(i);
(void)CHECK_GL_ERROR();

if (_downsampleSource) {
GLuint fbo { 0 };
glGenFramebuffers(1, &fbo);
(void)CHECK_GL_ERROR();
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
(void)CHECK_GL_ERROR();
// Find the distance between the old min mip and the new one
uint16 mipOffset = _minMip - _downsampleSource->_minMip;
for (uint16 i = _minMip; i <= _maxMip; ++i) {
uint16 targetMip = i - _minMip;
uint16 sourceMip = targetMip + mipOffset;
Vec3u dimensions = _gpuTexture.evalMipDimensions(i);
for (GLenum target : getFaceTargets()) {
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, _downsampleSource->_texture, sourceMip);
(void)CHECK_GL_ERROR();
glCopyTexSubImage2D(target, targetMip, 0, 0, 0, 0, dimensions.x, dimensions.y);
(void)CHECK_GL_ERROR();
}
}
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &fbo);
} else {
// Go through the process of allocating the correct storage and/or update the content
switch (_gpuTexture.getType()) {
case Texture::TEX_2D:
{
for (uint16_t i = _minMip; i <= _maxMip; ++i) {
if (_gpuTexture.isStoredMipFaceAvailable(i)) {
transferMip(i);
}
}
}
break;

@@ -256,8 +410,8 @@ void GLBackend::GLTexture::transfer() const {
default:
qCWarning(gpulogging) << __FUNCTION__ << " case for Texture Type " << _gpuTexture.getType() << " not supported";
break;
}
}

if (_gpuTexture.isAutogenerateMips()) {
glGenerateMipmap(_target);
(void)CHECK_GL_ERROR();

@@ -271,6 +425,8 @@ void GLBackend::GLTexture::postTransfer() {
// The public gltexture becomes available
_texture = _privateTexture;

_downsampleSource.reset();

// At this point the mip pixels have been loaded, we can notify the gpu texture to abandon its memory
switch (_gpuTexture.getType()) {
case Texture::TEX_2D:

@@ -307,39 +463,38 @@ GLBackend::GLTexture* GLBackend::syncGPUObject(const TexturePointer& texturePoin

// If the object hasn't been created, or the object definition is out of date, drop and re-create
GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(texture);
if (object && object->isReady()) {
return object;
}

// Object isn't ready, check what we need to do...

// Create the texture if need be (force re-creation if the storage stamp changes
// for easier use of immutable storage)
if (!object || object->isInvalid()) {
// This automatically destroys the old texture
object = new GLTexture(texture);
// This automatically destroys any previous texture
object = new GLTexture(needTransfer, texture);
}

// The object may not need to be transferred after creation
if (!needTransfer) {
object->createTexture();
object->_contentStamp = texturePointer->getDataStamp();
if (!object->_transferrable) {
return object;
}

// If we just did a transfer, return the object after doing post-transfer work
if (GLTexture::Transferred == object->getSyncState()) {
object->postTransfer();
return object;
}

// Object might be outdated, if so, start the transfer
// (outdated objects that are already in transfer will have reported 'true' for ready())
if (object->isOutdated()) {
Backend::incrementTextureGPUTransferCount();
if (object->isReady()) {
// Do we need to reduce texture memory usage?
if (object->isOverMaxMemory() && texturePointer->incremementMinMip()) {
// This automatically destroys the old texture
object = new GLTexture(*object, texture);
_textureTransferHelper->transferTexture(texturePointer);
}
} else if (object->isOutdated()) {
// Object might be outdated, if so, start the transfer
// (outdated objects that are already in transfer will have reported 'true' for ready())
_textureTransferHelper->transferTexture(texturePointer);
}

if (GLTexture::Transferred == object->getSyncState()) {
Backend::decrementTextureGPUTransferCount();
object->postTransfer();
}

return object;
}

@@ -359,8 +514,14 @@ GLuint GLBackend::getTextureID(const TexturePointer& texture, bool sync) {
} else {
object = Backend::getGPUObject<GLBackend::GLTexture>(*texture);
}
if (object && object->getSyncState() == GLTexture::Idle) {
return object->_texture;
if (object) {
if (object->getSyncState() == GLTexture::Idle) {
return object->_texture;
} else if (object->_downsampleSource) {
return object->_downsampleSource->_texture;
} else {
return 0;
}
} else {
return 0;
}

@@ -425,7 +586,7 @@ void GLBackend::syncSampler(const Sampler& sampler, Texture::Type type, const GL
glTexParameteri(object->_target, GL_TEXTURE_WRAP_R, wrapModes[sampler.getWrapModeW()]);

glTexParameterfv(object->_target, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
glTexParameteri(object->_target, GL_TEXTURE_BASE_LEVEL, sampler.getMipOffset());
glTexParameteri(object->_target, GL_TEXTURE_BASE_LEVEL, (uint16)sampler.getMipOffset());
glTexParameterf(object->_target, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
glTexParameterf(object->_target, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
glTexParameterf(object->_target, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());

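When a texture is rebuilt at a lower resolution, transfer() does not re-upload pixels from the CPU: it attaches each surviving mip of the old GL texture to a read framebuffer and copies it into the new, smaller allocation. A self-contained sketch of that copy for the 2D case, assuming the destination mips are already allocated (the helper name and parameters are illustrative, not engine code; a GL loader header such as GLEW is assumed to be included):

    #include <cstdint>
    #include <vector>
    #include <glm/glm.hpp>

    // Copy each surviving mip of oldTexture into the freshly allocated newTexture.
    // srcBaseMip is how many mips the new texture skips relative to the old one.
    void copyMipsFromOldTexture(GLenum target, GLuint oldTexture, GLuint newTexture,
                                uint16_t srcBaseMip, uint16_t mipCount,
                                const std::vector<glm::uvec2>& newMipDimensions) {
        GLuint fbo = 0;
        glGenFramebuffers(1, &fbo);
        glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
        glBindTexture(target, newTexture);                     // destination of glCopyTexSubImage2D
        for (uint16_t dstMip = 0; dstMip < mipCount; ++dstMip) {
            uint16_t srcMip = srcBaseMip + dstMip;             // old texture holds the higher-resolution mips
            glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, oldTexture, srcMip);
            const glm::uvec2& dim = newMipDimensions[dstMip];
            glCopyTexSubImage2D(target, dstMip, 0, 0, 0, 0, dim.x, dim.y);
        }
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
        glDeleteFramebuffers(1, &fbo);
    }
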
@@ -11,11 +11,8 @@
#include "GLBackendShared.h"

#ifdef THREADED_TEXTURE_TRANSFER

#include <gl/OffscreenGLCanvas.h>
#include <gl/QOpenGLContextWrapper.h>

#endif

using namespace gpu;

@@ -46,12 +43,20 @@ GLTextureTransferHelper::~GLTextureTransferHelper() {

void GLTextureTransferHelper::transferTexture(const gpu::TexturePointer& texturePointer) {
GLBackend::GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(*texturePointer);
Backend::incrementTextureGPUTransferCount();
#ifdef THREADED_TEXTURE_TRANSFER
TextureTransferPackage package{ texturePointer, 0};
GLsync fence { 0 };
//fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
//glFlush();

TextureTransferPackage package { texturePointer, fence };
object->setSyncState(GLBackend::GLTexture::Pending);
queueItem(package);
#else
object->transfer();
object->withPreservedTexture([&] {
do_transfer(*object);
});
object->_contentStamp = texturePointer->getDataStamp();
object->setSyncState(GLBackend::GLTexture::Transferred);
#endif
}

@@ -70,6 +75,12 @@ void GLTextureTransferHelper::shutdown() {
#endif
}

void GLTextureTransferHelper::do_transfer(GLBackend::GLTexture& texture) {
texture.createTexture();
texture.transfer();
texture.updateSize();
Backend::decrementTextureGPUTransferCount();
}

bool GLTextureTransferHelper::processQueueItems(const Queue& messages) {
for (auto package : messages) {

@@ -79,14 +90,16 @@ bool GLTextureTransferHelper::processQueueItems(const Queue& messages) {
continue;
}

if (package.fence) {
glClientWaitSync(package.fence, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
glDeleteSync(package.fence);
package.fence = 0;
}

GLBackend::GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(*texturePointer);
object->createTexture();

object->transfer();

object->updateSize();

do_transfer(*object);
glBindTexture(object->_target, 0);

auto writeSync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glClientWaitSync(writeSync, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
glDeleteSync(writeSync);

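The transfer thread and the main context coordinate with GL sync objects: the uploader signals a fence after its last command and the consumer waits on it before the texture is used. A minimal sketch of that pattern, assuming two contexts that share objects (the comments mark which side runs which call):

    // Producer side (transfer thread), after the uploads for a texture have been issued:
    GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glFlush();   // make sure the fence command reaches the driver

    // Consumer side, before sampling the texture:
    glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
    glDeleteSync(fence);
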
@@ -32,33 +32,10 @@ protected:
void setup() override;
void shutdown() override;
bool processQueueItems(const Queue& messages) override;
void transferTextureSynchronous(const gpu::Texture& texture);
void do_transfer(GLBackend::GLTexture& texturePointer);

private:
QSharedPointer<OffscreenGLCanvas> _canvas;
};

template <typename F>
void withPreservedTexture(GLenum target, F f) {
GLint boundTex = -1;
switch (target) {
case GL_TEXTURE_2D:
glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
break;

case GL_TEXTURE_CUBE_MAP:
glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
break;

default:
qFatal("Unsupported texture type");
}
(void)CHECK_GL_ERROR();

f();

glBindTexture(target, boundTex);
(void)CHECK_GL_ERROR();
}

}

@@ -12,6 +12,9 @@
#include "Texture.h"

#include <glm/gtc/constants.hpp>

#include <NumericalConstants.h>

#include "GPULogging.h"
#include "Context.h"

@@ -21,6 +24,7 @@ static int TexturePointerMetaTypeId = qRegisterMetaType<TexturePointer>();

std::atomic<uint32_t> Texture::_textureCPUCount{ 0 };
std::atomic<Texture::Size> Texture::_textureCPUMemoryUsage{ 0 };
std::atomic<Texture::Size> Texture::_allowedCPUMemoryUsage { 0 };

void Texture::updateTextureCPUMemoryUsage(Size prevObjectSize, Size newObjectSize) {
if (prevObjectSize == newObjectSize) {

@@ -57,6 +61,15 @@ uint32_t Texture::getTextureGPUTransferCount() {
return Context::getTextureGPUTransferCount();
}

Texture::Size Texture::getAllowedGPUMemoryUsage() {
return _allowedCPUMemoryUsage;
}

void Texture::setAllowedGPUMemoryUsage(Size size) {
qDebug() << "New MAX texture memory " << BYTES_TO_MB(size) << " MB";
_allowedCPUMemoryUsage = size;
}

uint8 Texture::NUM_FACES_PER_TYPE[NUM_TYPES] = { 1, 1, 1, 6 };

Texture::Pixels::Pixels(const Element& format, Size size, const Byte* bytes) :

@@ -333,10 +346,6 @@ uint16 Texture::evalNumMips() const {
return 1 + (uint16) val;
}

uint16 Texture::maxMip() const {
return _maxMip;
}

bool Texture::assignStoredMip(uint16 level, const Element& format, Size size, const Byte* bytes) {
// Check that the level accessed makes sense
if (level != 0) {

@@ -870,3 +879,18 @@ bool TextureSource::isDefined() const {
}
}

bool Texture::setMinMip(uint16 newMinMip) {
uint16 oldMinMip = _minMip;
_minMip = std::min(std::max(_minMip, newMinMip), _maxMip);
return oldMinMip != _minMip;
}

bool Texture::incremementMinMip(uint16 count) {
return setMinMip(_minMip + count);
}

Vec3u Texture::evalMipDimensions(uint16 level) const {
auto dimensions = getDimensions();
dimensions >>= level;
return glm::max(dimensions, Vec3u(1));
}

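A worked example of the new mip helpers, using an illustrative 1024x512 texture (values follow directly from the code above):

    // evalMipDimensions(0)  == (1024, 512, 1)
    // evalMipDimensions(3)  == ( 128,  64, 1)   (dimensions >>= level, clamped to 1)
    // evalMipDimensions(11) == (   1,   1, 1)   (never drops below 1)
    //
    // With _minMip initially 0 and _maxMip 10:
    //   setMinMip(3)        -> _minMip becomes 3, returns true
    //   setMinMip(1)        -> clamped to max(_minMip, 1) = 3, no change, returns false
    //   incremementMinMip() -> equivalent to setMinMip(_minMip + 1)
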
@@ -11,14 +11,15 @@
#ifndef hifi_gpu_Texture_h
#define hifi_gpu_Texture_h

#include "Resource.h"

#include <algorithm> //min max and more
#include <bitset>

#include <QMetaType>
#include <QUrl>

#include "Forward.h"
#include "Resource.h"

namespace gpu {

// The spherical harmonics are a nice tool for cubemaps, so if required, the irradiance SH can be automatically generated

@@ -141,6 +142,7 @@ protected:
class Texture : public Resource {
static std::atomic<uint32_t> _textureCPUCount;
static std::atomic<Size> _textureCPUMemoryUsage;
static std::atomic<Size> _allowedCPUMemoryUsage;
static void updateTextureCPUMemoryUsage(Size prevObjectSize, Size newObjectSize);
public:
static uint32_t getTextureCPUCount();

@@ -149,6 +151,8 @@ public:
static Size getTextureGPUMemoryUsage();
static Size getTextureGPUVirtualMemoryUsage();
static uint32_t getTextureGPUTransferCount();
static Size getAllowedGPUMemoryUsage();
static void setAllowedGPUMemoryUsage(Size size);

class Usage {
public:

@@ -313,6 +317,7 @@ public:
const Element& getTexelFormat() const { return _texelFormat; }
bool hasBorder() const { return false; }

Vec3u getDimensions() const { return Vec3u(_width, _height, _depth); }
uint16 getWidth() const { return _width; }
uint16 getHeight() const { return _height; }
uint16 getDepth() const { return _depth; }

@@ -346,6 +351,8 @@ public:

// Eval the size that the mips level SHOULD have
// not the one stored in the Texture
static const uint MIN_DIMENSION = 1;
Vec3u evalMipDimensions(uint16 level) const;
uint16 evalMipWidth(uint16 level) const { return std::max(_width >> level, 1); }
uint16 evalMipHeight(uint16 level) const { return std::max(_height >> level, 1); }
uint16 evalMipDepth(uint16 level) const { return std::max(_depth >> level, 1); }

@@ -363,7 +370,7 @@ public:

uint32 evalTotalSize() const {
uint32 size = 0;
uint16 minMipLevel = 0;
uint16 minMipLevel = minMip();
uint16 maxMipLevel = maxMip();
for (uint16 l = minMipLevel; l <= maxMipLevel; l++) {
size += evalMipSize(l);

@@ -371,10 +378,19 @@ public:
return size * getNumSlices();
}

// max mip is in the range [ 1 if no sub mips, log2(max(width, height, depth))]
// max mip is in the range [ 0 if no sub mips, log2(max(width, height, depth))]
// if autoGenerateMip is on => will provide the maxMIp level specified
// else provide the deepest mip level provided through assignMip
uint16 maxMip() const;
uint16 maxMip() const { return _maxMip; }

uint16 minMip() const { return _minMip; }

uint16 mipLevels() const { return _maxMip + 1; }

uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }

bool setMinMip(uint16 newMinMip);
bool incremementMinMip(uint16 count = 1);

// Generate the mips automatically
// But the sysmem version is not available

@@ -451,7 +467,8 @@ protected:
uint16 _numSamples = 1;
uint16 _numSlices = 1;

uint16 _maxMip = 0;
uint16 _maxMip { 0 };
uint16 _minMip { 0 };

Type _type = TEX_1D;

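Because evalTotalSize() now starts at minMip() instead of 0, raising the minimum mip immediately shrinks the texture's reported size, and the resident GPU size once the downsample copy replaces the original. Illustrative numbers for an uncompressed texture at 4 bytes per texel:

    // A 1024x1024 texture with a full mip chain:
    //   mip 0 alone           : 1024 * 1024 * 4  = 4.00 MB
    //   full chain, mips 0-10 : ~5.33 MB
    // After incremementMinMip() raises minMip to 1, evalTotalSize() counts mips 1-10 only:
    //   ~1.33 MB, i.e. roughly 75% of the texture's memory can be released.
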
@@ -120,9 +120,12 @@ QSharedPointer<Resource> ResourceCacheSharedItems::getHighestPendingRequest() {
}

ResourceCache::ResourceCache(QObject* parent) : QObject(parent) {
auto& domainHandler = DependencyManager::get<NodeList>()->getDomainHandler();
connect(&domainHandler, &DomainHandler::disconnectedFromDomain,
auto nodeList = DependencyManager::get<NodeList>();
if (nodeList) {
auto& domainHandler = nodeList->getDomainHandler();
connect(&domainHandler, &DomainHandler::disconnectedFromDomain,
this, &ResourceCache::clearATPAssets, Qt::DirectConnection);
}
}

ResourceCache::~ResourceCache() {

@@ -3,5 +3,9 @@ set(TARGET_NAME shared)
# TODO: there isn't really a good reason to have Script linked here - let's get what is requiring it out (RegisteredMetaTypes.cpp)
setup_hifi_library(Gui Network Script Widgets)

if (WIN32)
target_link_libraries(${TARGET_NAME} Wbemuuid.lib)
endif()

target_zlib()
target_nsight()

libraries/shared/src/GPUIdent.cpp (new file, 153 lines)
@@ -0,0 +1,153 @@
//
// GPUIdent.cpp
// libraries/shared/src
//
// Created by Howard Stearns on 4/16/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html

#include <QtCore/QtGlobal>

#ifdef Q_OS_WIN
#include <atlbase.h>
#include <Wbemidl.h>

#elif defined(Q_OS_MAC)
#include <OpenGL/OpenGL.h>
#endif

#include "SharedLogging.h"
#include "GPUIdent.h"

GPUIdent GPUIdent::_instance {};

GPUIdent* GPUIdent::ensureQuery(const QString& vendor, const QString& renderer) {
// Expects vendor and renderer to be supplied on the first call. Results are cached and the arguments can then be dropped.
// Too bad OpenGL doesn't seem to have a way to get the specific device id.
if (_isQueried) {
return this;
}
_isQueried = true; // Don't try again, even if not _isValid;
#if (defined Q_OS_MAC)
GLuint cglDisplayMask = -1; // Iterate over all of them.
CGLRendererInfoObj rendererInfo;
GLint rendererInfoCount;
CGLError err = CGLQueryRendererInfo(cglDisplayMask, &rendererInfo, &rendererInfoCount);
GLint j, numRenderers = 0, deviceVRAM, bestVRAM = 0;
err = CGLQueryRendererInfo(cglDisplayMask, &rendererInfo, &numRenderers);
if (0 == err) {
// Iterate over all of them and use the figure for the one with the most VRAM,
// on the assumption that this is the one that will actually be used.
CGLDescribeRenderer(rendererInfo, 0, kCGLRPRendererCount, &numRenderers);
for (j = 0; j < numRenderers; j++) {
CGLDescribeRenderer(rendererInfo, j, kCGLRPVideoMemoryMegabytes, &deviceVRAM);
if (deviceVRAM > bestVRAM) {
bestVRAM = deviceVRAM;
}
}
}
_dedicatedMemoryMB = bestVRAM;
CGLDestroyRendererInfo(rendererInfo);

#elif defined(Q_OS_WIN)
// COM must be initialized already using CoInitialize. E.g., by the audio subsystem.
CComPtr<IWbemLocator> spLoc = NULL;
HRESULT hr = CoCreateInstance(CLSID_WbemLocator, 0, CLSCTX_SERVER, IID_IWbemLocator, (LPVOID *)&spLoc);
if (hr != S_OK || spLoc == NULL) {
qCDebug(shared) << "Unable to connect to WMI";
return this;
}

CComBSTR bstrNamespace(_T("\\\\.\\root\\CIMV2"));
CComPtr<IWbemServices> spServices;

// Connect to CIM
hr = spLoc->ConnectServer(bstrNamespace, NULL, NULL, 0, NULL, 0, 0, &spServices);
if (hr != WBEM_S_NO_ERROR) {
qCDebug(shared) << "Unable to connect to CIM";
return this;
}

// Switch the security level to IMPERSONATE so that provider will grant access to system-level objects.
hr = CoSetProxyBlanket(spServices, RPC_C_AUTHN_WINNT, RPC_C_AUTHZ_NONE, NULL, RPC_C_AUTHN_LEVEL_CALL, RPC_C_IMP_LEVEL_IMPERSONATE, NULL, EOAC_DEFAULT);
if (hr != S_OK) {
qCDebug(shared) << "Unable to authorize access to system objects.";
return this;
}

// Get the vid controller
CComPtr<IEnumWbemClassObject> spEnumInst = NULL;
hr = spServices->CreateInstanceEnum(CComBSTR("Win32_VideoController"), WBEM_FLAG_SHALLOW, NULL, &spEnumInst);
if (hr != WBEM_S_NO_ERROR || spEnumInst == NULL) {
qCDebug(shared) << "Unable to reach video controller.";
return this;
}

// I'd love to find a better way to learn which graphics adapter is the one we're using.
// Alas, no combination of vendor, renderer, and adapter name seems to be a substring of the others.
// Here we get a list of words that we'll match adapter names against. Most matches wins.
// Alas, this won't work when someone has multiple variants of the same card installed.
QRegExp wordMatcher{ "\\W" };
QStringList words;
words << vendor.toUpper().split(wordMatcher) << renderer.toUpper().split(wordMatcher);
words.removeAll("");
words.removeDuplicates();
int bestCount = 0;

ULONG uNumOfInstances = 0;
CComPtr<IWbemClassObject> spInstance = NULL;
hr = spEnumInst->Next(WBEM_INFINITE, 1, &spInstance, &uNumOfInstances);
while (hr == S_OK && spInstance && uNumOfInstances) {
// Get properties from the object
CComVariant var;

hr = spInstance->Get(CComBSTR(_T("Name")), 0, &var, 0, 0);
if (hr != S_OK) {
qCDebug(shared) << "Unable to get video name";
continue;
}
char sString[256];
WideCharToMultiByte(CP_ACP, 0, var.bstrVal, -1, sString, sizeof(sString), NULL, NULL);
QStringList adapterWords = QString(sString).toUpper().split(wordMatcher);
adapterWords.removeAll("");
adapterWords.removeDuplicates();
int count = 0;
for (const QString& adapterWord : adapterWords) {
if (words.contains(adapterWord)) {
count++;
}
}
if (count > bestCount) {
bestCount = count;
_name = sString;

hr = spInstance->Get(CComBSTR(_T("DriverVersion")), 0, &var, 0, 0);
if (hr == S_OK) {
char sString[256];
WideCharToMultiByte(CP_ACP, 0, var.bstrVal, -1, sString, sizeof(sString), NULL, NULL);
_driver = sString;
}
else {
qCDebug(shared) << "Unable to get video driver";
}

hr = spInstance->Get(CComBSTR(_T("AdapterRAM")), 0, &var, 0, 0);
if (hr == S_OK) {
var.ChangeType(CIM_UINT64); // We're going to receive some integral type, but it might not be uint.
// We might be hosed here. The parameter is documented to be UINT32, but that's only 4 GB!
const ULONGLONG BYTES_PER_MEGABYTE = 1024 * 1024;
_dedicatedMemoryMB = (uint) (var.ullVal / BYTES_PER_MEGABYTE);
}
else {
qCDebug(shared) << "Unable to get video AdapterRAM";
}

_isValid = true;
}
hr = spEnumInst->Next(WBEM_INFINITE, 1, &spInstance, &uNumOfInstances);
}
#endif
return this;
}

libraries/shared/src/GPUIdent.h (new file, 36 lines)
@@ -0,0 +1,36 @@
//
// GPUIdent.h
// libraries/shared/src
//
// Provides information about the GPU
//
// Created by Howard Stearns on 4/16/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_GPUIdent_h
#define hifi_GPUIdent_h

class GPUIdent
{
public:
unsigned int getMemory() { return _dedicatedMemoryMB; }
QString getName() { return _name; }
QString getDriver() { return _driver; }
bool isValid() { return _isValid; }
// E.g., GPUIdent::getInstance()->getMemory();
static GPUIdent* getInstance(const QString& vendor = "", const QString& renderer = "") { return _instance.ensureQuery(vendor, renderer); }
private:
uint _dedicatedMemoryMB { 0 };
QString _name { "" };
QString _driver { "" };
bool _isQueried { false };
bool _isValid { false };
static GPUIdent _instance;
GPUIdent* ensureQuery(const QString& vendor, const QString& renderer);
};

#endif // hifi_GPUIdent_h

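A usage sketch for GPUIdent, mirroring the comment in the header and the call added in GLBackend::init(); the vendor/renderer variables are assumed to come from glGetString, and the logging line is illustrative:

    // First call: pass the GL strings so the WMI/CGL matching has something to work with.
    GPUIdent* ident = GPUIdent::getInstance(vendor, renderer);
    if (ident->isValid()) {
        qCDebug(shared) << ident->getName() << ident->getDriver() << ident->getMemory() << "MB";
    }
    // Later calls reuse the cached result:
    size_t budgetBytes = MB_TO_BYTES(GPUIdent::getInstance()->getMemory());
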
@@ -44,4 +44,13 @@ const int BYTES_PER_KILOBYTE = 1000;
const int BYTES_PER_KILOBIT = BYTES_PER_KILOBYTE / BITS_IN_BYTE;
const int KILO_PER_MEGA = 1000;

#define KB_TO_BYTES_SHIFT 10
#define MB_TO_BYTES_SHIFT 20

#define MB_TO_BYTES(X) ((size_t)(X) << MB_TO_BYTES_SHIFT)
#define KB_TO_BYTES(X) ((size_t)(X) << KB_TO_BYTES_SHIFT)

#define BYTES_TO_MB(X) (X >> MB_TO_BYTES_SHIFT)
#define BYTES_TO_KB(X) (X >> KB_TO_BYTES_SHIFT)

#endif // hifi_NumericalConstants_h

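A quick sanity check of the new byte-conversion macros (comment only; the values are exact):

    //   MB_TO_BYTES(64)  == 64  << 20 == 67,108,864 bytes
    //   KB_TO_BYTES(512) == 512 << 10 == 524,288 bytes
    //   BYTES_TO_MB(MB_TO_BYTES(2048)) == 2048
    // The (size_t) cast in MB_TO_BYTES/KB_TO_BYTES matters: without it, 2048 << 20 would
    // overflow a 32-bit int. BYTES_TO_MB/BYTES_TO_KB assume the argument is already a wide type.
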
libraries/shared/src/shared/OnceEvery.h (new file, 33 lines)
@@ -0,0 +1,33 @@
//
// Created by Bradley Austin Davis on 2016/04/19
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <stdint.h>
#include <functional>

#include "../SharedUtil.h"
#include "../NumericalConstants.h"

template <size_t MS = 1000>
class OnceEvery {
public:
OnceEvery(std::function<void()> f) : _f(f) { }

bool maybeExecute() {
uint64_t now = usecTimestampNow();
if ((now - _lastRun) > (MS * USECS_PER_MSEC)) {
_f();
_lastRun = now;
return true;
}
return false;
}

private:
uint64_t _lastRun { 0 };
std::function<void()> _f;
};

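A usage sketch for OnceEvery, assuming a per-frame code path that wants to log at most once per second; the include path and the logged expression are illustrative:

    #include <shared/OnceEvery.h>   // illustrative include path

    static OnceEvery<1000> logTextureMemory([] {
        qCDebug(gpulogging) << "GPU texture memory (bytes):" << gpu::Context::getTextureGPUMemoryUsage();
    });

    void perFrameUpdate() {
        // Runs the lambda at most once every 1000 ms, no matter how often this is called.
        logTextureMemory.maybeExecute();
    }
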
@@ -445,7 +445,7 @@ public:
positionView = gpu::BufferView(vertexBuffer, 0, vertexBuffer->getSize(), SHAPE_VERTEX_STRIDE, POSITION_ELEMENT);
textureView = gpu::BufferView(vertexBuffer, SHAPE_TEXTURES_OFFSET, vertexBuffer->getSize(), SHAPE_VERTEX_STRIDE, TEXTURE_ELEMENT);
texture = DependencyManager::get<TextureCache>()->getImageTexture("C:/Users/bdavis/Git/openvr/samples/bin/cube_texture.png");
//texture = DependencyManager::get<TextureCache>()->getImageTexture("H:/test.png");
// texture = DependencyManager::get<TextureCache>()->getImageTexture("H:/test.png");
//texture = DependencyManager::get<TextureCache>()->getImageTexture("H:/crate_blue.fbm/lambert8SG_Normal_OpenGL.png");

auto shader = makeShader(VERTEX_SHADER, FRAGMENT_SHADER, gpu::Shader::BindingSet {});

@@ -456,6 +456,14 @@ public:
vertexFormat->setAttribute(gpu::Stream::POSITION);
vertexFormat->setAttribute(gpu::Stream::TEXCOORD);
});

static auto start = usecTimestampNow();
auto now = usecTimestampNow();
if ((now - start) > USECS_PER_SECOND * 1) {
start = now;
texture->incremementMinMip();
}

batch.setPipeline(pipeline);
batch.setInputBuffer(gpu::Stream::POSITION, positionView);
batch.setInputBuffer(gpu::Stream::TEXCOORD, textureView);

@@ -493,7 +501,7 @@ public:

//drawFloorGrid(batch);
//drawSimpleShapes(batch);
drawCenterShape(batch);
//drawCenterShape(batch);
drawTerrain(batch);

_context->render(batch);
