Merge pull request #5473 from samcake/punk

GLCORE : Merged upstream master into core
Brad Davis 2015-07-31 16:21:04 -07:00
commit d9d0f8dcfd
54 changed files with 519 additions and 318 deletions

View file

@ -70,7 +70,7 @@ void Agent::handleOctreePacket(QSharedPointer<NLPacket> packet, SharedNodePointe
// pull out the piggybacked packet and create a new QSharedPointer<NLPacket> for it
int piggyBackedSizeWithHeader = packet->getPayloadSize() - statsMessageLength;
std::unique_ptr<char> buffer = std::unique_ptr<char>(new char[piggyBackedSizeWithHeader]);
auto buffer = std::unique_ptr<char[]>(new char[piggyBackedSizeWithHeader]);
memcpy(buffer.get(), packet->getPayload() + statsMessageLength, piggyBackedSizeWithHeader);
auto newPacket = NLPacket::fromReceivedPacket(std::move(buffer), piggyBackedSizeWithHeader, packet->getSenderSockAddr());
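
The switch from std::unique_ptr<char> to std::unique_ptr<char[]> is more than cosmetic: memory allocated with new char[n] must be released with delete[], and only the array specialization does that (it also provides operator[] instead of operator*). A minimal sketch of the pattern, with the payload source and size left illustrative:

#include <cstring>
#include <memory>

// Copy a piggybacked payload into a freshly allocated buffer that is released
// with delete[] when the unique_ptr is destroyed or its ownership is moved on.
std::unique_ptr<char[]> copyPayload(const char* payload, int payloadSize) {
    auto buffer = std::unique_ptr<char[]>(new char[payloadSize]); // array deleter
    std::memcpy(buffer.get(), payload, payloadSize);
    return buffer; // ownership transfers, as with std::move(buffer) in the diff
}
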
@ -107,6 +107,7 @@ void Agent::handleAudioPacket(QSharedPointer<NLPacket> packet) {
}
const QString AGENT_LOGGING_NAME = "agent";
const int PING_INTERVAL = 1000;
void Agent::run() {
ThreadedAssignment::commonInit(AGENT_LOGGING_NAME, NodeType::Agent);
@ -118,6 +119,10 @@ void Agent::run() {
<< NodeType::EntityServer
);
_pingTimer = new QTimer(this);
connect(_pingTimer, SIGNAL(timeout()), SLOT(sendPingRequests()));
_pingTimer->start(PING_INTERVAL);
// figure out the URL for the script for this agent assignment
QUrl scriptURL;
if (_payload.isEmpty()) {
@ -193,7 +198,27 @@ void Agent::run() {
void Agent::aboutToFinish() {
_scriptEngine.stop();
_pingTimer->stop();
delete _pingTimer;
// our entity tree is going to go away so tell that to the EntityScriptingInterface
DependencyManager::get<EntityScriptingInterface>()->setEntityTree(NULL);
}
void Agent::sendPingRequests() {
auto nodeList = DependencyManager::get<NodeList>();
nodeList->eachMatchingNode([](const SharedNodePointer& node)->bool {
switch (node->getType()) {
case NodeType::AvatarMixer:
case NodeType::AudioMixer:
case NodeType::EntityServer:
return true;
default:
return false;
}
}, [nodeList](const SharedNodePointer& node) {
nodeList->sendPacket(nodeList->constructPingPacket(), *node);
});
}
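
Together with the aboutToFinish() hunk above, the new ping code is a straightforward QTimer lifecycle: create the timer with the agent as parent, fire sendPingRequests() every PING_INTERVAL milliseconds, and stop and delete the timer before the node list goes away. A hedged sketch of that lifecycle in isolation, with the timeout body only indicated:

#include <QObject>
#include <QTimer>

// Minimal ping-timer lifecycle; the interval mirrors PING_INTERVAL in the diff
// and the lambda stands in for sendPingRequests().
void startPinging(QObject* parent) {
    const int PING_INTERVAL = 1000; // ms
    auto pingTimer = new QTimer(parent);
    QObject::connect(pingTimer, &QTimer::timeout, parent, [] {
        // build a ping packet and send it to each avatar mixer, audio mixer
        // and entity server, as sendPingRequests() does above
    });
    pingTimer->start(PING_INTERVAL);
    // on shutdown: pingTimer->stop(); delete pingTimer;   (see aboutToFinish)
}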

View file

@ -58,11 +58,13 @@ private slots:
void handleAudioPacket(QSharedPointer<NLPacket> packet);
void handleOctreePacket(QSharedPointer<NLPacket> packet, SharedNodePointer senderNode);
void handleJurisdictionPacket(QSharedPointer<NLPacket> packet, SharedNodePointer senderNode);
void sendPingRequests();
private:
ScriptEngine _scriptEngine;
EntityEditPacketSender _entityEditSender;
EntityTreeHeadlessViewer _entityViewer;
QTimer* _pingTimer;
MixedAudioStream _receivedAudioStream;
float _lastReceivedAudioLoudness;

View file

@ -927,9 +927,9 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
const QString USE_STDEV_FOR_DESIRED_CALC_JSON_KEY = "use_stdev_for_desired_calc";
_streamSettings._useStDevForJitterCalc = audioBufferGroupObject[USE_STDEV_FOR_DESIRED_CALC_JSON_KEY].toBool();
if (_streamSettings._useStDevForJitterCalc) {
qDebug() << "Using Philip's stdev method for jitter calc if dynamic jitter buffers enabled";
qDebug() << "Using stdev method for jitter calc if dynamic jitter buffers enabled";
} else {
qDebug() << "Using Fred's max-gap method for jitter calc if dynamic jitter buffers enabled";
qDebug() << "Using max-gap method for jitter calc if dynamic jitter buffers enabled";
}
const QString WINDOW_STARVE_THRESHOLD_JSON_KEY = "window_starve_threshold";

View file

@ -38,5 +38,19 @@ if (WIN32)
find_package_handle_standard_args(GLEW DEFAULT_MSG GLEW_INCLUDE_DIRS GLEW_LIBRARIES GLEW_DLL_PATH)
add_paths_to_fixup_libs(${GLEW_DLL_PATH})
elseif (APPLE)
else ()
find_path(GLEW_INCLUDE_DIR GL/glew.h)
find_library(GLEW_LIBRARY NAMES GLEW glew32 glew glew32s PATH_SUFFIXES lib64)
set(GLEW_INCLUDE_DIRS ${GLEW_INCLUDE_DIR})
set(GLEW_LIBRARIES ${GLEW_LIBRARY})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GLEW
REQUIRED_VARS GLEW_INCLUDE_DIR GLEW_LIBRARY)
mark_as_advanced(GLEW_INCLUDE_DIR GLEW_LIBRARY)
endif ()

View file

@ -50,7 +50,7 @@ void IceServer::processDatagrams() {
while (_serverSocket.hasPendingDatagrams()) {
// setup a buffer to read the packet into
int packetSizeWithHeader = _serverSocket.pendingDatagramSize();
std::unique_ptr<char> buffer = std::unique_ptr<char>(new char[packetSizeWithHeader]);
auto buffer = std::unique_ptr<char[]>(new char[packetSizeWithHeader]);
_serverSocket.readDatagram(buffer.get(), packetSizeWithHeader,
sendingSockAddr.getAddressPointer(), sendingSockAddr.getPortPointer());

View file

@ -218,33 +218,13 @@ else (APPLE)
"${PROJECT_SOURCE_DIR}/resources"
$<TARGET_FILE_DIR:${TARGET_NAME}>/resources
)
find_package(OpenGL REQUIRED)
if (${OPENGL_INCLUDE_DIR})
include_directories(SYSTEM "${OPENGL_INCLUDE_DIR}")
endif ()
target_link_libraries(${TARGET_NAME} "${OPENGL_LIBRARY}")
# link target to external libraries
if (WIN32)
add_dependency_external_projects(glew)
find_package(GLEW REQUIRED)
target_include_directories(${TARGET_NAME} PRIVATE ${GLEW_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${GLEW_LIBRARIES} wsock32.lib opengl32.lib Winmm.lib)
if (USE_NSIGHT)
# try to find the Nsight package and add it to the build if we find it
find_package(NSIGHT)
if (NSIGHT_FOUND)
include_directories(${NSIGHT_INCLUDE_DIRS})
add_definitions(-DNSIGHT_FOUND)
target_link_libraries(${TARGET_NAME} "${NSIGHT_LIBRARIES}")
endif ()
endif()
# target_link_libraries(${TARGET_NAME} wsock32.lib Winmm.lib)
target_link_libraries(${TARGET_NAME} wsock32.lib Winmm.lib)
else (WIN32)
# Nothing else required on linux apparently
endif()
endif (APPLE)

View file

@ -772,8 +772,9 @@ void Application::initializeGL() {
}
#endif
// Where the gpuContext is created and where the TRUE Backend is created and assigned
_gpuContext = std::make_shared<gpu::Context>(new gpu::GLBackend());
// Where the gpuContext is initialized and where the TRUE Backend is created and assigned
gpu::Context::init<gpu::GLBackend>();
_gpuContext = std::make_shared<gpu::Context>();
initDisplay();
qCDebug(interfaceapp, "Initialized Display.");

View file

@ -13,7 +13,6 @@
#define hifi_GLCanvas_h
#include <QDebug>
#include <gpu/GPUConfig.h>
#include <QGLWidget>
#include <QTimer>

View file

@ -106,7 +106,7 @@ bool ModelPackager::loadModel() {
}
qCDebug(interfaceapp) << "Reading FBX file : " << _fbxInfo.filePath();
QByteArray fbxContents = fbx.readAll();
_geometry = readFBX(fbxContents, QVariantHash());
_geometry = readFBX(fbxContents, QVariantHash(), _fbxInfo.filePath());
// make sure we have some basic mappings
populateBasicMapping(_mapping, _fbxInfo.filePath(), _geometry);

View file

@ -14,8 +14,6 @@
#include <mutex>
#include <QElapsedTimer>
#include <gpu/GPUConfig.h>
#include <gpu/Batch.h>
#include <gpu/Context.h>
#include <NumericalConstants.h>
#include <DependencyManager.h>
@ -208,8 +206,6 @@ void Stars::render(RenderArgs* renderArgs, float alpha) {
batch._glUniform1f(_timeSlot, secs);
geometryCache->renderUnitCube(batch);
glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
static const size_t VERTEX_STRIDE = sizeof(StarVertex);
size_t offset = offsetof(StarVertex, position);
gpu::BufferView posView(vertexBuffer, offset, vertexBuffer->getSize(), VERTEX_STRIDE, positionElement);
@ -218,10 +214,11 @@ void Stars::render(RenderArgs* renderArgs, float alpha) {
// Render the stars
batch.setPipeline(_starsPipeline);
batch.setInputFormat(streamFormat);
batch.setInputBuffer(VERTICES_SLOT, posView);
batch.setInputBuffer(COLOR_SLOT, colView);
batch.draw(gpu::Primitive::POINTS, STARFIELD_NUM_STARS);
renderArgs->_context->render(batch);
}
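
The Stars change also shows the drawing pattern this branch converges on: record state changes and draw calls into a gpu::Batch, then submit the whole batch to the context in one go. A hedged sketch of that shape, using the calls visible in the diff (the stream-format parameter type and slot constants are assumptions taken from the surrounding gpu API):

// Record-then-submit drawing; slot numbers and counts are illustrative.
void drawPoints(gpu::Context* context,
                const gpu::PipelinePointer& pipeline,
                const gpu::Stream::FormatPointer& streamFormat,
                const gpu::BufferView& positions,
                const gpu::BufferView& colors,
                int pointCount) {
    gpu::Batch batch;                        // commands are only recorded here...
    batch.setPipeline(pipeline);
    batch.setInputFormat(streamFormat);
    batch.setInputBuffer(0 /* VERTICES_SLOT */, positions);
    batch.setInputBuffer(1 /* COLOR_SLOT */, colors);
    batch.draw(gpu::Primitive::POINTS, pointCount);
    context->render(batch);                  // ...and executed by the backend here
}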

View file

@ -11,16 +11,16 @@
//
#include "OculusManager.h"
#include <gpu/GPUConfig.h>
#include <glm/glm.hpp>
#include <QDesktopWidget>
#include <QGuiApplication>
#include <gpu/GPUConfig.h>
#include <QScreen>
#include <CursorManager.h>
#include <QOpenGLTimerQuery>
#include <QGLWidget>
#include <CursorManager.h>
#include <glm/glm.hpp>
#include <avatar/AvatarManager.h>
#include <avatar/MyAvatar.h>

View file

@ -9,13 +9,14 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "TV3DManager.h"
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#include "gpu/GLBackend.h"
#include "Application.h"
#include <RenderArgs.h>
#include "TV3DManager.h"
#include "Application.h"
#include "Menu.h"
int TV3DManager::_screenWidth = 1;
@ -63,6 +64,7 @@ void TV3DManager::setFrustum(Camera& whichCamera) {
}
void TV3DManager::configureCamera(Camera& whichCamera, int screenWidth, int screenHeight) {
#ifdef THIS_CURRENTLY_BROKEN_WAITING_FOR_DISPLAY_PLUGINS
if (screenHeight == 0) {
screenHeight = 1; // prevent divide by 0
}
@ -72,6 +74,7 @@ void TV3DManager::configureCamera(Camera& whichCamera, int screenWidth, int scre
setFrustum(whichCamera);
glViewport (0, 0, _screenWidth, _screenHeight); // sets drawing viewport
#endif
}
void TV3DManager::display(RenderArgs* renderArgs, Camera& whichCamera) {

View file

@ -17,6 +17,7 @@
#include <glm/glm.hpp>
class Camera;
class RenderArgs;
struct eyeFrustum {
double left;

View file

@ -57,7 +57,7 @@ void OctreePacketProcessor::processPacket(QSharedPointer<NLPacket> packet, Share
if (piggybackBytes) {
// construct a new packet from the piggybacked one
std::unique_ptr<char> buffer = std::unique_ptr<char>(new char[piggybackBytes]);
auto buffer = std::unique_ptr<char[]>(new char[piggybackBytes]);
memcpy(buffer.get(), packet->getPayload() + statsMessageLength, piggybackBytes);
auto newPacket = NLPacket::fromReceivedPacket(std::move(buffer), piggybackBytes, packet->getSenderSockAddr());

View file

@ -62,7 +62,7 @@ void AnimationReader::run() {
QSharedPointer<Resource> animation = _animation.toStrongRef();
if (!animation.isNull()) {
QMetaObject::invokeMethod(animation.data(), "setGeometry",
Q_ARG(const FBXGeometry&, readFBX(_reply->readAll(), QVariantHash())));
Q_ARG(const FBXGeometry&, readFBX(_reply->readAll(), QVariantHash(), _reply->property("url").toString())));
}
_reply->deleteLater();
}

View file

@ -35,7 +35,7 @@
#include <PolyVoxCore/Material.h>
#include "model/Geometry.h"
#include "gpu/GLBackend.h"
#include "gpu/Context.h"
#include "EntityTreeRenderer.h"
#include "RenderablePolyVoxEntityItem.h"

View file

@ -8,7 +8,6 @@
#include "RenderableWebEntityItem.h"
#include <gpu/GPUConfig.h>
#include <QMouseEvent>
#include <QQuickItem>
#include <QQuickWindow>
@ -24,7 +23,7 @@
#include <GLMHelpers.h>
#include <PathUtils.h>
#include <TextureCache.h>
#include <gpu/GLBackend.h>
#include <gpu/Context.h>
#include "EntityTreeRenderer.h"

View file

@ -114,7 +114,8 @@ _glowLevelChanged(false),
_localRenderAlphaChanged(false),
_defaultSettings(true),
_naturalDimensions(1.0f, 1.0f, 1.0f)
_naturalDimensions(1.0f, 1.0f, 1.0f),
_naturalPosition(0.0f, 0.0f, 0.0f)
{
}
@ -128,6 +129,11 @@ void EntityItemProperties::setSittingPoints(const QVector<SittingPoint>& sitting
}
}
void EntityItemProperties::calculateNaturalPosition(const glm::vec3& min, const glm::vec3& max) {
glm::vec3 halfDimension = (max - min) / 2.0f;
_naturalPosition = max - halfDimension;
}
bool EntityItemProperties::animationSettingsChanged() const {
return _animationSettingsChanged;
}
@ -378,6 +384,7 @@ QScriptValue EntityItemProperties::copyToScriptValue(QScriptEngine* engine, bool
COPY_PROPERTY_TO_QSCRIPTVALUE(dimensions);
if (!skipDefaults) {
COPY_PROPERTY_TO_QSCRIPTVALUE(naturalDimensions); // gettable, but not settable
COPY_PROPERTY_TO_QSCRIPTVALUE(naturalPosition);
}
COPY_PROPERTY_TO_QSCRIPTVALUE(rotation);
COPY_PROPERTY_TO_QSCRIPTVALUE(velocity);

View file

@ -192,7 +192,10 @@ public:
const glm::vec3& getNaturalDimensions() const { return _naturalDimensions; }
void setNaturalDimensions(const glm::vec3& value) { _naturalDimensions = value; }
const glm::vec3& getNaturalPosition() const { return _naturalPosition; }
void calculateNaturalPosition(const glm::vec3& min, const glm::vec3& max);
const QStringList& getTextureNames() const { return _textureNames; }
void setTextureNames(const QStringList& value) { _textureNames = value; }
@ -232,6 +235,7 @@ private:
QVector<SittingPoint> _sittingPoints;
QStringList _textureNames;
glm::vec3 _naturalDimensions;
glm::vec3 _naturalPosition;
};
Q_DECLARE_METATYPE(EntityItemProperties);

View file

@ -118,6 +118,7 @@ EntityItemProperties EntityScriptingInterface::getEntityProperties(QUuid identit
results.setSittingPoints(geometry->sittingPoints);
Extents meshExtents = geometry->getUnscaledMeshExtents();
results.setNaturalDimensions(meshExtents.maximum - meshExtents.minimum);
results.calculateNaturalPosition(meshExtents.minimum, meshExtents.maximum);
}
}
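
For reference, the natural position computed by calculateNaturalPosition() is simply the center of the unscaled mesh extents, paired with the natural dimensions (max - min) set on the line above it. A small sketch of that relationship, with glm as the only dependency (the struct and function names are illustrative):

#include <glm/glm.hpp>

// Natural bounds derived from a model's unscaled mesh extents.
struct NaturalBounds {
    glm::vec3 dimensions; // max - min
    glm::vec3 position;   // max - (max - min) / 2, i.e. the box center (min + max) / 2
};

NaturalBounds naturalBoundsFromExtents(const glm::vec3& min, const glm::vec3& max) {
    glm::vec3 halfDimension = (max - min) / 2.0f;
    return { max - min, max - halfDimension };
}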

View file

@ -155,9 +155,9 @@ void ParticleEffectEntityItem::computeAndUpdateDimensions() {
float yMin = std::min(yApex, yEnd);
// times 2 because dimensions are diameters not radii.
glm::vec3 dims(2.0f * std::max(fabs(xMin), fabs(xMax)),
2.0f * std::max(fabs(yMin), fabs(yMax)),
2.0f * std::max(fabs(zMin), fabs(zMax)));
glm::vec3 dims(2.0f * std::max(fabsf(xMin), fabsf(xMax)),
2.0f * std::max(fabsf(yMin), fabsf(yMax)),
2.0f * std::max(fabsf(zMin), fabsf(zMax)));
EntityItem::setDimensions(dims);
}
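
The fabs() to fabsf() change keeps the whole expression in single precision: the C fabs() takes and returns double, so std::max would compare doubles and the result would be narrowed back to float inside the vec3 constructor. A tiny sketch of the idiom (the function name is illustrative):

#include <algorithm>
#include <cmath>

// Largest absolute extent along one axis, doubled because dimensions are
// diameters rather than radii; fabsf() keeps the math in float throughout.
float axisDiameter(float minCoord, float maxCoord) {
    return 2.0f * std::max(fabsf(minCoord), fabsf(maxCoord));
}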

View file

@ -17,6 +17,7 @@
#include <QTextStream>
#include <QtDebug>
#include <QtEndian>
#include <QFileInfo>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtx/quaternion.hpp>
@ -1454,9 +1455,21 @@ void buildModelMesh(ExtractedMesh& extracted) {
}
#endif // USE_MODEL_MESH
QByteArray fileOnUrl(const QByteArray& filenameString, const QString& url) {
QString path = QFileInfo(url).path();
QByteArray filename = filenameString;
QFileInfo checkFile(path + "/" + filename.replace('\\', '/'));
// check if the file exists at the path given by RelativeFilename
if (checkFile.exists() && checkFile.isFile()) {
filename = filename.replace('\\', '/');
} else {
// the relative path doesn't resolve against the FBX's directory, so keep just the basename and assume the texture sits alongside the FBX
filename = filename.mid(qMax(filename.lastIndexOf('\\'), filename.lastIndexOf('/')) + 1);
}
return filename;
}
FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping, bool loadLightmaps, float lightmapLevel) {
FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping, const QString& url, bool loadLightmaps, float lightmapLevel) {
QHash<QString, ExtractedMesh> meshes;
QHash<QString, QString> modelIDsToNames;
QHash<QString, int> meshIDsToMeshIndices;
@ -1781,9 +1794,8 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping,
TextureParam tex;
foreach (const FBXNode& subobject, object.children) {
if (subobject.name == "RelativeFilename") {
// trim off any path information
QByteArray filename = subobject.properties.at(0).toByteArray();
filename = filename.mid(qMax(filename.lastIndexOf('\\'), filename.lastIndexOf('/')) + 1);
filename = fileOnUrl(filename, url);
textureFilenames.insert(getID(object.properties), filename);
} else if (subobject.name == "TextureName") {
// trim the name from the timestamp
@ -1857,7 +1869,7 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping,
foreach (const FBXNode& subobject, object.children) {
if (subobject.name == "RelativeFilename") {
filename = subobject.properties.at(0).toByteArray();
filename = filename.mid(qMax(filename.lastIndexOf('\\'), filename.lastIndexOf('/')) + 1);
filename = fileOnUrl(filename, url);
} else if (subobject.name == "Content" && !subobject.properties.isEmpty()) {
content = subobject.properties.at(0).toByteArray();
@ -2717,12 +2729,12 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping,
return geometry;
}
FBXGeometry readFBX(const QByteArray& model, const QVariantHash& mapping, bool loadLightmaps, float lightmapLevel) {
FBXGeometry readFBX(const QByteArray& model, const QVariantHash& mapping, const QString& url, bool loadLightmaps, float lightmapLevel) {
QBuffer buffer(const_cast<QByteArray*>(&model));
buffer.open(QIODevice::ReadOnly);
return readFBX(&buffer, mapping, loadLightmaps, lightmapLevel);
return readFBX(&buffer, mapping, url, loadLightmaps, lightmapLevel);
}
FBXGeometry readFBX(QIODevice* device, const QVariantHash& mapping, bool loadLightmaps, float lightmapLevel) {
return extractFBXGeometry(parseFBX(device), mapping, loadLightmaps, lightmapLevel);
FBXGeometry readFBX(QIODevice* device, const QVariantHash& mapping, const QString& url, bool loadLightmaps, float lightmapLevel) {
return extractFBXGeometry(parseFBX(device), mapping, url, loadLightmaps, lightmapLevel);
}

View file

@ -271,10 +271,10 @@ Q_DECLARE_METATYPE(FBXGeometry)
/// Reads FBX geometry from the supplied model and mapping data.
/// \exception QString if an error occurs in parsing
FBXGeometry readFBX(const QByteArray& model, const QVariantHash& mapping, bool loadLightmaps = true, float lightmapLevel = 1.0f);
FBXGeometry readFBX(const QByteArray& model, const QVariantHash& mapping, const QString& url = "", bool loadLightmaps = true, float lightmapLevel = 1.0f);
/// Reads FBX geometry from the supplied model and mapping data.
/// \exception QString if an error occurs in parsing
FBXGeometry readFBX(QIODevice* device, const QVariantHash& mapping, bool loadLightmaps = true, float lightmapLevel = 1.0f);
FBXGeometry readFBX(QIODevice* device, const QVariantHash& mapping, const QString& url = "", bool loadLightmaps = true, float lightmapLevel = 1.0f);
#endif // hifi_FBXReader_h
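
The new url parameter is what lets relative texture paths inside an FBX resolve against the model's own location (see fileOnUrl() earlier in this diff); callers now pass the path or URL they loaded the model from. A hedged sketch of the calling convention, mirroring the ModelPackager and GeometryReader call sites (the wrapper function is illustrative):

#include <QByteArray>
#include <QFile>
#include <QString>
#include <QVariantHash>

#include "FBXReader.h" // declares readFBX() and FBXGeometry

// Read an FBX file and hand readFBX() its source path so that RelativeFilename
// textures are first checked against the FBX's directory before falling back
// to the bare filename.
FBXGeometry readModel(const QString& fbxPath) {
    QFile fbx(fbxPath);
    fbx.open(QIODevice::ReadOnly);
    return readFBX(fbx.readAll(), QVariantHash(), fbxPath);
}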

View file

@ -21,23 +21,26 @@ elseif (WIN32)
if (USE_NSIGHT)
# try to find the Nsight package and add it to the build if we find it
# note that this will also enable NSIGHT profilers in all the projects linking gpu
find_package(NSIGHT)
if (NSIGHT_FOUND)
include_directories(${NSIGHT_INCLUDE_DIRS})
add_definitions(-DNSIGHT_FOUND)
target_include_directories(${TARGET_NAME} PUBLIC ${NSIGHT_INCLUDE_DIRS})
target_compile_definitions(${TARGET_NAME} PUBLIC NSIGHT_FOUND)
target_link_libraries(${TARGET_NAME} "${NSIGHT_LIBRARIES}")
endif ()
endif()
elseif (ANDROID)
target_link_libraries(${TARGET_NAME} "-lGLESv3" "-lEGL")
else ()
find_package(GLEW REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLEW_INCLUDE_DIRS})
find_package(OpenGL REQUIRED)
if (${OPENGL_INCLUDE_DIR})
include_directories(SYSTEM "${OPENGL_INCLUDE_DIR}")
endif ()
target_link_libraries(${TARGET_NAME} "${OPENGL_LIBRARY}")
target_include_directories(${TARGET_NAME} PUBLIC ${OPENGL_INCLUDE_DIR})
target_link_libraries(${TARGET_NAME} "${GLEW_LIBRARIES}" "${OPENGL_LIBRARY}")
endif (APPLE)

View file

@ -8,13 +8,9 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <string.h>
#include "Batch.h"
#include "GPUConfig.h"
#include <QDebug>
#include <GLMHelpers.h>
#if defined(NSIGHT_FOUND)
#include "nvToolsExt.h"
@ -292,15 +288,3 @@ void Batch::resetStages() {
ADD_COMMAND(resetStages);
}
void push_back(Batch::Params& params, const vec3& v) {
params.push_back(v.x);
params.push_back(v.y);
params.push_back(v.z);
}
void push_back(Batch::Params& params, const vec4& v) {
params.push_back(v.x);
params.push_back(v.y);
params.push_back(v.z);
params.push_back(v.a);
}

View file

@ -11,8 +11,16 @@
<@if not GPU_CONFIG_SLH@>
<@def GPU_CONFIG_SLH@>
<@def GPU_FEATURE_PROFILE GPU_CORE@>
<@def GPU_TRANSFORM_PROFILE GPU_CORE@>
<@def VERSION_HEADER #version 410 core@>
<@if GLPROFILE == PC_GL @>
<@def GPU_FEATURE_PROFILE GPU_CORE@>
<@def GPU_TRANSFORM_PROFILE GPU_CORE@>
<@def VERSION_HEADER #version 410 core@>
<@elif GLPROFILE == MAC_GL @>
<@def GPU_FEATURE_PROFILE GPU_CORE@>
<@def GPU_TRANSFORM_PROFILE GPU_CORE@>
<@def VERSION_HEADER #version 410 core@>
<@else@>
<@def GPU_FEATURE_PROFILE GPU_CORE@>
<@def GPU_TRANSFORM_PROFILE GPU_CORE@>
<@def VERSION_HEADER #version 410 core@>
<@endif@>

View file

@ -10,13 +10,16 @@
//
#include "Context.h"
// this include should disappear! as soon as the gpu::Context is in place
#include "GLBackend.h"
using namespace gpu;
Context::Context(Backend* backend) :
_backend(backend) {
Context::CreateBackend Context::_createBackendCallback = nullptr;
Context::MakeProgram Context::_makeProgramCallback = nullptr;
std::once_flag Context::_initialized;
Context::Context() {
if (_createBackendCallback) {
_backend.reset(_createBackendCallback());
}
}
Context::Context(const Context& context) {
@ -26,8 +29,8 @@ Context::~Context() {
}
bool Context::makeProgram(Shader& shader, const Shader::BindingSet& bindings) {
if (shader.isProgram()) {
return GLBackend::makeProgram(shader, bindings);
if (shader.isProgram() && _makeProgramCallback) {
return _makeProgramCallback(shader, bindings);
}
return false;
}

View file

@ -12,6 +12,7 @@
#define hifi_gpu_Context_h
#include <assert.h>
#include <mutex>
#include "Batch.h"
@ -26,13 +27,12 @@ namespace gpu {
class Backend {
public:
virtual ~Backend() {};
virtual void render(Batch& batch) = 0;
virtual void syncCache() = 0;
virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) = 0;
class TransformObject {
public:
Mat4 _model;
@ -118,7 +118,21 @@ protected:
class Context {
public:
Context(Backend* backend);
typedef Backend* (*CreateBackend)();
typedef bool (*MakeProgram)(Shader& shader, const Shader::BindingSet& bindings);
// This one call must happen before any context is created or used (e.g. via Shader::makeProgram) in order to set up the Backend and any singleton data it needs
template <class T>
static void init() {
std::call_once(_initialized, [] {
_createBackendCallback = T::createBackend;
_makeProgramCallback = T::makeProgram;
T::init();
});
}
Context();
~Context();
void render(Batch& batch);
@ -132,13 +146,17 @@ public:
protected:
Context(const Context& context);
std::unique_ptr<Backend> _backend;
// This function can only be called by "static Shader::makeProgram()"
// makeProgramShader(...) makes a program shader ready to be used in a Batch.
// It compiles the sub shaders, links them, and defines the Slots and their bindings.
// If the shader passed is not a program, nothing happens.
static bool makeProgram(Shader& shader, const Shader::BindingSet& bindings = Shader::BindingSet());
static bool makeProgram(Shader& shader, const Shader::BindingSet& bindings);
std::unique_ptr<Backend> _backend;
static CreateBackend _createBackendCallback;
static MakeProgram _makeProgramCallback;
static std::once_flag _initialized;
friend class Shader;
};
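
The net effect of this header change is that gpu::Context no longer hard-codes a backend: a backend class supplies the static trio Context::init<T>() expects (init, createBackend, makeProgram), and the first Context constructed afterwards builds its backend through the stored callback. A minimal sketch of a class satisfying that static interface (the backend itself is purely illustrative, a do-nothing stand-in):

#include <QImage>

// Hypothetical no-op backend showing the static interface Context::init<T>() uses.
class NullBackend : public gpu::Backend {
public:
    static void init() { /* one-time global setup; Context guards this with std::call_once */ }
    static gpu::Backend* createBackend() { return new NullBackend(); }
    static bool makeProgram(gpu::Shader& shader, const gpu::Shader::BindingSet& bindings) {
        return false; // a real backend compiles and links the program here
    }

    void render(gpu::Batch& batch) override {}
    void syncCache() override {}
    void downloadFramebuffer(const gpu::FramebufferPointer& srcFramebuffer,
                             const gpu::Vec4i& region, QImage& destImage) override {}
};

// Usage: register the backend once, then construct contexts freely.
//     gpu::Context::init<NullBackend>();
//     auto context = std::make_shared<gpu::Context>();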

View file

@ -8,9 +8,9 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <mutex>
#include "GPULogging.h"
#include "GLBackendShared.h"
#include <mutex>
#include <glm/gtc/type_ptr.hpp>
using namespace gpu;
@ -63,12 +63,7 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
(&::gpu::GLBackend::do_glLineWidth),
};
GLBackend::GLBackend() :
_input(),
_transform(),
_pipeline(),
_output()
{
void GLBackend::init() {
static std::once_flag once;
std::call_once(once, [] {
qCDebug(gpulogging) << "GL Version: " << QString((const char*) glGetString(GL_VERSION));
@ -94,6 +89,13 @@ GLBackend::GLBackend() :
#endif
#if defined(Q_OS_LINUX)
GLenum err = glewInit();
if (GLEW_OK != err) {
/* Problem: glewInit failed, something is seriously wrong. */
qCDebug(gpulogging, "Error: %s\n", glewGetErrorString(err));
}
qCDebug(gpulogging, "Status: Using GLEW %s\n", glewGetString(GLEW_VERSION));
// TODO: Write the correct code for Linux...
/* if (wglewGetExtension("WGL_EXT_swap_control")) {
int swapInterval = wglGetSwapIntervalEXT();
@ -101,7 +103,18 @@ GLBackend::GLBackend() :
}*/
#endif
});
}
Backend* GLBackend::createBackend() {
return new GLBackend();
}
GLBackend::GLBackend() :
_input(),
_transform(),
_pipeline(),
_output()
{
initInput();
initTransform();
}

View file

@ -18,16 +18,21 @@
#include "GPUConfig.h"
#include "Context.h"
#include "Batch.h"
namespace gpu {
class GLBackend : public Backend {
public:
// Context Backend static interface required
friend class Context;
static void init();
static Backend* createBackend();
static bool makeProgram(Shader& shader, const Shader::BindingSet& bindings);
explicit GLBackend(bool syncCache);
GLBackend();
public:
virtual ~GLBackend();
virtual void render(Batch& batch);
@ -49,7 +54,6 @@ public:
static void checkGLStackStable(std::function<void()> f);
static bool makeProgram(Shader& shader, const Shader::BindingSet& bindings = Shader::BindingSet());
class GLBuffer : public GPUObject {
@ -91,9 +95,9 @@ public:
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
GLuint _transformObject_model = -1;
GLuint _transformCamera_viewInverse = -1;
GLuint _transformCamera_viewport = -1;
GLint _transformObject_model = -1;
GLint _transformCamera_viewInverse = -1;
GLint _transformCamera_viewport = -1;
#endif
GLShader();
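
The GLuint to GLint change for these uniform locations matters because glGetUniformLocation() returns a signed GLint, with -1 meaning the uniform is inactive or absent; storing the value unsigned breaks the usual `location >= 0` check. A small sketch of the lookup idiom (the helper name is illustrative):

#include <GL/glew.h>

// Look up a uniform location, remembering that -1 means "not found".
GLint findUniform(GLuint program, const char* name) {
    GLint location = glGetUniformLocation(program, name);
    if (location < 0) {
        // the uniform was optimized out or never declared; callers should skip the upload
    }
    return location;
}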

View file

@ -8,7 +8,6 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GPULogging.h"
#include "GLBackendShared.h"

View file

@ -8,7 +8,6 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GPULogging.h"
#include "GLBackendShared.h"
#include "Format.h"
@ -542,7 +541,12 @@ ElementResource getFormatFromGLUniform(GLenum gltype) {
};
int makeUniformSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& uniforms, Shader::SlotSet& textures, Shader::SlotSet& samplers) {
int makeUniformSlots(GLuint glprogram, const Shader::BindingSet& slotBindings,
Shader::SlotSet& uniforms, Shader::SlotSet& textures, Shader::SlotSet& samplers
#if (GPU_FEATURE_PROFILE == GPU_LEGACY)
, Shader::SlotSet& fakeBuffers
#endif
) {
GLint uniformsCount = 0;
#if (GPU_FEATURE_PROFILE == GPU_LEGACY)
@ -583,6 +587,15 @@ int makeUniformSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, S
}
if (elementResource._resource == Resource::BUFFER) {
#if (GPU_FEATURE_PROFILE == GPU_LEGACY)
// if in legacy profile, we fake the uniform buffer with an array
// this is where we detect it, assuming it's explicitly assigned a binding
auto requestedBinding = slotBindings.find(std::string(sname));
if (requestedBinding != slotBindings.end()) {
// found one buffer!
fakeBuffers.insert(Shader::Slot(sname, location, elementResource._element, elementResource._resource));
}
#endif
uniforms.insert(Shader::Slot(sname, location, elementResource._element, elementResource._resource));
} else {
// For texture/Sampler, the location is the actual binding value
@ -640,14 +653,13 @@ int makeUniformBlockSlots(GLuint glprogram, const Shader::BindingSet& slotBindin
GLchar name[NAME_LENGTH];
GLint length = 0;
GLint size = 0;
GLenum type = 0;
GLint binding = -1;
glGetActiveUniformBlockiv(glprogram, i, GL_UNIFORM_BLOCK_NAME_LENGTH, &length);
glGetActiveUniformBlockName(glprogram, i, NAME_LENGTH, &length, name);
glGetActiveUniformBlockiv(glprogram, i, GL_UNIFORM_BLOCK_BINDING, &binding);
glGetActiveUniformBlockiv(glprogram, i, GL_UNIFORM_BLOCK_DATA_SIZE, &size);
GLuint blockIndex = glGetUniformBlockIndex(glprogram, name);
// Check if there is a requested binding for this block
@ -738,8 +750,12 @@ bool GLBackend::makeProgram(Shader& shader, const Shader::BindingSet& slotBindin
Shader::SlotSet uniforms;
Shader::SlotSet textures;
Shader::SlotSet samplers;
#if (GPU_FEATURE_PROFILE == GPU_CORE)
makeUniformSlots(object->_program, slotBindings, uniforms, textures, samplers);
#else
makeUniformSlots(object->_program, slotBindings, uniforms, textures, samplers, buffers);
#endif
Shader::SlotSet inputs;
makeInputSlots(object->_program, slotBindings, inputs);

View file

@ -11,11 +11,10 @@
#ifndef hifi_gpu_GLBackend_Shared_h
#define hifi_gpu_GLBackend_Shared_h
#include "GLBackend.h"
#include <QDebug>
#include "Batch.h"
#include "GPULogging.h"
#include "GLBackend.h"
static const GLenum _primitiveToGLmode[gpu::NUM_PRIMITIVES] = {
GL_POINTS,

View file

@ -484,6 +484,7 @@ void GLBackend::syncPipelineStateCache() {
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
// Point size is always on
glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
glEnable(GL_PROGRAM_POINT_SIZE_EXT);
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);

View file

@ -34,11 +34,11 @@
#elif defined(ANDROID)
#else
#include <GL/gl.h>
#include <GL/glext.h>
#define GPU_FEATURE_PROFILE GPU_LEGACY
#define GPU_TRANSFORM_PROFILE GPU_LEGACY
#include <GL/glew.h>
#define GPU_FEATURE_PROFILE GPU_CORE
#define GPU_TRANSFORM_PROFILE GPU_CORE
#endif

View file

@ -13,13 +13,13 @@
#include <bitset>
#include <map>
#include <qurl.h>
#include <glm/glm.hpp>
#include "gpu/Resource.h"
#include "gpu/Texture.h"
#include <qurl.h>
namespace model {

View file

@ -46,7 +46,7 @@ std::unique_ptr<NLPacket> NLPacket::create(PacketType::Value type, qint64 size)
return packet;
}
std::unique_ptr<NLPacket> NLPacket::fromReceivedPacket(std::unique_ptr<char> data, qint64 size,
std::unique_ptr<NLPacket> NLPacket::fromReceivedPacket(std::unique_ptr<char[]> data, qint64 size,
const HifiSockAddr& senderSockAddr) {
// Fail with null data
Q_ASSERT(data);
@ -85,7 +85,7 @@ NLPacket::NLPacket(const NLPacket& other) : Packet(other) {
}
NLPacket::NLPacket(std::unique_ptr<char> data, qint64 size, const HifiSockAddr& senderSockAddr) :
NLPacket::NLPacket(std::unique_ptr<char[]> data, qint64 size, const HifiSockAddr& senderSockAddr) :
Packet(std::move(data), size, senderSockAddr)
{
adjustPayloadStartAndCapacity();

View file

@ -20,7 +20,7 @@ class NLPacket : public Packet {
Q_OBJECT
public:
static std::unique_ptr<NLPacket> create(PacketType::Value type, qint64 size = -1);
static std::unique_ptr<NLPacket> fromReceivedPacket(std::unique_ptr<char> data, qint64 size,
static std::unique_ptr<NLPacket> fromReceivedPacket(std::unique_ptr<char[]> data, qint64 size,
const HifiSockAddr& senderSockAddr);
// Provided for convenience, try to limit use
static std::unique_ptr<NLPacket> createCopy(const NLPacket& other);
@ -45,7 +45,7 @@ protected:
NLPacket(PacketType::Value type);
NLPacket(PacketType::Value type, qint64 size);
NLPacket(std::unique_ptr<char> data, qint64 size, const HifiSockAddr& senderSockAddr);
NLPacket(std::unique_ptr<char[]> data, qint64 size, const HifiSockAddr& senderSockAddr);
NLPacket(const NLPacket& other);
void readSourceID();

View file

@ -243,7 +243,7 @@ void PacketReceiver::processDatagrams() {
while (nodeList && nodeList->getNodeSocket().hasPendingDatagrams()) {
// setup a buffer to read the packet into
int packetSizeWithHeader = nodeList->getNodeSocket().pendingDatagramSize();
std::unique_ptr<char> buffer = std::unique_ptr<char>(new char[packetSizeWithHeader]);
auto buffer = std::unique_ptr<char[]>(new char[packetSizeWithHeader]);
// if we're supposed to drop this packet then break out here
if (_shouldDropPackets) {

View file

@ -31,7 +31,7 @@ std::unique_ptr<Packet> Packet::create(PacketType::Value type, qint64 size) {
return packet;
}
std::unique_ptr<Packet> Packet::fromReceivedPacket(std::unique_ptr<char> data, qint64 size, const HifiSockAddr& senderSockAddr) {
std::unique_ptr<Packet> Packet::fromReceivedPacket(std::unique_ptr<char[]> data, qint64 size, const HifiSockAddr& senderSockAddr) {
// Fail with invalid size
Q_ASSERT(size >= 0);
@ -82,7 +82,7 @@ Packet::Packet(PacketType::Value type, qint64 size) :
}
}
Packet::Packet(std::unique_ptr<char> data, qint64 size, const HifiSockAddr& senderSockAddr) :
Packet::Packet(std::unique_ptr<char[]> data, qint64 size, const HifiSockAddr& senderSockAddr) :
_packetSize(size),
_packet(std::move(data)),
_senderSockAddr(senderSockAddr)
@ -110,7 +110,7 @@ Packet& Packet::operator=(const Packet& other) {
_type = other._type;
_packetSize = other._packetSize;
_packet = std::unique_ptr<char>(new char[_packetSize]);
_packet = std::unique_ptr<char[]>(new char[_packetSize]);
memcpy(_packet.get(), other._packet.get(), _packetSize);
_payloadStart = _packet.get() + (other._payloadStart - other._packet.get());

View file

@ -27,7 +27,7 @@ public:
static const qint64 PACKET_WRITE_ERROR;
static std::unique_ptr<Packet> create(PacketType::Value type, qint64 size = -1);
static std::unique_ptr<Packet> fromReceivedPacket(std::unique_ptr<char> data, qint64 size, const HifiSockAddr& senderSockAddr);
static std::unique_ptr<Packet> fromReceivedPacket(std::unique_ptr<char[]> data, qint64 size, const HifiSockAddr& senderSockAddr);
// Provided for convenience, try to limit use
static std::unique_ptr<Packet> createCopy(const Packet& other);
@ -88,7 +88,7 @@ public:
protected:
Packet(PacketType::Value type, qint64 size);
Packet(std::unique_ptr<char> data, qint64 size, const HifiSockAddr& senderSockAddr);
Packet(std::unique_ptr<char[]> data, qint64 size, const HifiSockAddr& senderSockAddr);
Packet(const Packet& other);
Packet& operator=(const Packet& other);
Packet(Packet&& other);
@ -109,7 +109,7 @@ protected:
PacketVersion _version; // Packet version
qint64 _packetSize = 0; // Total size of the allocated memory
std::unique_ptr<char> _packet; // Allocated memory
std::unique_ptr<char[]> _packet; // Allocated memory
char* _payloadStart = nullptr; // Start of the payload
qint64 _payloadCapacity = 0; // Total capacity of the payload
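
These signatures match the receive path in the PacketReceiver and IceServer hunks: size a char[] buffer from the pending datagram, read into it, then move ownership into fromReceivedPacket(). A condensed sketch of that flow (HifiSockAddr and Packet are the networking types shown in this diff; the wrapper function is illustrative):

#include <memory>
#include <QUdpSocket>

// Read one pending datagram into an owned buffer and wrap it in a Packet.
void readOnePacket(QUdpSocket& socket, HifiSockAddr& sendingSockAddr) {
    int packetSizeWithHeader = socket.pendingDatagramSize();
    auto buffer = std::unique_ptr<char[]>(new char[packetSizeWithHeader]);
    socket.readDatagram(buffer.get(), packetSizeWithHeader,
                        sendingSockAddr.getAddressPointer(), sendingSockAddr.getPortPointer());
    auto packet = Packet::fromReceivedPacket(std::move(buffer), packetSizeWithHeader,
                                             sendingSockAddr);
    // ... hand the packet off for processing
}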

View file

@ -9,15 +9,12 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// include this before QOpenGLFramebufferObject, which includes an earlier version of OpenGL
#include <gpu/GPUConfig.h>
#include <gpu/GLBackend.h>
#include <glm/gtc/random.hpp>
#include <PathUtils.h>
#include <SharedUtil.h>
#include <gpu/Context.h>
#include "gpu/StandardShaderLib.h"
#include "AmbientOcclusionEffect.h"
@ -164,7 +161,7 @@ const gpu::PipelinePointer& AmbientOcclusion::getBlendPipeline() {
// Blend on transparent
state->setBlendFunction(true,
gpu::State::SRC_COLOR, gpu::State::BLEND_OP_ADD, gpu::State::DEST_COLOR);
gpu::State::INV_SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::SRC_ALPHA);
// Good to go add the brand new pipeline
_blendPipeline.reset(gpu::Pipeline::create(program, state));
@ -176,7 +173,6 @@ void AmbientOcclusion::run(const render::SceneContextPointer& sceneContext, cons
assert(renderContext->args);
assert(renderContext->args->_viewFrustum);
RenderArgs* args = renderContext->args;
auto& scene = sceneContext->_scene;
gpu::Batch batch;

View file

@ -12,12 +12,12 @@
#include "DeferredLightingEffect.h"
#include <GLMHelpers.h>
#include <gpu/GPUConfig.h>
#include <PathUtils.h>
#include <ViewFrustum.h>
#include <gpu/Batch.h>
#include <gpu/Context.h>
#include <gpu/StandardShaderLib.h>
#include <PathUtils.h>
#include <ViewFrustum.h>
#include "AbstractViewStateInterface.h"
#include "GeometryCache.h"
@ -291,7 +291,7 @@ void DeferredLightingEffect::render(RenderArgs* args) {
locations = &_directionalAmbientSphereLightCascadedShadowMapLocations;
}
batch.setPipeline(program);
batch._glUniform3fv(locations->shadowDistances, 1, (const GLfloat*) &_viewState->getShadowDistances());
batch._glUniform3fv(locations->shadowDistances, 1, (const float*) &_viewState->getShadowDistances());
} else {
if (useSkyboxCubemap) {
@ -325,7 +325,7 @@ void DeferredLightingEffect::render(RenderArgs* args) {
sh = (*_skybox->getCubemap()->getIrradiance());
}
for (int i =0; i <gpu::SphericalHarmonics::NUM_COEFFICIENTS; i++) {
batch._glUniform4fv(locations->ambientSphere + i, 1, (const GLfloat*) (&sh) + i * 4);
batch._glUniform4fv(locations->ambientSphere + i, 1, (const float*) (&sh) + i * 4);
}
}
@ -340,7 +340,7 @@ void DeferredLightingEffect::render(RenderArgs* args) {
if (_atmosphere && (locations->atmosphereBufferUnit >= 0)) {
batch.setUniformBuffer(locations->atmosphereBufferUnit, _atmosphere->getDataBuffer());
}
batch._glUniformMatrix4fv(locations->invViewMat, 1, false, reinterpret_cast< const GLfloat* >(&invViewMat));
batch._glUniformMatrix4fv(locations->invViewMat, 1, false, reinterpret_cast< const float* >(&invViewMat));
}
float left, right, bottom, top, nearVal, farVal;
@ -419,9 +419,9 @@ void DeferredLightingEffect::render(RenderArgs* args) {
batch._glUniform2f(_pointLightLocations.depthTexCoordOffset, depthTexCoordOffsetS, depthTexCoordOffsetT);
batch._glUniform2f(_pointLightLocations.depthTexCoordScale, depthTexCoordScaleS, depthTexCoordScaleT);
batch._glUniformMatrix4fv(_pointLightLocations.invViewMat, 1, false, reinterpret_cast< const GLfloat* >(&invViewMat));
batch._glUniformMatrix4fv(_pointLightLocations.invViewMat, 1, false, reinterpret_cast< const float* >(&invViewMat));
batch._glUniformMatrix4fv(_pointLightLocations.texcoordMat, 1, false, reinterpret_cast< const GLfloat* >(&texcoordMat));
batch._glUniformMatrix4fv(_pointLightLocations.texcoordMat, 1, false, reinterpret_cast< const float* >(&texcoordMat));
for (auto lightID : _pointLights) {
auto& light = _allocatedLights[lightID];
@ -467,9 +467,9 @@ void DeferredLightingEffect::render(RenderArgs* args) {
batch._glUniform2f(_spotLightLocations.depthTexCoordOffset, depthTexCoordOffsetS, depthTexCoordOffsetT);
batch._glUniform2f(_spotLightLocations.depthTexCoordScale, depthTexCoordScaleS, depthTexCoordScaleT);
batch._glUniformMatrix4fv(_spotLightLocations.invViewMat, 1, false, reinterpret_cast< const GLfloat* >(&invViewMat));
batch._glUniformMatrix4fv(_spotLightLocations.invViewMat, 1, false, reinterpret_cast< const float* >(&invViewMat));
batch._glUniformMatrix4fv(_spotLightLocations.texcoordMat, 1, false, reinterpret_cast< const GLfloat* >(&texcoordMat));
batch._glUniformMatrix4fv(_spotLightLocations.texcoordMat, 1, false, reinterpret_cast< const float* >(&texcoordMat));
for (auto lightID : _spotLights) {
auto light = _allocatedLights[lightID];
@ -489,7 +489,7 @@ void DeferredLightingEffect::render(RenderArgs* args) {
if ((eyeHalfPlaneDistance > -nearRadius) &&
(glm::distance(eyePoint, glm::vec3(light->getPosition())) < expandedRadius + nearRadius)) {
coneParam.w = 0.0f;
batch._glUniform4fv(_spotLightLocations.coneParam, 1, reinterpret_cast< const GLfloat* >(&coneParam));
batch._glUniform4fv(_spotLightLocations.coneParam, 1, reinterpret_cast< const float* >(&coneParam));
Transform model;
model.setTranslation(glm::vec3(0.0f, 0.0f, -1.0f));
@ -509,7 +509,7 @@ void DeferredLightingEffect::render(RenderArgs* args) {
batch.setViewTransform(viewMat);
} else {
coneParam.w = 1.0f;
batch._glUniform4fv(_spotLightLocations.coneParam, 1, reinterpret_cast< const GLfloat* >(&coneParam));
batch._glUniform4fv(_spotLightLocations.coneParam, 1, reinterpret_cast< const float* >(&coneParam));
Transform model;
model.setTranslation(light->getPosition());
@ -595,9 +595,9 @@ void DeferredLightingEffect::loadLightProgram(const char* vertSource, const char
slotBindings.insert(gpu::Shader::Binding(std::string("depthMap"), 3));
slotBindings.insert(gpu::Shader::Binding(std::string("shadowMap"), 4));
slotBindings.insert(gpu::Shader::Binding(std::string("skyboxMap"), 5));
const GLint LIGHT_GPU_SLOT = 3;
const int LIGHT_GPU_SLOT = 3;
slotBindings.insert(gpu::Shader::Binding(std::string("lightBuffer"), LIGHT_GPU_SLOT));
const GLint ATMOSPHERE_GPU_SLOT = 4;
const int ATMOSPHERE_GPU_SLOT = 4;
slotBindings.insert(gpu::Shader::Binding(std::string("atmosphereBufferUnit"), ATMOSPHERE_GPU_SLOT));
gpu::Shader::makeProgram(*program, slotBindings);
@ -614,13 +614,8 @@ void DeferredLightingEffect::loadLightProgram(const char* vertSource, const char
locations.texcoordMat = program->getUniforms().findLocation("texcoordMat");
locations.coneParam = program->getUniforms().findLocation("coneParam");
#if (GPU_FEATURE_PROFILE == GPU_CORE)
locations.lightBufferUnit = program->getBuffers().findLocation("lightBuffer");
locations.atmosphereBufferUnit = program->getBuffers().findLocation("atmosphereBufferUnit");
#else
locations.lightBufferUnit = program->getUniforms().findLocation("lightBuffer");
locations.atmosphereBufferUnit = program->getUniforms().findLocation("atmosphereBufferUnit");
#endif
auto state = std::make_shared<gpu::State>();
if (lightVolume) {
@ -677,10 +672,10 @@ model::MeshPointer DeferredLightingEffect::getSpotLightMesh() {
int ringFloatOffset = slices * 3;
GLfloat* vertexData = new GLfloat[verticesSize];
GLfloat* vertexRing0 = vertexData;
GLfloat* vertexRing1 = vertexRing0 + ringFloatOffset;
GLfloat* vertexRing2 = vertexRing1 + ringFloatOffset;
float* vertexData = new float[verticesSize];
float* vertexRing0 = vertexData;
float* vertexRing1 = vertexRing0 + ringFloatOffset;
float* vertexRing2 = vertexRing1 + ringFloatOffset;
for (int i = 0; i < slices; i++) {
float theta = TWO_PI * i / slices;
@ -746,7 +741,7 @@ model::MeshPointer DeferredLightingEffect::getSpotLightMesh() {
*(index++) = capVertex;
}
_spotLightMesh->setIndexBuffer(gpu::BufferView(new gpu::Buffer(sizeof(GLushort) * indices, (gpu::Byte*) indexData), gpu::Element::INDEX_UINT16));
_spotLightMesh->setIndexBuffer(gpu::BufferView(new gpu::Buffer(sizeof(unsigned short) * indices, (gpu::Byte*) indexData), gpu::Element::INDEX_UINT16));
delete[] indexData;
model::Mesh::Part part(0, indices, 0, model::Mesh::TRIANGLES);

View file

@ -18,7 +18,6 @@
#include <QMap>
#include <QQueue>
#include <gpu/Batch.h>
#include <gpu/GPUConfig.h>
#include "RenderUtilsLogging.h"
static QQueue<gpu::FramebufferPointer> _cachedFramebuffers;

View file

@ -9,22 +9,22 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// include this before QOpenGLBuffer, which includes an earlier version of OpenGL
#include "GeometryCache.h"
#include <cmath>
#include <QNetworkReply>
#include <QRunnable>
#include <QThreadPool>
#include <gpu/Batch.h>
#include <gpu/GLBackend.h>
#include <FSTReader.h>
#include <NumericalConstants.h>
#include <gpu/Batch.h>
#include <gpu/GLBackend.h>
#include "TextureCache.h"
#include "RenderUtilsLogging.h"
#include "GeometryCache.h"
#include "standardTransformPNTC_vert.h"
#include "standardDrawTexture_frag.h"
@ -1998,7 +1998,7 @@ void GeometryReader::run() {
} else if (_url.path().toLower().endsWith("palaceoforinthilian4.fbx")) {
lightmapLevel = 3.5f;
}
fbxgeo = readFBX(_reply, _mapping, grabLightmaps, lightmapLevel);
fbxgeo = readFBX(_reply, _mapping, _url.path(), grabLightmaps, lightmapLevel);
} else if (_url.path().toLower().endsWith(".obj")) {
fbxgeo = OBJReader().readOBJ(_reply, _mapping, &_url);
}

View file

@ -18,20 +18,18 @@
#include <CapsuleShape.h>
#include <GeometryUtil.h>
#include <gpu/Batch.h>
#include <gpu/GLBackend.h>
#include <PathUtils.h>
#include <PerfStat.h>
#include "PhysicsEntity.h"
#include <ShapeCollider.h>
#include <SphereShape.h>
#include <ViewFrustum.h>
#include <render/Scene.h>
#include <gpu/Batch.h>
#include "AbstractViewStateInterface.h"
#include "AnimationHandle.h"
#include "DeferredLightingEffect.h"
#include "Model.h"
#include "RenderUtilsLogging.h"
#include "model_vert.h"
#include "model_shadow_vert.h"
@ -96,7 +94,7 @@ Model::~Model() {
}
Model::RenderPipelineLib Model::_renderPipelineLib;
const GLint MATERIAL_GPU_SLOT = 3;
const int MATERIAL_GPU_SLOT = 3;
void Model::RenderPipelineLib::addRenderPipeline(Model::RenderKey key,
gpu::ShaderPointer& vertexShader,
@ -189,13 +187,9 @@ void Model::RenderPipelineLib::initLocations(gpu::ShaderPointer& program, Model:
locations.specularTextureUnit = program->getTextures().findLocation("specularMap");
locations.emissiveTextureUnit = program->getTextures().findLocation("emissiveMap");
#if (GPU_FEATURE_PROFILE == GPU_CORE)
locations.materialBufferUnit = program->getBuffers().findLocation("materialBuffer");
locations.lightBufferUnit = program->getBuffers().findLocation("lightBuffer");
#else
locations.materialBufferUnit = program->getUniforms().findLocation("materialBuffer");
locations.lightBufferUnit = program->getUniforms().findLocation("lightBuffer");
#endif
locations.clusterMatrices = program->getUniforms().findLocation("clusterMatrices");
locations.clusterIndices = program->getInputs().findLocation("clusterIndices");;

View file

@ -11,12 +11,10 @@
//
#include "RenderDeferredTask.h"
#include <gpu/GPUConfig.h>
#include <gpu/Batch.h>
#include <gpu/Context.h>
#include <PerfStat.h>
#include <RenderArgs.h>
#include <ViewFrustum.h>
#include <gpu/Context.h>
#include "FramebufferCache.h"
#include "DeferredLightingEffect.h"

View file

@ -16,16 +16,15 @@
#include <glm/glm.hpp>
#include <glm/gtc/random.hpp>
#include <gpu/Batch.h>
#include <gpu/GLBackend.h>
#include <gpu/GPUConfig.h>
#include <QNetworkReply>
#include <QPainter>
#include <QRunnable>
#include <QThreadPool>
#include <qimagereader.h>
#include <gpu/Batch.h>
#include "RenderUtilsLogging.h"

View file

@ -18,6 +18,9 @@
<$declareStandardTransform()$>
// Based on NVidia HBAO implementation in D3D11
// http://www.nvidia.co.uk/object/siggraph-2008-HBAO.html
varying vec2 varTexcoord;
uniform sampler2D depthTexture;
@ -30,80 +33,213 @@ uniform float g_intensity;
uniform float bufferWidth;
uniform float bufferHeight;
#define SAMPLE_COUNT 4
const float PI = 3.14159265;
float getRandom(vec2 uv) {
const vec2 FocalLen = vec2(1.0, 1.0);
const vec2 LinMAD = vec2(0.1-10.0, 0.1+10.0) / (2.0*0.1*10.0);
const vec2 AORes = vec2(1024.0, 768.0);
const vec2 InvAORes = vec2(1.0/1024.0, 1.0/768.0);
const vec2 NoiseScale = vec2(1024.0, 768.0) / 4.0;
const float AOStrength = 1.9;
const float R = 0.3;
const float R2 = 0.3*0.3;
const float NegInvR2 = - 1.0 / (0.3*0.3);
const float TanBias = tan(30.0 * PI / 180.0);
const float MaxRadiusPixels = 50.0;
const int NumDirections = 6;
const int NumSamples = 4;
float ViewSpaceZFromDepth(float d){
// [0,1] -> [-1,1] clip space
d = d * 2.0 - 1.0;
// Get view space Z
return -1.0 / (LinMAD.x * d + LinMAD.y);
}
vec3 UVToViewSpace(vec2 uv, float z){
//uv = UVToViewA * uv + UVToViewB;
return vec3(uv * z, z);
}
vec3 GetViewPos(vec2 uv){
float z = ViewSpaceZFromDepth(texture2D(depthTexture, uv).r);
return UVToViewSpace(uv, z);
}
vec3 GetViewPosPoint(ivec2 uv){
vec2 coord = vec2(gl_FragCoord.xy) + uv;
//float z = texelFetch(texture0, coord, 0).r;
float z = texture2D(depthTexture, uv).r;
return UVToViewSpace(uv, z);
}
float TanToSin(float x){
return x * inversesqrt(x*x + 1.0);
}
float InvLength(vec2 V){
return inversesqrt(dot(V,V));
}
float Tangent(vec3 V){
return V.z * InvLength(V.xy);
}
float BiasedTangent(vec3 V){
return V.z * InvLength(V.xy) + TanBias;
}
float Tangent(vec3 P, vec3 S){
return -(P.z - S.z) * InvLength(S.xy - P.xy);
}
float Length2(vec3 V){
return dot(V,V);
}
vec3 MinDiff(vec3 P, vec3 Pr, vec3 Pl){
vec3 V1 = Pr - P;
vec3 V2 = P - Pl;
return (Length2(V1) < Length2(V2)) ? V1 : V2;
}
vec2 SnapUVOffset(vec2 uv){
return round(uv * AORes) * InvAORes;
}
float Falloff(float d2){
return d2 * NegInvR2 + 1.0f;
}
float HorizonOcclusion( vec2 deltaUV, vec3 P, vec3 dPdu, vec3 dPdv, float randstep, float numSamples){
float ao = 0;
// Offset the first coord with some noise
vec2 uv = varTexcoord + SnapUVOffset(randstep*deltaUV);
deltaUV = SnapUVOffset( deltaUV );
// Calculate the tangent vector
vec3 T = deltaUV.x * dPdu + deltaUV.y * dPdv;
// Get the angle of the tangent vector from the viewspace axis
float tanH = BiasedTangent(T);
float sinH = TanToSin(tanH);
float tanS;
float d2;
vec3 S;
// Sample to find the maximum angle
for(float s = 1; s <= numSamples; ++s){
uv += deltaUV;
S = GetViewPos(uv);
tanS = Tangent(P, S);
d2 = Length2(S - P);
// Is the sample within the radius and the angle greater?
if(d2 < R2 && tanS > tanH)
{
float sinS = TanToSin(tanS);
// Apply falloff based on the distance
ao += Falloff(d2) * (sinS - sinH);
tanH = tanS;
sinH = sinS;
}
}
return ao;
}
vec2 RotateDirections(vec2 Dir, vec2 CosSin){
return vec2(Dir.x*CosSin.x - Dir.y*CosSin.y, Dir.x*CosSin.y + Dir.y*CosSin.x);
}
void ComputeSteps(inout vec2 stepSizeUv, inout float numSteps, float rayRadiusPix, float rand){
// Avoid oversampling if numSteps is greater than the kernel radius in pixels
numSteps = min(NumSamples, rayRadiusPix);
// Divide by Ns+1 so that the farthest samples are not fully attenuated
float stepSizePix = rayRadiusPix / (numSteps + 1);
// Clamp numSteps if it is greater than the max kernel footprint
float maxNumSteps = MaxRadiusPixels / stepSizePix;
if (maxNumSteps < numSteps)
{
// Use dithering to avoid AO discontinuities
numSteps = floor(maxNumSteps + rand);
numSteps = max(numSteps, 1);
stepSizePix = MaxRadiusPixels / numSteps;
}
// Step size in uv space
stepSizeUv = stepSizePix * InvAORes;
}
float getRandom(vec2 uv){
return fract(sin(dot(uv.xy ,vec2(12.9898,78.233))) * 43758.5453);
}
void main(void) {
vec3 sampleKernel[4] = { vec3(0.2, 0.0, 0.0),
vec3(0.0, 0.2, 0.0),
vec3(0.0, 0.0, 0.2),
vec3(0.2, 0.2, 0.2) };
void main(void){
float numDirections = NumDirections;
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
vec3 P, Pr, Pl, Pt, Pb;
P = GetViewPos(varTexcoord);
vec3 eyeDir = vec3(0.0, 0.0, -3.0);
vec3 cameraPositionWorldSpace;
<$transformEyeToWorldDir(cam, eyeDir, cameraPositionWorldSpace)$>
// Sample neighboring pixels
Pr = GetViewPos(varTexcoord + vec2( InvAORes.x, 0));
Pl = GetViewPos(varTexcoord + vec2(-InvAORes.x, 0));
Pt = GetViewPos(varTexcoord + vec2( 0, InvAORes.y));
Pb = GetViewPos(varTexcoord + vec2( 0,-InvAORes.y));
vec4 depthColor = texture2D(depthTexture, varTexcoord);
// Calculate tangent basis vectors using the minimum difference
vec3 dPdu = MinDiff(P, Pr, Pl);
vec3 dPdv = MinDiff(P, Pt, Pb) * (AORes.y * InvAORes.x);
// z in non linear range [0,1]
float depthVal = depthColor.r;
// conversion into NDC [-1,1]
float zNDC = depthVal * 2.0 - 1.0;
float n = 1.0; // the near plane
float f = 30.0; // the far plane
float l = -1.0; // left
float r = 1.0; // right
float b = -1.0; // bottom
float t = 1.0; // top
// conversion into eye space
float zEye = 2*f*n / (zNDC*(f-n)-(f+n));
// Converting from pixel coordinates to NDC
float xNDC = gl_FragCoord.x/bufferWidth * 2.0 - 1.0;
float yNDC = gl_FragCoord.y/bufferHeight * 2.0 - 1.0;
// Unprojecting X and Y from NDC to eye space
float xEye = -zEye*(xNDC*(r-l)+(r+l))/(2.0*n);
float yEye = -zEye*(yNDC*(t-b)+(t+b))/(2.0*n);
vec3 currentFragEyeSpace = vec3(xEye, yEye, zEye);
vec3 currentFragWorldSpace;
<$transformEyeToWorldDir(cam, currentFragEyeSpace, currentFragWorldSpace)$>
vec3 cameraToPositionRay = normalize(currentFragWorldSpace - cameraPositionWorldSpace);
vec3 origin = cameraToPositionRay * depthVal + cameraPositionWorldSpace;
vec3 normal = normalize(texture2D(normalTexture, varTexcoord).xyz);
//normal = normalize(normal * normalMatrix);
vec3 rvec = vec3(getRandom(varTexcoord.xy), getRandom(varTexcoord.yx), getRandom(varTexcoord.xx)) * 2.0 - 1.0;
vec3 tangent = normalize(rvec - normal * dot(rvec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 tbn = mat3(tangent, bitangent, normal);
float occlusion = 0.0;
for (int i = 0; i < SAMPLE_COUNT; ++i) {
vec3 samplePos = origin + (tbn * sampleKernel[i]) * g_sample_rad;
vec4 offset = cam._projectionViewUntranslated * vec4(samplePos, 1.0);
offset.xy = (offset.xy / offset.w) * 0.5 + 0.5;
float depth = length(samplePos - cameraPositionWorldSpace);
float sampleDepthVal = texture2D(depthTexture, offset.xy).r;
float rangeDelta = abs(depthVal - sampleDepthVal);
float rangeCheck = smoothstep(0.0, 1.0, g_sample_rad / rangeDelta);
occlusion += rangeCheck * step(sampleDepthVal, depth);
}
occlusion = 1.0 - occlusion / float(SAMPLE_COUNT);
occlusion = clamp(pow(occlusion, g_intensity), 0.0, 1.0);
gl_FragColor = vec4(vec3(occlusion), 1.0);
}
// Get the random samples from the noise function
vec3 random = vec3(getRandom(varTexcoord.xy), getRandom(varTexcoord.yx), getRandom(varTexcoord.xx));
// Calculate the projected size of the hemisphere
vec2 rayRadiusUV = 0.5 * R * FocalLen / -P.z;
float rayRadiusPix = rayRadiusUV.x * AORes.x;
float ao = 1.0;
// Make sure the radius of the evaluated hemisphere is more than a pixel
if(rayRadiusPix > 1.0){
ao = 0.0;
float numSteps;
vec2 stepSizeUV;
// Compute the number of steps
ComputeSteps(stepSizeUV, numSteps, rayRadiusPix, random.z);
float alpha = 2.0 * PI / numDirections;
// Calculate the horizon occlusion of each direction
for(float d = 0; d < numDirections; ++d){
float theta = alpha * d;
// Apply noise to the direction
vec2 dir = RotateDirections(vec2(cos(theta), sin(theta)), random.xy);
vec2 deltaUV = dir * stepSizeUV;
// Sample the pixels along the direction
ao += HorizonOcclusion( deltaUV,
P,
dPdu,
dPdv,
random.z,
numSteps);
}
// Average the results and produce the final AO
ao = 1.0 - ao / numDirections * AOStrength;
}
gl_FragColor = vec4(vec3(ao), 1.0);
}

View file

@ -21,9 +21,6 @@ uniform sampler2D blurredOcclusionTexture;
void main(void) {
vec4 occlusionColor = texture2D(blurredOcclusionTexture, varTexcoord);
if(occlusionColor.r > 0.8 && occlusionColor.r <= 1.0) {
gl_FragColor = vec4(vec3(0.0), 0.0);
} else {
gl_FragColor = vec4(vec3(occlusionColor.r), 1.0);
}
gl_FragColor = vec4(vec3(0.0), occlusionColor.r);
}

View file

@ -15,15 +15,11 @@
#include <assert.h>
#include <PerfStat.h>
#include <gpu/Batch.h>
#include <gpu/Context.h>
#include <gpu/GPUConfig.h>
#include <gpu/GPULogging.h>
#include <ViewFrustum.h>
#include <RenderArgs.h>
#include <gpu/Context.h>
#include "drawItemBounds_vert.h"
#include "drawItemBounds_frag.h"
#include "drawItemStatus_vert.h"
@ -152,17 +148,17 @@ void DrawStatus::run(const SceneContextPointer& sceneContext, const RenderContex
const unsigned int VEC3_ADRESS_OFFSET = 3;
for (int i = 0; i < nbItems; i++) {
batch._glUniform3fv(_drawItemBoundPosLoc, 1, (const GLfloat*) (itemAABox + i));
batch._glUniform3fv(_drawItemBoundDimLoc, 1, ((const GLfloat*) (itemAABox + i)) + VEC3_ADRESS_OFFSET);
batch._glUniform3fv(_drawItemBoundPosLoc, 1, (const float*) (itemAABox + i));
batch._glUniform3fv(_drawItemBoundDimLoc, 1, ((const float*) (itemAABox + i)) + VEC3_ADRESS_OFFSET);
batch.draw(gpu::LINES, 24, 0);
}
batch.setPipeline(getDrawItemStatusPipeline());
for (int i = 0; i < nbItems; i++) {
batch._glUniform3fv(_drawItemStatusPosLoc, 1, (const GLfloat*) (itemAABox + i));
batch._glUniform3fv(_drawItemStatusDimLoc, 1, ((const GLfloat*) (itemAABox + i)) + VEC3_ADRESS_OFFSET);
batch._glUniform4iv(_drawItemStatusValueLoc, 1, (const GLint*) (itemStatus + i));
batch._glUniform3fv(_drawItemStatusPosLoc, 1, (const float*) (itemAABox + i));
batch._glUniform3fv(_drawItemStatusDimLoc, 1, ((const float*) (itemAABox + i)) + VEC3_ADRESS_OFFSET);
batch._glUniform4iv(_drawItemStatusValueLoc, 1, (const int*) (itemStatus + i));
batch.draw(gpu::TRIANGLES, 24, 0);
}
@ -171,4 +167,4 @@ void DrawStatus::run(const SceneContextPointer& sceneContext, const RenderContex
args->_context->syncCache();
renderContext->args->_context->syncCache();
args->_context->render((batch));
}
}

View file

@ -14,12 +14,10 @@
#include <algorithm>
#include <assert.h>
#include <gpu/Batch.h>
#include <gpu/Context.h>
#include <gpu/GPUConfig.h>
#include <PerfStat.h>
#include <RenderArgs.h>
#include <ViewFrustum.h>
#include <gpu/Context.h>
using namespace render;

View file

@ -8,33 +8,35 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <gpu/GPUConfig.h>
#include <unordered_map>
#include <memory>
#include <mutex>
#include <QWindow>
#include <QtGlobal>
#include <QFile>
#include <QTime>
#include <QImage>
#include <QTimer>
#include <QElapsedTimer>
#include <QOpenGLContext>
#include <QOpenGLBuffer>
#include <QOpenGLShaderProgram>
#include <QResizeEvent>
#include <QLoggingCategory>
#include <gpu/Context.h>
#include <gpu/GLBackend.h>
#include <QOpenGLBuffer>
#include <QOpenGLContext>
#include <QOpenGLDebugLogger>
#include <QOpenGLShaderProgram>
#include <QOpenGLTexture>
#include <QOpenGLVertexArrayObject>
#include <QApplication>
#include <QOpenGLDebugLogger>
#include <unordered_map>
#include <memory>
#include <glm/glm.hpp>
#include <QResizeEvent>
#include <QTime>
#include <QTimer>
#include <QWindow>
#include <QElapsedTimer>
#include <QDir>
#include <QGuiApplication>
#include <PathUtils.h>
#include <QDir>
#include "gpu/Batch.h"
@ -174,22 +176,20 @@ public:
show();
makeCurrent();
#ifdef WIN32
glewExperimental = true;
GLenum err = glewInit();
if (GLEW_OK != err) {
/* Problem: glewInit failed, something is seriously wrong. */
const GLubyte * errStr = glewGetErrorString(err);
qDebug("Error: %s\n", errStr);
}
qDebug("Status: Using GLEW %s\n", glewGetString(GLEW_VERSION));
gpu::Context::init<gpu::GLBackend>();
if (wglewGetExtension("WGL_EXT_swap_control")) {
int swapInterval = wglGetSwapIntervalEXT();
qDebug("V-Sync is %s\n", (swapInterval > 0 ? "ON" : "OFF"));
{
QOpenGLDebugLogger* logger = new QOpenGLDebugLogger(this);
logger->initialize(); // initializes in the current context, i.e. ctx
logger->enableMessages();
connect(logger, &QOpenGLDebugLogger::messageLogged, this, [&](const QOpenGLDebugMessage & debugMessage) {
qDebug() << debugMessage;
});
// logger->startLogging(QOpenGLDebugLogger::SynchronousLogging);
}
glGetError();
#endif
qDebug() << (const char*)glGetString(GL_VERSION);
//_textRenderer[0] = TextRenderer::getInstance(SANS_FONT_FAMILY, 12, false);
//_textRenderer[1] = TextRenderer::getInstance(SERIF_FONT_FAMILY, 12, false,

View file

@ -38,7 +38,7 @@ bool vhacd::VHACDUtil::loadFBX(const QString filename, FBXGeometry& result) {
if (filename.toLower().endsWith(".obj")) {
result = OBJReader().readOBJ(fbxContents, QVariantHash());
} else if (filename.toLower().endsWith(".fbx")) {
result = readFBX(fbxContents, QVariantHash());
result = readFBX(fbxContents, QVariantHash(), filename);
} else {
qDebug() << "unknown file extension";
return false;