Merge pull request #5495 from highfidelity/core

Update the rendering pipeline to use OpenGL 4.1 Core profile everywhere
samcake 2015-08-05 15:10:46 -07:00
commit a4f105f6c3
129 changed files with 1661 additions and 1487 deletions

View file

@ -186,13 +186,10 @@ option(GET_POLYVOX "Get polyvox library automatically as external project" 1)
option(GET_OPENVR "Get OpenVR library automatically as external project" 1)
option(GET_BOOSTCONFIG "Get Boost-config library automatically as external project" 1)
option(GET_OGLPLUS "Get OGLplus library automatically as external project" 1)
option(GET_GLEW "Get GLEW library automatically as external project" 1)
option(USE_NSIGHT "Attempt to find the nSight libraries" 1)
if (WIN32)
option(GET_GLEW "Get GLEW library automatically as external project" 1)
endif ()
option(GET_SDL2 "Get SDL2 library automatically as external project" 0)
if (WIN32)

View file

@ -1,34 +1,34 @@
if (WIN32)
set(EXTERNAL_NAME glew)
set(EXTERNAL_NAME glew)
include(ExternalProject)
ExternalProject_Add(
${EXTERNAL_NAME}
URL http://hifi-public.s3.amazonaws.com/dependencies/glew-1.10.0-win32.zip
URL_MD5 37514e4e595a3b3dc587eee8f7e8ec2f
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
LOG_DOWNLOAD 1
)
if (ANDROID)
set(ANDROID_CMAKE_ARGS "-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}" "-DANDROID_NATIVE_API_LEVEL=19")
endif ()
# Hide this external target (for ide users)
set_target_properties(${EXTERNAL_NAME} PROPERTIES FOLDER "hidden/externals")
include(ExternalProject)
ExternalProject_Add(
${EXTERNAL_NAME}
URL http://hifi-public.s3.amazonaws.com/dependencies/glew_simple.zip
URL_MD5 0507dc08337a82a5e7ecbc5417f92cc1
CONFIGURE_COMMAND CMAKE_ARGS ${ANDROID_CMAKE_ARGS} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
LOG_DOWNLOAD 1
LOG_CONFIGURE 1
LOG_BUILD 1
)
ExternalProject_Get_Property(${EXTERNAL_NAME} SOURCE_DIR)
# Hide this external target (for ide users)
set_target_properties(${EXTERNAL_NAME} PROPERTIES FOLDER "hidden/externals")
string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER)
set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${SOURCE_DIR}/include CACHE PATH "List of glew include directories")
ExternalProject_Get_Property(${EXTERNAL_NAME} INSTALL_DIR)
if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
set(_LIB_DIR ${SOURCE_DIR}/lib/Release/x64)
set(${EXTERNAL_NAME_UPPER}_DLL_PATH ${SOURCE_DIR}/bin/Release/x64 CACHE FILEPATH "Location of GLEW DLL")
else()
set(_LIB_DIR ${SOURCE_DIR}/lib/Release/Win32)
set(${EXTERNAL_NAME_UPPER}_DLL_PATH ${SOURCE_DIR}/bin/Release/Win32 CACHE FILEPATH "Location of GLEW DLL")
endif()
string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER)
set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${INSTALL_DIR}/include CACHE PATH "List of glew include directories")
set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${_LIB_DIR}/glew32.lib CACHE FILEPATH "Location of GLEW release library")
set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG "" CACHE FILEPATH "Location of GLEW debug library")
if (UNIX)
set(LIB_PREFIX "lib")
set(LIB_EXT "a")
elseif (WIN32)
set(LIB_EXT "lib")
endif ()
endif ()
set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG ${INSTALL_DIR}/lib/${LIB_PREFIX}glew_d.${LIB_EXT} CACHE FILEPATH "Path to glew debug library")
set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${INSTALL_DIR}/lib/${LIB_PREFIX}glew.${LIB_EXT} CACHE FILEPATH "Path to glew release library")

cmake/externals/zlib/CMakeLists.txt (new file, 28 additions)
View file

@ -0,0 +1,28 @@
if (WIN32)
set(EXTERNAL_NAME zlib)
string(TOUPPER ${EXTERNAL_NAME} EXTERNAL_NAME_UPPER)
include(ExternalProject)
ExternalProject_Add(
${EXTERNAL_NAME}
URL http://zlib.net/zlib128.zip
URL_MD5 126f8676442ffbd97884eb4d6f32afb4
INSTALL_COMMAND ""
LOG_DOWNLOAD 1
)
# Hide this external target (for ide users)
set_target_properties(${EXTERNAL_NAME} PROPERTIES FOLDER "hidden/externals")
ExternalProject_Get_Property(${EXTERNAL_NAME} SOURCE_DIR)
set(${EXTERNAL_NAME_UPPER}_INCLUDE_DIRS ${SOURCE_DIR}/include CACHE PATH "List of zlib include directories")
ExternalProject_Get_Property(${EXTERNAL_NAME} BINARY_DIR)
set(${EXTERNAL_NAME_UPPER}_DLL_PATH ${BINARY_DIR}/Release CACHE FILEPATH "Location of ZLib DLL")
set(${EXTERNAL_NAME_UPPER}_LIBRARY_RELEASE ${BINARY_DIR}/Release/zlib.lib CACHE FILEPATH "Location of ZLib release library")
set(${EXTERNAL_NAME_UPPER}_LIBRARY_DEBUG "" CACHE FILEPATH "Location of ZLib debug library")
endif ()

View file

@ -12,7 +12,7 @@ macro(SETUP_HIFI_LIBRARY)
project(${TARGET_NAME})
# grab the implementation and header files
file(GLOB_RECURSE LIB_SRCS "src/*.h" "src/*.cpp")
file(GLOB_RECURSE LIB_SRCS "src/*.h" "src/*.cpp" "src/*.c")
list(APPEND ${TARGET_NAME}_SRCS ${LIB_SRCS})
# create a library and set the property so it can be referenced later

View file

@ -10,11 +10,6 @@ macro(SETUP_HIFI_OPENGL)
elseif (WIN32)
add_dependency_external_projects(glew)
find_package(GLEW REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLEW_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${GLEW_LIBRARIES} opengl32.lib)
if (USE_NSIGHT)
# try to find the Nsight package and add it to the build if we find it
find_package(NSIGHT)

View file

@ -15,7 +15,7 @@
#
include("${MACRO_DIR}/HifiLibrarySearchHints.cmake")
hifi_library_search_hints("3dconnexionclient")
hifi_library_search_hints("connexionclient")
if (APPLE)
find_library(3DCONNEXIONCLIENT_LIBRARIES NAMES 3DConnexionClient HINTS 3DCONNEXIONCLIENT_SEARCH_DIRS)

View file

@ -1,7 +1,7 @@
#
# FindGLEW.cmake
#
# Try to find GLEW library and include path.
# Try to find GLEW library and include path. Note that this only handles static GLEW.
# Once done this will define
#
# GLEW_FOUND
@ -18,39 +18,18 @@
# See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
#
if (WIN32)
include("${MACRO_DIR}/HifiLibrarySearchHints.cmake")
hifi_library_search_hints("glew")
find_path(GLEW_INCLUDE_DIRS GL/glew.h PATH_SUFFIXES include HINTS ${GLEW_SEARCH_DIRS})
find_library(GLEW_LIBRARY_RELEASE glew32 PATH_SUFFIXES "lib/Release/Win32" "lib" HINTS ${GLEW_SEARCH_DIRS})
find_library(GLEW_LIBRARY_DEBUG glew32d PATH_SUFFIXES "lib/Debug/Win32" "lib" HINTS ${GLEW_SEARCH_DIRS})
find_path(GLEW_DLL_PATH glew32.dll PATH_SUFFIXES "bin/Release/Win32" HINTS ${GLEW_SEARCH_DIRS})
include(SelectLibraryConfigurations)
select_library_configurations(GLEW)
set(GLEW_LIBRARIES ${GLEW_LIBRARY})
include("${MACRO_DIR}/HifiLibrarySearchHints.cmake")
hifi_library_search_hints("glew")
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GLEW DEFAULT_MSG GLEW_INCLUDE_DIRS GLEW_LIBRARIES GLEW_DLL_PATH)
add_paths_to_fixup_libs(${GLEW_DLL_PATH})
elseif (APPLE)
else ()
find_path(GLEW_INCLUDE_DIR GL/glew.h)
find_library(GLEW_LIBRARY NAMES GLEW glew32 glew glew32s PATH_SUFFIXES lib64)
find_path(GLEW_INCLUDE_DIRS GL/glew.h PATH_SUFFIXES include HINTS ${GLEW_SEARCH_DIRS})
set(GLEW_INCLUDE_DIRS ${GLEW_INCLUDE_DIR})
set(GLEW_LIBRARIES ${GLEW_LIBRARY})
find_library(GLEW_LIBRARY_RELEASE glew32 PATH_SUFFIXES "lib/Release/Win32" "lib" HINTS ${GLEW_SEARCH_DIRS})
find_library(GLEW_LIBRARY_DEBUG glew32d PATH_SUFFIXES "lib/Debug/Win32" "lib" HINTS ${GLEW_SEARCH_DIRS})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GLEW
REQUIRED_VARS GLEW_INCLUDE_DIR GLEW_LIBRARY)
include(SelectLibraryConfigurations)
select_library_configurations(GLEW)
mark_as_advanced(GLEW_INCLUDE_DIR GLEW_LIBRARY)
endif ()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GLEW DEFAULT_MSG GLEW_INCLUDE_DIRS GLEW_LIBRARIES)
message(STATUS "Found GLEW - Assuming that GLEW is static and defining GLEW_STATIC")

View file

@ -893,14 +893,15 @@ void Application::paintGL() {
{
float ratio = ((float)QApplication::desktop()->windowHandle()->devicePixelRatio() * getRenderResolutionScale());
auto mirrorViewport = glm::ivec4(0, 0, _mirrorViewRect.width() * ratio, _mirrorViewRect.height() * ratio);
auto mirrorViewportDest = mirrorViewport;
// Flip the src and destination rect horizontally to do the mirror
auto mirrorRect = glm::ivec4(0, 0, _mirrorViewRect.width() * ratio, _mirrorViewRect.height() * ratio);
auto mirrorRectDest = glm::ivec4(mirrorRect.z, mirrorRect.y, mirrorRect.x, mirrorRect.w);
auto selfieFbo = DependencyManager::get<FramebufferCache>()->getSelfieFramebuffer();
gpu::Batch batch;
batch.setFramebuffer(selfieFbo);
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLOR0, glm::vec4(0.0f, 0.0f, 0.0f, 0.0f));
batch.blit(primaryFbo, mirrorViewport, selfieFbo, mirrorViewportDest);
batch.blit(primaryFbo, mirrorRect, selfieFbo, mirrorRectDest);
batch.setFramebuffer(nullptr);
renderArgs._context->render(batch);
}
@ -991,8 +992,14 @@ void Application::paintGL() {
auto geometryCache = DependencyManager::get<GeometryCache>();
auto primaryFbo = DependencyManager::get<FramebufferCache>()->getPrimaryFramebufferDepthColor();
gpu::Batch batch;
batch.blit(primaryFbo, glm::ivec4(0, 0, _renderResolution.x, _renderResolution.y),
nullptr, glm::ivec4(0, 0, _glWidget->getDeviceSize().width(), _glWidget->getDeviceSize().height()));
if (renderArgs._renderMode == RenderArgs::MIRROR_RENDER_MODE) {
batch.blit(primaryFbo, glm::ivec4(0, 0, _renderResolution.x, _renderResolution.y),
nullptr, glm::ivec4(_glWidget->getDeviceSize().width(), 0, 0, _glWidget->getDeviceSize().height()));
} else {
batch.blit(primaryFbo, glm::ivec4(0, 0, _renderResolution.x, _renderResolution.y),
nullptr, glm::ivec4(0, 0, _glWidget->getDeviceSize().width(), _glWidget->getDeviceSize().height()));
}
batch.setFramebuffer(nullptr);
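The mirror path above works by swapping the x and z corners of the destination rect so the blit reverses the horizontal axis. A minimal sketch of that idea, using placeholder names (width, height, srcFbo, dstFbo are illustrative, not from this commit):

// Swapping the x/z corners of the destination rect mirrors the image left-to-right,
// the same way a flipped rect does with glBlitFramebuffer.
glm::ivec4 srcRect(0, 0, width, height);   // xy = one corner, zw = opposite corner, in pixels
glm::ivec4 dstRect(width, 0, 0, height);   // x > z, so the copy lands mirrored
gpu::Batch batch;
batch.blit(srcFbo, srcRect, dstFbo, dstRect);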

View file

@ -19,7 +19,22 @@
const int MSECS_PER_FRAME_WHEN_THROTTLED = 66;
GLCanvas::GLCanvas() : QGLWidget(QGL::NoDepthBuffer | QGL::NoStencilBuffer),
static QGLFormat& getDesiredGLFormat() {
// Specify an OpenGL 4.1 format using the Core profile.
// That is, no old-school fixed-pipeline functionality.
static QGLFormat glFormat;
static std::once_flag once;
std::call_once(once, [] {
glFormat.setVersion(4, 1);
glFormat.setProfile(QGLFormat::CoreProfile); // Requires >=Qt-4.8.0
glFormat.setSampleBuffers(false);
glFormat.setDepth(false);
glFormat.setStencil(false);
});
return glFormat;
}
GLCanvas::GLCanvas() : QGLWidget(getDesiredGLFormat()),
_throttleRendering(false),
_idleRenderInterval(MSECS_PER_FRAME_WHEN_THROTTLED)
{

View file

@ -137,7 +137,7 @@ void AudioScope::render(RenderArgs* renderArgs, int width, int height) {
batch.setProjectionTransform(legacyProjection);
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch._glLineWidth(1.0f); // default
geometryCache->renderQuad(batch, x, y, w, h, backgroundColor, _audioScopeBackground);
geometryCache->renderGrid(batch, x, y, w, h, gridRows, gridCols, gridColor, _audioScopeGrid);
renderLineStrip(batch, _inputID, inputColor, x, y, _samplesPerScope, _scopeInputOffset, _scopeInput);

View file

@ -397,7 +397,7 @@ void Head::renderLookatVectors(RenderArgs* renderArgs, glm::vec3 leftEyePosition
auto& batch = *renderArgs->_batch;
auto transform = Transform{};
batch.setModelTransform(transform);
batch._glLineWidth(2.0f);
// FIXME: The line width of 2.0f is not supported anymore, we'll need a workaround
auto deferredLighting = DependencyManager::get<DeferredLightingEffect>();
deferredLighting->bindSimpleProgram(batch);

View file

@ -264,7 +264,8 @@ void SkeletonModel::renderJointConstraints(gpu::Batch& batch, int jointIndex) {
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const float BASE_DIRECTION_SIZE = 0.3f;
float directionSize = BASE_DIRECTION_SIZE * extractUniformScale(_scale);
batch._glLineWidth(3.0f);
// FIXME: The line width of 3.0f is not supported anymore, we'll need a workaround
do {
const FBXJoint& joint = geometry.joints.at(jointIndex);
const JointState& jointState = _rig->getJointState(jointIndex);

View file

@ -137,8 +137,7 @@ void ApplicationOverlay::renderAudioScope(RenderArgs* renderArgs) {
batch.setProjectionTransform(legacyProjection);
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch._glLineWidth(1.0f); // default
// Render the audio scope
DependencyManager::get<AudioScope>()->render(renderArgs, width, height);
}
@ -157,8 +156,7 @@ void ApplicationOverlay::renderOverlays(RenderArgs* renderArgs) {
batch.setProjectionTransform(legacyProjection);
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch._glLineWidth(1.0f); // default
// Render all of the Script based "HUD" aka 2D overlays.
// note: we call them HUD, as opposed to 2D, only because there are some cases of 3D HUD overlays, like the
// camera controls for edit.js
@ -195,6 +193,7 @@ void ApplicationOverlay::renderRearView(RenderArgs* renderArgs) {
glm::vec2 texCoordMinCorner(0.0f, 0.0f);
glm::vec2 texCoordMaxCorner(viewport.width() * renderRatio / float(selfieTexture->getWidth()), viewport.height() * renderRatio / float(selfieTexture->getHeight()));
geometryCache->useSimpleDrawPipeline(batch, true);
batch.setResourceTexture(0, selfieTexture);
geometryCache->renderQuad(batch, bottomLeft, topRight, texCoordMinCorner, texCoordMaxCorner, glm::vec4(1.0f, 1.0f, 1.0f, 1.0f));
@ -247,7 +246,7 @@ void ApplicationOverlay::renderDomainConnectionStatusBorder(RenderArgs* renderAr
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch.setResourceTexture(0, DependencyManager::get<TextureCache>()->getWhiteTexture());
batch._glLineWidth(CONNECTION_STATUS_BORDER_LINE_WIDTH);
// FIXME: The line width of CONNECTION_STATUS_BORDER_LINE_WIDTH is not supported anymore, we'll need a workaround
// TODO animate the disconnect border for some excitement while not connected?
//double usecs = usecTimestampNow();

View file

@ -101,8 +101,9 @@ void Circle3DOverlay::render(RenderArgs* args) {
Q_ASSERT(args->_batch);
auto& batch = *args->_batch;
batch._glLineWidth(_lineWidth);
// FIXME: The line width of _lineWidth is not supported anymore, we'll need a workaround
auto transform = _transform;
transform.postScale(glm::vec3(getDimensions(), 1.0f));
batch.setModelTransform(transform);

View file

@ -60,7 +60,6 @@ void Grid3DOverlay::render(RenderArgs* args) {
// Minor grid
{
batch->_glLineWidth(1.0f);
auto position = glm::vec3(_minorGridWidth * (floorf(rotated.x / spacing) - MINOR_GRID_DIVISIONS / 2),
spacing * (floorf(rotated.y / spacing) - MINOR_GRID_DIVISIONS / 2),
getPosition().z);
@ -76,7 +75,6 @@ void Grid3DOverlay::render(RenderArgs* args) {
// Major grid
{
batch->_glLineWidth(4.0f);
spacing *= _majorGridEvery;
auto position = glm::vec3(spacing * (floorf(rotated.x / spacing) - MAJOR_GRID_DIVISIONS / 2),
spacing * (floorf(rotated.y / spacing) - MAJOR_GRID_DIVISIONS / 2),
@ -86,6 +84,8 @@ void Grid3DOverlay::render(RenderArgs* args) {
transform.setTranslation(position);
transform.setScale(scale);
// FIXME: The line width of 4.0f is not supported anymore, we'll need a workaround
batch->setModelTransform(transform);
DependencyManager::get<GeometryCache>()->renderGrid(*batch, MAJOR_GRID_DIVISIONS, MAJOR_GRID_DIVISIONS, gridColor);

View file

@ -119,7 +119,6 @@ void Overlays::renderHUD(RenderArgs* renderArgs) {
batch.setProjectionTransform(legacyProjection);
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch._glLineWidth(1.0f); // default
thisOverlay->render(renderArgs);
}

View file

@ -3,7 +3,7 @@ set(TARGET_NAME entities-renderer)
AUTOSCRIBE_SHADER_LIB(gpu model render)
# use setup_hifi_library macro to setup our project and link appropriate Qt modules
setup_hifi_library(Widgets OpenGL Network Script)
setup_hifi_library(Widgets Network Script)
add_dependency_external_projects(glm)
find_package(GLM REQUIRED)

View file

@ -46,13 +46,11 @@ void RenderableLineEntityItem::render(RenderArgs* args) {
transform.setTranslation(getPosition());
transform.setRotation(getRotation());
batch.setModelTransform(transform);
batch._glLineWidth(getLineWidth());
if (getLinePoints().size() > 1) {
DependencyManager::get<DeferredLightingEffect>()->bindSimpleProgram(batch);
DependencyManager::get<GeometryCache>()->renderVertices(batch, gpu::LINE_STRIP, _lineVerticesID);
}
batch._glLineWidth(1.0f);
RenderableDebugableEntityItem::render(this, args);
};

View file

@ -11,10 +11,12 @@
uniform sampler2D colorMap;
varying vec4 varColor;
varying vec2 varTexCoord;
in vec4 _color;
in vec2 _texCoord0;
out vec4 outFragColor;
void main(void) {
vec4 color = texture2D(colorMap, varTexCoord);
gl_FragColor = color * varColor;
vec4 color = texture(colorMap, _texCoord0);
outFragColor = color * _color;
}

View file

@ -10,19 +10,21 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
varying vec4 varColor;
varying vec2 varTexCoord;
out vec4 _color;
out vec2 _texCoord0;
void main(void) {
// pass along the color & uvs to fragment shader
varColor = gl_Color;
varTexCoord = gl_MultiTexCoord0.xy;
_color = inColor;
_texCoord0 = inTexCoord0.xy;
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, gl_Vertex, gl_Position)$>
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
}

View file

@ -9,8 +9,10 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
varying vec4 varColor;
in vec4 _color;
out vec4 outFragColor;
void main(void) {
gl_FragColor = varColor;
outFragColor = _color;
}

View file

@ -8,17 +8,19 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
varying vec4 varColor;
out vec4 _color;
void main(void) {
// pass along the diffuse color
varColor = gl_Color;
_color = inColor;
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, gl_Vertex, gl_Position)$>
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
}

View file

@ -7,17 +7,18 @@ setup_hifi_library()
link_hifi_libraries(shared)
add_dependency_external_projects(glew)
find_package(GLEW REQUIRED)
add_definitions(-DGLEW_STATIC)
target_include_directories(${TARGET_NAME} PUBLIC ${GLEW_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${GLEW_LIBRARY})
if (APPLE)
# link in required OS X frameworks and include the right GL headers
find_library(OpenGL OpenGL)
target_link_libraries(${TARGET_NAME} ${OpenGL})
elseif (WIN32)
add_dependency_external_projects(glew)
find_package(GLEW REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLEW_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} ${GLEW_LIBRARIES} opengl32.lib)
target_link_libraries(${TARGET_NAME} ${GLEW_LIBRARY} opengl32.lib)
if (USE_NSIGHT)
# try to find the Nsight package and add it to the build if we find it
@ -32,8 +33,6 @@ elseif (WIN32)
elseif (ANDROID)
target_link_libraries(${TARGET_NAME} "-lGLESv3" "-lEGL")
else ()
find_package(GLEW REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLEW_INCLUDE_DIRS})
find_package(OpenGL REQUIRED)
@ -41,6 +40,5 @@ else ()
include_directories(SYSTEM "${OPENGL_INCLUDE_DIR}")
endif ()
target_link_libraries(${TARGET_NAME} "${GLEW_LIBRARIES}" "${OPENGL_LIBRARY}")
target_link_libraries(${TARGET_NAME} "${OPENGL_LIBRARY}")
endif (APPLE)

View file

@ -106,7 +106,10 @@ public:
void clearStencilFramebuffer(int stencil, bool enableScissor = false); // not a command, just a shortcut for clearFramebuffer, it touches only stencil target
void clearDepthStencilFramebuffer(float depth, int stencil, bool enableScissor = false); // not a command, just a shortcut for clearFramebuffer, it touches depth and stencil target
void blit(const FramebufferPointer& src, const Vec4i& srcViewport, const FramebufferPointer& dst, const Vec4i& dstViewport);
// Blit the src framebuffer to the destination.
// srcRect and dstRect are the rect regions in the source and destination framebuffers, expressed in pixel space,
// with xy and zw being the bounding corners of each rect.
void blit(const FramebufferPointer& src, const Vec4i& srcRect, const FramebufferPointer& dst, const Vec4i& dstRect);
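A short usage sketch of the new signature, following the call sites elsewhere in this commit (srcFramebuffer and the sizes are placeholders; passing nullptr as the destination targets the default framebuffer, as the paintGL changes above do):

gpu::Batch batch;
// copy the whole source framebuffer onto the window, corners (0,0) to (w,h) in pixels
batch.blit(srcFramebuffer, gpu::Vec4i(0, 0, srcWidth, srcHeight),
           nullptr, gpu::Vec4i(0, 0, windowWidth, windowHeight));
renderArgs._context->render(batch);   // renderArgs as used in Application::paintGL above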
// Query Section
void beginQuery(const QueryPointer& query);
@ -134,7 +137,6 @@ public:
void _glUniformMatrix4fv(int location, int count, unsigned char transpose, const float* value);
void _glColor4f(float red, float green, float blue, float alpha);
void _glLineWidth(float width);
enum Command {
COMMAND_draw = 0,
@ -183,7 +185,6 @@ public:
COMMAND_glUniformMatrix4fv,
COMMAND_glColor4f,
COMMAND_glLineWidth,
NUM_COMMANDS,
};

View file

@ -13,17 +13,11 @@
<@if GLPROFILE == PC_GL @>
<@def GPU_FEATURE_PROFILE GPU_CORE@>
<@def GPU_TRANSFORM_PROFILE GPU_CORE@>
<@def VERSION_HEADER #version 430 compatibility@>
<@def VERSION_HEADER #version 410 core@>
<@elif GLPROFILE == MAC_GL @>
<@def GPU_FEATURE_PROFILE GPU_LEGACY@>
<@def GPU_TRANSFORM_PROFILE GPU_LEGACY@>
<@def VERSION_HEADER #version 120
#extension GL_EXT_gpu_shader4 : enable@>
<@def GPU_FEATURE_PROFILE GPU_CORE@>
<@def VERSION_HEADER #version 410 core@>
<@else@>
<@def GPU_FEATURE_PROFILE GPU_CORE@>
<@def GPU_TRANSFORM_PROFILE GPU_CORE@>
<@def VERSION_HEADER #version 430 compatibility@>
<@endif@>
<@def VERSION_HEADER #version 410 core@>
<@endif@>

View file

@ -11,12 +11,13 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
uniform sampler2D colorMap;
uniform vec4 color;
varying vec2 varTexcoord;
in vec2 varTexCoord0;
out vec4 outFragColor;
void main(void) {
gl_FragColor = texture2D(colorMap, varTexcoord) * color;
outFragColor = texture(colorMap, varTexCoord0) * color;
}

View file

@ -13,27 +13,27 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Transform.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
uniform vec4 texcoordRect;
varying vec2 varTexcoord;
out vec2 varTexCoord0;
void main(void) {
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, 0.0, 1.0),
vec4(1.0, -1.0, 0.0, 1.0),
vec4(-1.0, 1.0, 0.0, 1.0),
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, 0.0, 1.0),
vec4(1.0, -1.0, 0.0, 1.0),
vec4(-1.0, 1.0, 0.0, 1.0),
vec4(1.0, 1.0, 0.0, 1.0)
);
vec4 pos = UNIT_QUAD[gl_VertexID];
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, pos, gl_Position)$>
varTexcoord = ((pos.xy + 1) * 0.5) * texcoordRect.zw + texcoordRect.xy;
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, pos, gl_Position)$>
varTexCoord0 = ((pos.xy + 1) * 0.5) * texcoordRect.zw + texcoordRect.xy;
}

View file

@ -11,11 +11,12 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
uniform sampler2D colorMap;
varying vec2 varTexcoord;
in vec2 varTexCoord0;
out vec4 outFragColor;
void main(void) {
gl_FragColor = texture2D(colorMap, varTexcoord);
outFragColor = texture(colorMap, varTexCoord0);
}

View file

@ -12,11 +12,12 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
uniform sampler2D colorMap;
varying vec2 varTexcoord;
in vec2 varTexCoord0;
out vec4 outFragColor;
void main(void) {
gl_FragColor = vec4(texture2D(colorMap, varTexcoord).xyz, 1.0);
outFragColor = vec4(texture(colorMap, varTexCoord0).xyz, 1.0);
}

View file

@ -12,25 +12,25 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Transform.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
varying vec2 varTexcoord;
out vec2 varTexCoord0;
void main(void) {
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, 0.0, 1.0),
vec4(1.0, -1.0, 0.0, 1.0),
vec4(-1.0, 1.0, 0.0, 1.0),
const vec4 UNIT_QUAD[4] = vec4[4](
vec4(-1.0, -1.0, 0.0, 1.0),
vec4(1.0, -1.0, 0.0, 1.0),
vec4(-1.0, 1.0, 0.0, 1.0),
vec4(1.0, 1.0, 0.0, 1.0)
);
vec4 pos = UNIT_QUAD[gl_VertexID];
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, pos, gl_Position)$>
varTexcoord = (pos.xy + 1) * 0.5;
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, pos, gl_Position)$>
varTexCoord0 = (pos.xy + 1) * 0.5;
}

View file

@ -16,7 +16,7 @@
<$declareStandardTransform()$>
varying vec2 varTexcoord;
out vec2 varTexCoord0;
void main(void) {
const vec4 UNIT_QUAD[4] = vec4[4](
@ -34,5 +34,5 @@ void main(void) {
<$transformModelToWorldPos(obj, tc, tc)$>
gl_Position = pos;
varTexcoord = tc.xy;
varTexCoord0 = tc.xy;
}

View file

@ -88,7 +88,26 @@ static const int TYPE_SIZE[NUM_TYPES] = {
1,
1
};
// Array answering the question: is this type an integer or not?
static const bool TYPE_IS_INTEGER[NUM_TYPES] = {
false,
true,
true,
false,
true,
true,
true,
true,
false,
true,
true,
false,
true,
true,
true,
true
};
// Dimension of an Element
enum Dimension {
@ -168,6 +187,7 @@ public:
Type getType() const { return (Type)_type; }
bool isNormalized() const { return (getType() >= NFLOAT); }
bool isInteger() const { return TYPE_IS_INTEGER[getType()]; }
uint32 getSize() const { return DIMENSION_COUNT[_dimension] * TYPE_SIZE[_type]; }

View file

@ -60,7 +60,6 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
(&::gpu::GLBackend::do_glUniformMatrix4fv),
(&::gpu::GLBackend::do_glColor4f),
(&::gpu::GLBackend::do_glLineWidth),
};
void GLBackend::init() {
@ -74,14 +73,16 @@ void GLBackend::init() {
qCDebug(gpulogging) << "GL Renderer: " << QString((const char*) glGetString(GL_RENDERER));
#ifdef WIN32
glewExperimental = true;
GLenum err = glewInit();
glGetError();
if (GLEW_OK != err) {
/* Problem: glewInit failed, something is seriously wrong. */
qCDebug(gpulogging, "Error: %s\n", glewGetErrorString(err));
}
qCDebug(gpulogging, "Status: Using GLEW %s\n", glewGetString(GLEW_VERSION));
#if defined(Q_OS_WIN)
if (wglewGetExtension("WGL_EXT_swap_control")) {
int swapInterval = wglGetSwapIntervalEXT();
qCDebug(gpulogging, "V-Sync is %s\n", (swapInterval > 0 ? "ON" : "OFF"));
@ -89,13 +90,6 @@ void GLBackend::init() {
#endif
#if defined(Q_OS_LINUX)
GLenum err = glewInit();
if (GLEW_OK != err) {
/* Problem: glewInit failed, something is seriously wrong. */
qCDebug(gpulogging, "Error: %s\n", glewGetErrorString(err));
}
qCDebug(gpulogging, "Status: Using GLEW %s\n", glewGetString(GLEW_VERSION));
// TODO: Write the correct code for Linux...
/* if (wglewGetExtension("WGL_EXT_swap_control")) {
int swapInterval = wglGetSwapIntervalEXT();
@ -200,7 +194,6 @@ void GLBackend::do_draw(Batch& batch, uint32 paramOffset) {
GLenum mode = _primitiveToGLmode[primitiveType];
uint32 numVertices = batch._params[paramOffset + 1]._uint;
uint32 startVertex = batch._params[paramOffset + 0]._uint;
glDrawArrays(mode, startVertex, numVertices);
(void) CHECK_GL_ERROR();
}
@ -494,22 +487,11 @@ void Batch::_glColor4f(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha)
DO_IT_NOW(_glColor4f, 4);
}
void GLBackend::do_glColor4f(Batch& batch, uint32 paramOffset) {
glColor4f(
// TODO Replace this with a proper sticky Input attribute buffer with frequency 0
glVertexAttrib4f( gpu::Stream::COLOR,
batch._params[paramOffset + 3]._float,
batch._params[paramOffset + 2]._float,
batch._params[paramOffset + 1]._float,
batch._params[paramOffset + 0]._float);
(void) CHECK_GL_ERROR();
}
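Setting the generic attribute's current value is what keeps _glColor4f working without fixed-function state: when no vertex array is bound to the COLOR slot, every vertex reads this constant. A hedged illustration (r, g, b, a are placeholders):

// With the COLOR attribute array disabled, glVertexAttrib4f supplies the constant
// value every vertex sees, which is the core-profile replacement for glColor4f.
glDisableVertexAttribArray(gpu::Stream::COLOR);
glVertexAttrib4f(gpu::Stream::COLOR, r, g, b, a);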
void Batch::_glLineWidth(GLfloat width) {
ADD_COMMAND_GL(glLineWidth);
_params.push_back(width);
DO_IT_NOW(_glLineWidth, 1);
}
void GLBackend::do_glLineWidth(Batch& batch, uint32 paramOffset) {
glLineWidth(batch._params[paramOffset]._float);
(void) CHECK_GL_ERROR();
}

View file

@ -93,13 +93,6 @@ public:
GLint _transformCameraSlot = -1;
GLint _transformObjectSlot = -1;
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
GLint _transformObject_model = -1;
GLint _transformCamera_viewInverse = -1;
GLint _transformCamera_viewport = -1;
#endif
GLShader();
~GLShader();
};
@ -327,8 +320,6 @@ protected:
bool _invalidProj;
bool _invalidViewport;
GLenum _lastMode;
TransformStageState() :
_transformObjectBuffer(0),
_transformCameraBuffer(0),
@ -339,14 +330,13 @@ protected:
_invalidModel(true),
_invalidView(true),
_invalidProj(false),
_invalidViewport(false),
_lastMode(GL_TEXTURE) {}
_invalidViewport(false) {}
} _transform;
// Uniform Stage
void do_setUniformBuffer(Batch& batch, uint32 paramOffset);
void releaseUniformBuffer(int slot);
void releaseUniformBuffer(uint32_t slot);
void resetUniformStage();
struct UniformStageState {
Buffers _buffers;
@ -359,7 +349,7 @@ protected:
// Resource Stage
void do_setResourceTexture(Batch& batch, uint32 paramOffset);
void releaseResourceTexture(int slot);
void releaseResourceTexture(uint32_t slot);
void resetResourceStage();
struct ResourceStageState {
Textures _textures;
@ -392,13 +382,6 @@ protected:
GLuint _program;
bool _invalidProgram;
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
GLint _program_transformObject_model = -1;
GLint _program_transformCamera_viewInverse = -1;
GLint _program_transformCamera_viewport = -1;
#endif
State::Data _stateCache;
State::Signature _stateSignatureCache;
@ -462,7 +445,6 @@ protected:
void do_glUniformMatrix4fv(Batch& batch, uint32 paramOffset);
void do_glColor4f(Batch& batch, uint32 paramOffset);
void do_glLineWidth(Batch& batch, uint32 paramOffset);
typedef void (GLBackend::*CommandCall)(Batch&, uint32);
static CommandCall _commandCalls[Batch::NUM_COMMANDS];

View file

@ -57,66 +57,41 @@ void GLBackend::do_setInputBuffer(Batch& batch, uint32 paramOffset) {
}
}
#define NOT_SUPPORT_VAO
#if defined(SUPPORT_VAO)
#if (GPU_INPUT_PROFILE == GPU_CORE_41)
#define NO_SUPPORT_VERTEX_ATTRIB_FORMAT
#else
#define SUPPORT_VERTEX_ATTRIB_FORMAT
#endif
#define SUPPORT_LEGACY_OPENGL
#if defined(SUPPORT_LEGACY_OPENGL)
static const int NUM_CLASSIC_ATTRIBS = Stream::TANGENT;
static const GLenum attributeSlotToClassicAttribName[NUM_CLASSIC_ATTRIBS] = {
GL_VERTEX_ARRAY,
GL_NORMAL_ARRAY,
GL_COLOR_ARRAY,
GL_TEXTURE_COORD_ARRAY
};
#endif
#endif
void GLBackend::initInput() {
#if defined(SUPPORT_VAO)
if(!_input._defaultVAO) {
glGenVertexArrays(1, &_input._defaultVAO);
}
glBindVertexArray(_input._defaultVAO);
(void) CHECK_GL_ERROR();
#endif
}
void GLBackend::killInput() {
#if defined(SUPPORT_VAO)
glBindVertexArray(0);
if(_input._defaultVAO) {
glDeleteVertexArrays(1, &_input._defaultVAO);
}
(void) CHECK_GL_ERROR();
#endif
}
void GLBackend::syncInputStateCache() {
#if defined(SUPPORT_VAO)
for (int i = 0; i < NUM_CLASSIC_ATTRIBS; i++) {
_input._attributeActivation[i] = glIsEnabled(attributeSlotToClassicAttribName[i]);
}
//_input._defaultVAO
glBindVertexArray(_input._defaultVAO);
#else
size_t i = 0;
#if defined(SUPPORT_LEGACY_OPENGL)
for (; i < NUM_CLASSIC_ATTRIBS; i++) {
_input._attributeActivation[i] = glIsEnabled(attributeSlotToClassicAttribName[i]);
}
#endif
for (; i < _input._attributeActivation.size(); i++) {
for (uint32_t i = 0; i < _input._attributeActivation.size(); i++) {
GLint active = 0;
glGetVertexAttribiv(i, GL_VERTEX_ATTRIB_ARRAY_ENABLED, &active);
_input._attributeActivation[i] = active;
}
#endif
//_input._defaultVAO
glBindVertexArray(_input._defaultVAO);
}
void GLBackend::updateInput() {
#if defined(SUPPORT_VAO)
#if defined(SUPPORT_VERTEX_ATTRIB_FORMAT)
if (_input._invalidFormat) {
InputStageState::ActivationCache newActivation;
@ -193,21 +168,11 @@ void GLBackend::updateInput() {
for (unsigned int i = 0; i < newActivation.size(); i++) {
bool newState = newActivation[i];
if (newState != _input._attributeActivation[i]) {
#if defined(SUPPORT_LEGACY_OPENGL)
if (i < NUM_CLASSIC_ATTRIBS) {
if (newState) {
glEnableClientState(attributeSlotToClassicAttribName[i]);
} else {
glDisableClientState(attributeSlotToClassicAttribName[i]);
}
} else
#endif
{
if (newState) {
glEnableVertexAttribArray(i);
} else {
glDisableVertexAttribArray(i);
}
if (newState) {
glEnableVertexAttribArray(i);
} else {
glDisableVertexAttribArray(i);
}
(void) CHECK_GL_ERROR();
@ -249,30 +214,13 @@ void GLBackend::updateInput() {
GLenum type = _elementTypeToGLType[attrib._element.getType()];
GLuint stride = strides[bufferNum];
GLuint pointer = attrib._offset + offsets[bufferNum];
#if defined(SUPPORT_LEGACY_OPENGL)
const bool useClientState = slot < NUM_CLASSIC_ATTRIBS;
if (useClientState) {
switch (slot) {
case Stream::POSITION:
glVertexPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::NORMAL:
glNormalPointer(type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::COLOR:
glColorPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::TEXCOORD:
glTexCoordPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
};
} else
#endif
{
GLboolean isNormalized = attrib._element.isNormalized();
glVertexAttribPointer(slot, count, type, isNormalized, stride,
GLboolean isNormalized = attrib._element.isNormalized();
glVertexAttribPointer(slot, count, type, isNormalized, stride,
reinterpret_cast<GLvoid*>(pointer));
}
// TODO: Properly support the IAttrib version
(void) CHECK_GL_ERROR();
}
}
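The TODO above is about integer vertex attributes: in a core profile they need glVertexAttribIPointer instead of glVertexAttribPointer, which is what the Element::isInteger() flag added earlier in this commit could drive. A possible sketch, not part of this commit:

if (attrib._element.isInteger()) {
    // integer attributes must not be normalized or converted to float
    glVertexAttribIPointer(slot, count, type, stride, reinterpret_cast<GLvoid*>(pointer));
} else {
    GLboolean isNormalized = attrib._element.isNormalized();
    glVertexAttribPointer(slot, count, type, isNormalized, stride, reinterpret_cast<GLvoid*>(pointer));
}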
@ -293,35 +241,20 @@ void GLBackend::resetInputStage() {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
(void) CHECK_GL_ERROR();
#if defined(SUPPORT_VAO)
// TODO
#else
glBindBuffer(GL_ARRAY_BUFFER, 0);
size_t i = 0;
#if defined(SUPPORT_LEGACY_OPENGL)
for (; i < NUM_CLASSIC_ATTRIBS; i++) {
glDisableClientState(attributeSlotToClassicAttribName[i]);
}
glVertexPointer(4, GL_FLOAT, 0, 0);
glNormalPointer(GL_FLOAT, 0, 0);
glColorPointer(4, GL_FLOAT, 0, 0);
glTexCoordPointer(4, GL_FLOAT, 0, 0);
#endif
for (; i < _input._attributeActivation.size(); i++) {
for (uint32_t i = 0; i < _input._attributeActivation.size(); i++) {
glDisableVertexAttribArray(i);
glVertexAttribPointer(i, 4, GL_FLOAT, GL_FALSE, 0, 0);
}
#endif
// Reset vertex buffer and format
_input._format.reset();
_input._invalidFormat = false;
_input._attributeActivation.reset();
for (int i = 0; i < _input._buffers.size(); i++) {
for (uint32_t i = 0; i < _input._buffers.size(); i++) {
_input._buffers[i].reset();
_input._bufferOffsets[i] = 0;
_input._bufferStrides[i] = 0;

View file

@ -71,13 +71,6 @@ void GLBackend::do_setPipeline(Batch& batch, uint32 paramOffset) {
_pipeline._program = 0;
_pipeline._invalidProgram = true;
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
_pipeline._program_transformObject_model = -1;
_pipeline._program_transformCamera_viewInverse = -1;
_pipeline._program_transformCamera_viewport = -1;
#endif
_pipeline._state = nullptr;
_pipeline._invalidState = true;
} else {
@ -90,13 +83,6 @@ void GLBackend::do_setPipeline(Batch& batch, uint32 paramOffset) {
if (_pipeline._program != pipelineObject->_program->_program) {
_pipeline._program = pipelineObject->_program->_program;
_pipeline._invalidProgram = true;
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
_pipeline._program_transformObject_model = pipelineObject->_program->_transformObject_model;
_pipeline._program_transformCamera_viewInverse = pipelineObject->_program->_transformCamera_viewInverse;
_pipeline._program_transformCamera_viewport = pipelineObject->_program->_transformCamera_viewport;
#endif
}
// Now for the state
@ -144,24 +130,6 @@ void GLBackend::updatePipeline() {
}
_pipeline._invalidState = false;
}
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
#else
// If shader program needs the model we need to provide it
if (_pipeline._program_transformObject_model >= 0) {
glUniformMatrix4fv(_pipeline._program_transformObject_model, 1, false, (const GLfloat*) &_transform._transformObject._model);
}
// If shader program needs the inverseView we need to provide it
if (_pipeline._program_transformCamera_viewInverse >= 0) {
glUniformMatrix4fv(_pipeline._program_transformCamera_viewInverse, 1, false, (const GLfloat*) &_transform._transformCamera._viewInverse);
}
// If shader program needs the viewport we need to provide it
if (_pipeline._program_transformCamera_viewport >= 0) {
glUniform4fv(_pipeline._program_transformCamera_viewport, 1, (const GLfloat*) &_transform._transformCamera._viewport);
}
#endif
}
void GLBackend::resetPipelineStage() {
@ -179,7 +147,7 @@ void GLBackend::resetPipelineStage() {
}
void GLBackend::releaseUniformBuffer(int slot) {
void GLBackend::releaseUniformBuffer(uint32_t slot) {
#if (GPU_FEATURE_PROFILE == GPU_CORE)
auto& buf = _uniform._buffers[slot];
if (buf) {
@ -196,7 +164,7 @@ void GLBackend::releaseUniformBuffer(int slot) {
}
void GLBackend::resetUniformStage() {
for (int i = 0; i < _uniform._buffers.size(); i++) {
for (uint32_t i = 0; i < _uniform._buffers.size(); i++) {
releaseUniformBuffer(i);
}
}
@ -249,7 +217,7 @@ void GLBackend::do_setUniformBuffer(Batch& batch, uint32 paramOffset) {
#endif
}
void GLBackend::releaseResourceTexture(int slot) {
void GLBackend::releaseResourceTexture(uint32_t slot) {
auto& tex = _resource._textures[slot];
if (tex) {
auto* object = Backend::getGPUObject<GLBackend::GLTexture>(*tex);
@ -266,7 +234,7 @@ void GLBackend::releaseResourceTexture(int slot) {
}
void GLBackend::resetResourceStage() {
for (int i = 0; i < _resource._textures.size(); i++) {
for (uint32_t i = 0; i < _resource._textures.size(); i++) {
releaseResourceTexture(i);
}
}

View file

@ -35,71 +35,44 @@ void makeBindings(GLBackend::GLShader* shader) {
GLint loc = -1;
//Check for gpu specific attribute slotBindings
loc = glGetAttribLocation(glprogram, "position");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::POSITION, "position");
loc = glGetAttribLocation(glprogram, "inPosition");
if (loc >= 0 && loc != gpu::Stream::POSITION) {
glBindAttribLocation(glprogram, gpu::Stream::POSITION, "inPosition");
}
loc = glGetAttribLocation(glprogram, "attribPosition");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::POSITION, "attribPosition");
loc = glGetAttribLocation(glprogram, "inNormal");
if (loc >= 0 && loc != gpu::Stream::NORMAL) {
glBindAttribLocation(glprogram, gpu::Stream::NORMAL, "inNormal");
}
//Check for gpu specific attribute slotBindings
loc = glGetAttribLocation(glprogram, "gl_Vertex");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::POSITION, "gl_Vertex");
loc = glGetAttribLocation(glprogram, "inColor");
if (loc >= 0 && loc != gpu::Stream::COLOR) {
glBindAttribLocation(glprogram, gpu::Stream::COLOR, "inColor");
}
loc = glGetAttribLocation(glprogram, "normal");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::NORMAL, "normal");
}
loc = glGetAttribLocation(glprogram, "attribNormal");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::NORMAL, "attribNormal");
loc = glGetAttribLocation(glprogram, "inTexCoord0");
if (loc >= 0 && loc != gpu::Stream::TEXCOORD) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD, "inTexCoord0");
}
loc = glGetAttribLocation(glprogram, "color");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::COLOR, "color");
}
loc = glGetAttribLocation(glprogram, "attribColor");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::COLOR, "attribColor");
loc = glGetAttribLocation(glprogram, "inTangent");
if (loc >= 0 && loc != gpu::Stream::TANGENT) {
glBindAttribLocation(glprogram, gpu::Stream::TANGENT, "inTangent");
}
loc = glGetAttribLocation(glprogram, "texcoord");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD, "texcoord");
}
loc = glGetAttribLocation(glprogram, "attribTexcoord");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD, "attribTexcoord");
}
loc = glGetAttribLocation(glprogram, "tangent");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::TANGENT, "tangent");
loc = glGetAttribLocation(glprogram, "inTexCoord1");
if (loc >= 0 && loc != gpu::Stream::TEXCOORD1) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD1, "inTexCoord1");
}
loc = glGetAttribLocation(glprogram, "texcoord1");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD1, "texcoord1");
}
loc = glGetAttribLocation(glprogram, "attribTexcoord1");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::TEXCOORD1, "texcoord1");
loc = glGetAttribLocation(glprogram, "inSkinClusterIndex");
if (loc >= 0 && loc != gpu::Stream::SKIN_CLUSTER_INDEX) {
glBindAttribLocation(glprogram, gpu::Stream::SKIN_CLUSTER_INDEX, "inSkinClusterIndex");
}
loc = glGetAttribLocation(glprogram, "clusterIndices");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::SKIN_CLUSTER_INDEX, "clusterIndices");
}
loc = glGetAttribLocation(glprogram, "clusterWeights");
if (loc >= 0) {
glBindAttribLocation(glprogram, gpu::Stream::SKIN_CLUSTER_WEIGHT, "clusterWeights");
loc = glGetAttribLocation(glprogram, "inSkinClusterWeight");
if (loc >= 0 && loc != gpu::Stream::SKIN_CLUSTER_WEIGHT) {
glBindAttribLocation(glprogram, gpu::Stream::SKIN_CLUSTER_WEIGHT, "inSkinClusterWeight");
}
// Link again to take into account the assigned attrib location
@ -114,7 +87,6 @@ void makeBindings(GLBackend::GLShader* shader) {
// now assign the UBO binding, then DON'T relink!
//Check for gpu specific uniform slotBindings
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
loc = glGetUniformBlockIndex(glprogram, "transformObjectBuffer");
if (loc >= 0) {
glUniformBlockBinding(glprogram, loc, gpu::TRANSFORM_OBJECT_SLOT);
@ -126,22 +98,6 @@ void makeBindings(GLBackend::GLShader* shader) {
glUniformBlockBinding(glprogram, loc, gpu::TRANSFORM_CAMERA_SLOT);
shader->_transformCameraSlot = gpu::TRANSFORM_CAMERA_SLOT;
}
#else
loc = glGetUniformLocation(glprogram, "transformObject_model");
if (loc >= 0) {
shader->_transformObject_model = loc;
}
loc = glGetUniformLocation(glprogram, "transformCamera_viewInverse");
if (loc >= 0) {
shader->_transformCamera_viewInverse = loc;
}
loc = glGetUniformLocation(glprogram, "transformCamera_viewport");
if (loc >= 0) {
shader->_transformCamera_viewport = loc;
}
#endif
}
GLBackend::GLShader* compileShader(const Shader& shader) {
@ -191,8 +147,8 @@ GLBackend::GLShader* compileShader(const Shader& shader) {
char* temp = new char[infoLength] ;
glGetShaderInfoLog(glshader, infoLength, NULL, temp);
qCDebug(gpulogging) << "GLShader::compileShader - failed to compile the gl shader object:";
qCDebug(gpulogging) << temp;
qCWarning(gpulogging) << "GLShader::compileShader - failed to compile the gl shader object:";
qCWarning(gpulogging) << temp;
/*
filestream.open("debugshader.glsl.info.txt");
@ -635,8 +591,6 @@ bool isUnusedSlot(GLint binding) {
int makeUniformBlockSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& buffers) {
GLint buffersCount = 0;
#if (GPU_FEATURE_PROFILE == GPU_CORE)
glGetProgramiv(glprogram, GL_ACTIVE_UNIFORM_BLOCKS, &buffersCount);
// fast exit
@ -689,7 +643,6 @@ int makeUniformBlockSlots(GLuint glprogram, const Shader::BindingSet& slotBindin
Element element(SCALAR, gpu::UINT32, gpu::UNIFORM_BUFFER);
buffers.insert(Shader::Slot(name, binding, element, Resource::BUFFER));
}
#endif
return buffersCount;
}
@ -750,11 +703,7 @@ bool GLBackend::makeProgram(Shader& shader, const Shader::BindingSet& slotBindin
Shader::SlotSet uniforms;
Shader::SlotSet textures;
Shader::SlotSet samplers;
#if (GPU_FEATURE_PROFILE == GPU_CORE)
makeUniformSlots(object->_program, slotBindings, uniforms, textures, samplers);
#else
makeUniformSlots(object->_program, slotBindings, uniforms, textures, samplers, buffers);
#endif
Shader::SlotSet inputs;
makeInputSlots(object->_program, slotBindings, inputs);

View file

@ -484,10 +484,14 @@ void GLBackend::syncPipelineStateCache() {
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
// Point size is always on
glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
// FIXME CORE
//glHint(GL_POINT_SMOOTH_HINT, GL_NICEST);
glEnable(GL_PROGRAM_POINT_SIZE_EXT);
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
// Default line width across the board
glLineWidth(1.0f);
getCurrentGLState(state);
State::Signature signature = State::evalSignature(state);
@ -583,10 +587,8 @@ void GLBackend::do_setStateMultisampleEnable(bool enable) {
void GLBackend::do_setStateAntialiasedLineEnable(bool enable) {
if (_pipeline._stateCache.antialisedLineEnable != enable) {
if (enable) {
glEnable(GL_POINT_SMOOTH);
glEnable(GL_LINE_SMOOTH);
} else {
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
}
(void) CHECK_GL_ERROR();

View file

@ -41,7 +41,6 @@ void GLBackend::do_setViewportTransform(Batch& batch, uint32 paramOffset) {
}
void GLBackend::initTransform() {
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
glGenBuffers(1, &_transform._transformObjectBuffer);
glGenBuffers(1, &_transform._transformCameraBuffer);
@ -51,18 +50,12 @@ void GLBackend::initTransform() {
glBindBuffer(GL_UNIFORM_BUFFER, _transform._transformCameraBuffer);
glBufferData(GL_UNIFORM_BUFFER, sizeof(_transform._transformCamera), (const void*) &_transform._transformCamera, GL_DYNAMIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
#else
#endif
}
void GLBackend::killTransform() {
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
glDeleteBuffers(1, &_transform._transformObjectBuffer);
glDeleteBuffers(1, &_transform._transformCameraBuffer);
#else
#endif
}
void GLBackend::syncTransformStateCache() {
@ -73,14 +66,7 @@ void GLBackend::syncTransformStateCache() {
glGetIntegerv(GL_VIEWPORT, (GLint*) &_transform._viewport);
GLint currentMode;
glGetIntegerv(GL_MATRIX_MODE, &currentMode);
_transform._lastMode = currentMode;
glGetFloatv(GL_PROJECTION_MATRIX, (float*) &_transform._projection);
Mat4 modelView;
glGetFloatv(GL_MODELVIEW_MATRIX, (float*) &modelView);
auto modelViewInv = glm::inverse(modelView);
_transform._view.evalFromRawMatrix(modelViewInv);
_transform._model.setIdentity();
@ -113,7 +99,8 @@ void GLBackend::updateTransform() {
_transform._transformCamera._projectionViewUntranslated = _transform._transformCamera._projection * viewUntranslated;
}
#if (GPU_TRANSFORM_PROFILE == GPU_CORE)
// TODO: We need a ring buffer to do efficient dynamic updates here
// For now let's just do the bind-and-update sequence
if (_transform._invalidView || _transform._invalidProj || _transform._invalidViewport) {
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_CAMERA_SLOT, 0);
glBindBuffer(GL_ARRAY_BUFFER, _transform._transformCameraBuffer);
@ -133,52 +120,6 @@ void GLBackend::updateTransform() {
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_OBJECT_SLOT, _transform._transformObjectBuffer);
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_CAMERA_SLOT, _transform._transformCameraBuffer);
CHECK_GL_ERROR();
#endif
#if (GPU_TRANSFORM_PROFILE == GPU_LEGACY)
// Do it again for fixed pipeline until we can get rid of it
GLint originalMatrixMode;
glGetIntegerv(GL_MATRIX_MODE, &originalMatrixMode);
if (_transform._invalidProj) {
if (_transform._lastMode != GL_PROJECTION) {
glMatrixMode(GL_PROJECTION);
_transform._lastMode = GL_PROJECTION;
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&_transform._projection));
(void) CHECK_GL_ERROR();
}
if (_transform._invalidModel || _transform._invalidView) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
if (!_transform._model.isIdentity()) {
Transform::Mat4 modelView;
if (!_transform._view.isIdentity()) {
Transform mvx;
Transform::inverseMult(mvx, _transform._view, _transform._model);
mvx.getMatrix(modelView);
} else {
_transform._model.getMatrix(modelView);
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else if (!_transform._view.isIdentity()) {
Transform::Mat4 modelView;
_transform._view.getInverseMatrix(modelView);
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
glLoadIdentity();
}
(void) CHECK_GL_ERROR();
}
glMatrixMode(originalMatrixMode);
#endif
// Flags are clean
_transform._invalidView = _transform._invalidProj = _transform._invalidModel = _transform._invalidViewport = false;

View file

@ -18,18 +18,18 @@
#define GPU_LEGACY 0
#if defined(__APPLE__)
#include <OpenGL/gl.h>
#include <OpenGL/glext.h>
#define GPU_FEATURE_PROFILE GPU_LEGACY
#define GPU_TRANSFORM_PROFILE GPU_LEGACY
#include <GL/glew.h>
#define GPU_FEATURE_PROFILE GPU_CORE
#define GPU_INPUT_PROFILE GPU_CORE_41
#elif defined(WIN32)
#include <GL/glew.h>
#include <GL/wglew.h>
#define GPU_FEATURE_PROFILE GPU_CORE
#define GPU_TRANSFORM_PROFILE GPU_CORE
#define GPU_INPUT_PROFILE GPU_CORE_41
#elif defined(ANDROID)
@ -38,7 +38,7 @@
#include <GL/glew.h>
#define GPU_FEATURE_PROFILE GPU_CORE
#define GPU_TRANSFORM_PROFILE GPU_CORE
#define GPU_INPUT_PROFILE GPU_CORE_41
#endif

View file

@ -0,0 +1,21 @@
<!
// Input.slh
// interface/src
//
// Created by Bradley Austin Davis on 2015/06/19.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
!>
<@if not GPU_INPUTS_SLH@>
<@def GPU_INPUTS_SLH@>
in vec4 inPosition;
in vec4 inNormal;
in vec4 inColor;
in vec4 inTexCoord0;
in vec4 inTangent;
in vec4 inSkinClusterIndex;
in vec4 inSkinClusterWeight;
in vec4 inTexCoord1;
<@endif@>

View file

@ -14,7 +14,7 @@
using namespace gpu;
const Element Element::COLOR_RGBA_32 = Element(VEC4, UINT8, RGBA);
const Element Element::COLOR_RGBA_32 = Element(VEC4, NUINT8, RGBA);
const Element Element::VEC3F_XYZ = Element(VEC3, FLOAT, XYZ);
const Element Element::INDEX_UINT16 = Element(SCALAR, UINT16, INDEX);
const Element Element::PART_DRAWCALL = Element(VEC4, UINT32, PART);

View file

@ -260,7 +260,7 @@ public:
depthClampEnable(false),
scissorEnable(false),
multisampleEnable(false),
antialisedLineEnable(false),
antialisedLineEnable(true),
alphaToCoverageEnable(false)
{}
};

View file

@ -27,13 +27,17 @@ public:
// Possible input slots identifiers
enum InputSlot {
POSITION = 0,
NORMAL,
COLOR,
TEXCOORD,
TANGENT,
SKIN_CLUSTER_INDEX,
SKIN_CLUSTER_WEIGHT,
TEXCOORD1,
NORMAL = 1,
COLOR = 2,
TEXCOORD0 = 3,
TEXCOORD = TEXCOORD0,
TANGENT = 4,
SKIN_CLUSTER_INDEX = 5,
SKIN_CLUSTER_WEIGHT = 6,
TEXCOORD1 = 7,
INSTANCE_XFM = 8,
INSTANCE_SCALE = 9,
INSTANCE_TRANSLATE = 10,
NUM_INPUT_SLOTS,
};

View file

@ -25,7 +25,6 @@ struct TransformCamera {
vec4 _viewport;
};
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
uniform transformObjectBuffer {
TransformObject _object;
};
@ -39,83 +38,22 @@ uniform transformCameraBuffer {
TransformCamera getTransformCamera() {
return _camera;
}
<@else@>
//uniform vec4 transformObjectBuffer[8];
TransformObject getTransformObject() {
TransformObject object;
/* object._model[0] = transformObjectBuffer[0];
object._model[1] = transformObjectBuffer[1];
object._model[2] = transformObjectBuffer[2];
object._model[3] = transformObjectBuffer[3];
object._modelInverse[0] = transformObjectBuffer[4];
object._modelInverse[1] = transformObjectBuffer[5];
object._modelInverse[2] = transformObjectBuffer[6];
object._modelInverse[3] = transformObjectBuffer[7];
*/
return object;
}
//uniform vec4 transformCameraBuffer[17];
TransformCamera getTransformCamera() {
TransformCamera camera;
/* camera._view[0] = transformCameraBuffer[0];
camera._view[1] = transformCameraBuffer[1];
camera._view[2] = transformCameraBuffer[2];
camera._view[3] = transformCameraBuffer[3];
camera._viewInverse[0] = transformCameraBuffer[4];
camera._viewInverse[1] = transformCameraBuffer[5];
camera._viewInverse[2] = transformCameraBuffer[6];
camera._viewInverse[3] = transformCameraBuffer[7];
camera._projectionViewUntranslated[0] = transformCameraBuffer[8];
camera._projectionViewUntranslated[1] = transformCameraBuffer[9];
camera._projectionViewUntranslated[2] = transformCameraBuffer[10];
camera._projectionViewUntranslated[3] = transformCameraBuffer[11];
camera._projection[0] = transformCameraBuffer[12];
camera._projection[1] = transformCameraBuffer[13];
camera._projection[2] = transformCameraBuffer[14];
camera._projection[3] = transformCameraBuffer[15];
camera._viewport = transformCameraBuffer[16];
*/
return camera;
}
uniform mat4 transformObject_model;
uniform mat4 transformCamera_viewInverse;
uniform vec4 transformCamera_viewport;
<@endif@>
<@endfunc@>
<@func transformCameraViewport(cameraTransform, viewport)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
<$viewport$> = <$cameraTransform$>._viewport;
<@else@>
<$viewport$> = transformCamera_viewport;
<@endif@>
<@endfunc@>
<@func transformModelToClipPos(cameraTransform, objectTransform, modelPos, clipPos)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
<!// Equivalent to the following but hopefully a tad more accurate
//return camera._projection * camera._view * object._model * pos; !>
{ // transformModelToClipPos
vec4 _eyepos = (<$objectTransform$>._model * <$modelPos$>) + vec4(-<$modelPos$>.w * <$cameraTransform$>._viewInverse[3].xyz, 0.0);
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * _eyepos;
}
<@else@>
<$clipPos$> = gl_ModelViewProjectionMatrix * <$modelPos$>;
<@endif@>
<@endfunc@>
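The two-step form above is the same math as projection * view * model * pos, only with the camera translation applied as a subtraction in world space (viewInverse[3].xyz is the camera position) before applying projectionViewUntranslated, which is the projection times the translation-free view. A small glm check of that identity, as an editor's sketch rather than code from this commit:

#include <glm/glm.hpp>

// reference form: clip = P * V * M * pos
glm::vec4 clipReference(const glm::mat4& proj, const glm::mat4& view,
                        const glm::mat4& model, const glm::vec4& pos) {
    return proj * view * model * pos;
}

// factored form mirroring transformModelToClipPos
glm::vec4 clipFactored(const glm::mat4& proj, const glm::mat4& viewInverse,
                       const glm::mat4& model, const glm::vec4& pos) {
    glm::mat4 viewUntranslated = glm::inverse(viewInverse);
    viewUntranslated[3] = glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);   // strip the camera translation
    glm::vec4 eyePos = model * pos + glm::vec4(-pos.w * glm::vec3(viewInverse[3]), 0.0f);
    return (proj * viewUntranslated) * eyePos;                 // == projectionViewUntranslated * eyePos
}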
<@func $transformModelToEyeAndClipPos(cameraTransform, objectTransform, modelPos, eyePos, clipPos)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
<!// Equivalent to the following but hopefully a tad more accurate
//return camera._projection * camera._view * object._model * pos; !>
{ // transformModelToClipPos
@ -125,24 +63,15 @@ uniform vec4 transformCamera_viewport;
<$clipPos$> = <$cameraTransform$>._projectionViewUntranslated * _eyepos;
// <$eyePos$> = (<$cameraTransform$>._projectionInverse * <$clipPos$>);
}
<@else@>
<$eyePos$> = gl_ModelViewMatrix * <$modelPos$>;
<$clipPos$> = gl_ModelViewProjectionMatrix * <$modelPos$>;
<@endif@>
<@endfunc@>
<@func transformModelToWorldPos(objectTransform, modelPos, worldPos)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
{ // transformModelToWorldPos
<$worldPos$> = (<$objectTransform$>._model * <$modelPos$>);
}
<@else@>
<$worldPos$> = (transformObject_model * <$modelPos$>);
<@endif@>
<@endfunc@>
<@func transformModelToEyeDir(cameraTransform, objectTransform, modelDir, eyeDir)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
{ // transformModelToEyeDir
vec3 mr0 = vec3(<$objectTransform$>._modelInverse[0].x, <$objectTransform$>._modelInverse[1].x, <$objectTransform$>._modelInverse[2].x);
vec3 mr1 = vec3(<$objectTransform$>._modelInverse[0].y, <$objectTransform$>._modelInverse[1].y, <$objectTransform$>._modelInverse[2].y);
@ -154,29 +83,18 @@ uniform vec4 transformCamera_viewport;
<$eyeDir$> = vec3(dot(mvc0, <$modelDir$>), dot(mvc1, <$modelDir$>), dot(mvc2, <$modelDir$>));
}
<@else@>
<$eyeDir$> = gl_NormalMatrix * <$modelDir$>;
<@endif@>
<@endfunc@>
<@func transformEyeToWorldDir(cameraTransform, eyeDir, worldDir)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
{ // transformEyeToWorldDir
<$worldDir$> = vec3(<$cameraTransform$>._viewInverse * vec4(<$eyeDir$>.xyz, 0.0));
}
<@else@>
<$worldDir$> = vec3(transformCamera_viewInverse * vec4(<$eyeDir$>.xyz, 0.0));
<@endif@>
<@endfunc@>
<@func transformClipToEyeDir(cameraTransform, clipPos, eyeDir)@>
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
{ // transformClipToEyeDir
<$eyeDir$> = vec3(<$cameraTransform$>._projectionInverse * vec4(<$clipPos$>.xyz, 1.0));
}
<@else@>
<$eyeDir$> = vec3(gl_ProjectionMatrixInverse * vec4(<$clipPos$>.xyz, 1.0));
<@endif@>
<@endfunc@>
<@endif@>

View file

@ -1,6 +1,6 @@
set(TARGET_NAME model)
AUTOSCRIBE_SHADER_LIB(gpu)
AUTOSCRIBE_SHADER_LIB(gpu model)
# use setup_hifi_library macro to setup our project and link appropriate Qt modules
setup_hifi_library()

View file

@ -6,53 +6,53 @@
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
!>
<@if not MODEL_ATMOSPHERE_SLH@>
<@def MODEL_ATMOSPHERE_SLH@>
<!
// Code is a modified version of:
// http://http.developer.nvidia.com/GPUGems/gpugems_app01.html
// Atmospheric scattering fragment shader
//
// Author: Sean O'Neil
//
// Copyright (c) 2004 Sean O'Neil
//
// For licensing information, see http://http.developer.nvidia.com/GPUGems/gpugems_app01.html:
//
// NVIDIA Statement on the Software
//
// The source code provided is freely distributable, so long as the NVIDIA header remains unaltered and user modifications are
// detailed.
//
// No Warranty
//
// THE SOFTWARE AND ANY OTHER MATERIALS PROVIDED BY NVIDIA ON THE ENCLOSED CD-ROM ARE PROVIDED "AS IS." NVIDIA DISCLAIMS ALL
// WARRANTIES, EXPRESS, IMPLIED OR STATUTORY, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
//
// Limitation of Liability
//
// NVIDIA SHALL NOT BE LIABLE TO ANY USER, DEVELOPER, DEVELOPER'S CUSTOMERS, OR ANY OTHER PERSON OR ENTITY CLAIMING THROUGH OR
// UNDER DEVELOPER FOR ANY LOSS OF PROFITS, INCOME, SAVINGS, OR ANY OTHER CONSEQUENTIAL, INCIDENTAL, SPECIAL, PUNITIVE, DIRECT
// OR INDIRECT DAMAGES (WHETHER IN AN ACTION IN CONTRACT, TORT OR BASED ON A WARRANTY), EVEN IF NVIDIA HAS BEEN ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS SHALL APPLY NOTWITHSTANDING ANY FAILURE OF THE ESSENTIAL PURPOSE OF ANY
// LIMITED REMEDY. IN NO EVENT SHALL NVIDIA'S AGGREGATE LIABILITY TO DEVELOPER OR ANY OTHER PERSON OR ENTITY CLAIMING THROUGH
// OR UNDER DEVELOPER EXCEED THE AMOUNT OF MONEY ACTUALLY PAID BY DEVELOPER TO NVIDIA FOR THE SOFTWARE OR ANY OTHER MATERIALS.
//
!>
struct Atmosphere {
vec4 _invWaveLength;
vec4 _radiuses;
vec4 _scales;
vec4 _scatterings;
vec4 _control;
};
const int numSamples = 2;
vec3 getAtmosphereInvWaveLength(Atmosphere a) { return a._invWaveLength.xyz; } // 1 / pow(wavelength, 4) for the red, green, and blue channels
@ -68,88 +68,88 @@ float getAtmosphereKrESun(Atmosphere a) { return a._scatterings.x; } // Kr * ESu
float getAtmosphereKmESun(Atmosphere a) { return a._scatterings.y; } // Km * ESun
float getAtmosphereKr4PI(Atmosphere a) { return a._scatterings.z; } // Kr * 4 * PI
float getAtmosphereKm4PI(Atmosphere a) { return a._scatterings.w; } // Km * 4 * PI
float getAtmosphereNumSamples(Atmosphere a) { return a._control.x; } // numSamples
vec2 getAtmosphereGAndG2(Atmosphere a) { return a._control.yz; } // g and g2
float atmosphereScale(float scaleDepth, float fCos)
{
float x = 1.0 - fCos;
return scaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
vec4 evalAtmosphereContribution(Atmosphere atmospheric, vec3 position, vec3 cameraPos, vec3 lightPos) {
float fInnerRadius = getAtmosphereInnerRadius(atmospheric);
float fSamples = getAtmosphereNumSamples(atmospheric);
vec3 v3InvWavelength = getAtmosphereInvWaveLength(atmospheric);
vec4 scatteringCoefs = getAtmosphereScattering(atmospheric);
float fKrESun = scatteringCoefs.x;
float fKmESun = scatteringCoefs.y;
float fKr4PI = scatteringCoefs.z;
float fKm4PI = scatteringCoefs.w;
vec2 gAndg2 = getAtmosphereGAndG2(atmospheric);
float g = gAndg2.x;
float g2 = gAndg2.y;
float fScale = getAtmosphereScale(atmospheric);
float fScaleDepth = getAtmosphereScaleDepth(atmospheric);
float fScaleOverScaleDepth = getAtmosphereScaleOverScaleDepth(atmospheric);
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = position;
vec3 v3Ray = v3Pos - cameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = cameraPos;
float fHeight = length(v3Start);
float fDepthStart = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fStartAngle = dot(v3Ray, v3Start) / fHeight;
float fStartOffset = fDepthStart * atmosphereScale(fScaleDepth, fStartAngle);
// Initialize the scattering loop variables
//gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
// int nSamples = numSamples;
int nSamples = int(fSamples);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(lightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot((v3Ray), v3SamplePoint) / fHeight * 0.99;
float fScatter = (fStartOffset + fDepth * (atmosphereScale(fScaleDepth, fLightAngle) - atmosphereScale(fScaleDepth, fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
vec3 secondaryFrontColor = v3FrontColor * fKmESun;
vec3 frontColor = v3FrontColor * (v3InvWavelength * fKrESun);
vec3 v3Direction = cameraPos - v3Pos;
float fCos = dot(lightPos, v3Direction) / length(v3Direction);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
vec4 finalColor;
finalColor.rgb = frontColor.rgb + fMiePhase * secondaryFrontColor.rgb;
finalColor.a = finalColor.b;
finalColor.rgb = pow(finalColor.rgb, vec3(1.0/2.2));
return finalColor;
}
<@if GLPROFILE == PC_GL@>
uniform atmosphereBuffer {
@ -171,75 +171,75 @@ Atmosphere getAtmosphere() {
return atmosphere;
}
<@endif@>
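<!
// Usage sketch (illustration only): a typical call site once getAtmosphere() is available.
// worldPos, eyePos and lightDir are placeholders for values supplied by the including shader.
//
//     vec4 haze = evalAtmosphereContribution(getAtmosphere(), worldPos, eyePos, lightDir);
//     color.rgb = mix(color.rgb, haze.rgb, haze.a);
!>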
<!
/*
// uniform vec3 v3CameraPos; // The camera's current position
const int nSamples = 2;
const float fSamples = 2.0;
uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
varying vec3 position;
float scale(float fCos)
{
float x = 1.0 - fCos;
return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main (void)
{
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = position;
vec3 v3Ray = v3Pos - v3CameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = v3CameraPos;
float fHeight = length(v3Start);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fStartAngle = dot(v3Ray, v3Start) / fHeight;
float fStartOffset = fDepth * scale(fStartAngle);
// Initialize the scattering loop variables
//gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot((v3Ray), v3SamplePoint) / fHeight * 0.99;
float fScatter = (fStartOffset + fDepth * (scale(fLightAngle) - scale(fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
vec3 secondaryFrontColor = v3FrontColor * fKmESun;
vec3 frontColor = v3FrontColor * (v3InvWavelength * fKrESun);
vec3 v3Direction = v3CameraPos - v3Pos;
float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
gl_FragColor.rgb = frontColor.rgb + fMiePhase * secondaryFrontColor.rgb;
gl_FragColor.a = gl_FragColor.b;
gl_FragColor.rgb = pow(gl_FragColor.rgb, vec3(1.0/2.2));
}
*/
!>
<!
/*
// uniform vec3 v3CameraPos; // The camera's current position
const int nSamples = 2;
const float fSamples = 2.0;
uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
varying vec3 position;
float scale(float fCos)
{
float x = 1.0 - fCos;
return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main (void)
{
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = position;
vec3 v3Ray = v3Pos - v3CameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = v3CameraPos;
float fHeight = length(v3Start);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fStartAngle = dot(v3Ray, v3Start) / fHeight;
float fStartOffset = fDepth * scale(fStartAngle);
// Initialize the scattering loop variables
//gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot((v3Ray), v3SamplePoint) / fHeight * 0.99;
float fScatter = (fStartOffset + fDepth * (scale(fLightAngle) - scale(fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
vec3 secondaryFrontColor = v3FrontColor * fKmESun;
vec3 frontColor = v3FrontColor * (v3InvWavelength * fKrESun);
vec3 v3Direction = v3CameraPos - v3Pos;
float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
outFragColor.rgb = frontColor.rgb + fMiePhase * secondaryFrontColor.rgb;
outFragColor.a = outFragColor.b;
outFragColor.rgb = pow(outFragColor.rgb, vec3(1.0/2.2));
}
*/
!>
<@endif@>

View file

@ -18,32 +18,17 @@ struct Material {
vec4 _spare;
};
float getMaterialOpacity(Material m) { return m._diffuse.a; }
vec3 getMaterialDiffuse(Material m) { return m._diffuse.rgb; }
vec3 getMaterialSpecular(Material m) { return m._specular.rgb; }
float getMaterialShininess(Material m) { return m._specular.a; }
<@if GPU_FEATURE_PROFILE == GPU_CORE@>
uniform materialBuffer {
Material _mat;
};
Material getMaterial() {
return _mat;
}
<@else@>
uniform vec4 materialBuffer[4];
Material getMaterial() {
Material mat;
mat._diffuse = materialBuffer[0];
mat._specular = materialBuffer[1];
mat._emissive = materialBuffer[2];
mat._spare = materialBuffer[3];
return mat;
}
<@endif@>
<@endif@>
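<!
// Usage sketch (illustration only): a fragment shader that includes this file reads the material
// through the accessors, so the same code compiles against either branch above.
//
//     Material mat = getMaterial();
//     vec3 albedo = getMaterialDiffuse(mat);
//     float opacity = getMaterialOpacity(mat);
//     float shininess = getMaterialShininess(mat);
!>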

View file

@ -2,6 +2,7 @@
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
// skybox.frag
// fragment shader
//
// Created by Sam Gateau on 5/5/2015.
// Copyright 2015 High Fidelity, Inc.
@ -12,14 +13,22 @@
uniform samplerCube cubeMap;
varying vec3 normal;
varying vec2 texcoord;
varying vec3 color;
struct Skybox {
vec4 _color;
};
uniform skyboxBuffer {
Skybox _skybox;
};
in vec3 _normal;
out vec4 _fragColor;
void main(void) {
vec3 coord = normalize(normal);
vec4 texel = textureCube(cubeMap, coord);
vec3 pixel = pow(texel.xyz * color, vec3(1.0/2.2)); // manual Gamma correction
gl_FragData[0] = vec4(pixel, 0.0);
vec3 coord = normalize(_normal);
vec3 texel = texture(cubeMap, coord).rgb;
vec3 color = texel * _skybox._color.rgb;
vec3 pixel = pow(color, vec3(1.0/2.2)); // manual Gamma correction
_fragColor = vec4(pixel, 0.0);
}

View file

@ -11,48 +11,24 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
struct Skybox {
vec4 _color;
};
<@if GPU_FEATURE_PROFILE == GPU_CORE @>
uniform skyboxBuffer {
Skybox _skybox;
};
Skybox getSkybox() {
return _skybox;
}
<@else@>
uniform vec4 skyboxBuffer[1];
Skybox getSkybox() {
Skybox _skybox;
_skybox._color = skyboxBuffer[0];
return _skybox;
}
<@endif@>
varying vec3 normal;
varying vec2 texcoord;
varying vec3 color;
void main(void) {
texcoord = gl_Vertex.xy;
Skybox skybox = getSkybox();
color = skybox._color.xyz;
out vec3 _normal;
void main(void) {
// standard transform
TransformCamera cam = getTransformCamera();
vec3 clipDir = vec3(texcoord.xy, 0.0);
vec3 clipDir = vec3(inPosition.xy, 0.0);
vec3 eyeDir;
<$transformClipToEyeDir(cam, clipDir, eyeDir)$>;
<$transformEyeToWorldDir(cam, eyeDir, normal)$>;
// Position is supposed to come in clip space
gl_Position = vec4(texcoord.xy, 0.0, 1.0);
}
<$transformClipToEyeDir(cam, clipDir, eyeDir)$>
<$transformEyeToWorldDir(cam, eyeDir, _normal)$>
// Position is supposed to come in clip space
gl_Position = vec4(inPosition.xy, 0.0, 1.0);
}

View file

@ -8,8 +8,6 @@ qt5_add_resources(QT_RESOURCES_FILE "${CMAKE_CURRENT_SOURCE_DIR}/res/fonts/fonts
# use setup_hifi_library macro to setup our project and link appropriate Qt modules
setup_hifi_library(Widgets OpenGL Network Qml Quick Script)
setup_hifi_opengl()
add_dependency_external_projects(glm)
find_package(GLM REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLM_INCLUDE_DIRS})

View file

@ -50,10 +50,10 @@ struct DeferredFragment {
DeferredFragment unpackDeferredFragment(vec2 texcoord) {
DeferredFragment frag;
frag.depthVal = texture2D(depthMap, texcoord).r;
frag.normalVal = texture2D(normalMap, texcoord);
frag.diffuseVal = texture2D(diffuseMap, texcoord);
frag.specularVal = texture2D(specularMap, texcoord);
frag.depthVal = texture(depthMap, texcoord).r;
frag.normalVal = texture(normalMap, texcoord);
frag.diffuseVal = texture(diffuseMap, texcoord);
frag.specularVal = texture(specularMap, texcoord);
// compute the view space position using the depth
float z = near / (frag.depthVal * depthScale - 1.0);

View file

@ -11,6 +11,10 @@
<@if not DEFERRED_BUFFER_WRITE_SLH@>
<@def DEFERRED_BUFFER_WRITE_SLH@>
layout(location = 0) out vec4 _fragColor0;
layout(location = 1) out vec4 _fragColor1;
layout(location = 2) out vec4 _fragColor2;
// the glow intensity
uniform float glowIntensity;
@ -28,9 +32,9 @@ void packDeferredFragment(vec3 normal, float alpha, vec3 diffuse, vec3 specular,
if (alpha != glowIntensity) {
discard;
}
gl_FragData[0] = vec4(diffuse.rgb, alpha);
gl_FragData[1] = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
gl_FragData[2] = vec4(specular, shininess / 128.0);
_fragColor0 = vec4(diffuse.rgb, alpha);
_fragColor1 = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
_fragColor2 = vec4(specular, shininess / 128.0);
}
void packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 diffuse, vec3 specular, float shininess, vec3 emissive) {
@ -38,10 +42,10 @@ void packDeferredFragmentLightmap(vec3 normal, float alpha, vec3 diffuse, vec3 s
discard;
}
gl_FragData[0] = vec4(diffuse.rgb, alpha);
//gl_FragData[1] = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
gl_FragData[1] = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 0.5);
gl_FragData[2] = vec4(emissive, shininess / 128.0);
_fragColor0 = vec4(diffuse.rgb, alpha);
//_fragColor1 = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
_fragColor1 = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 0.5);
_fragColor2 = vec4(emissive, shininess / 128.0);
}
void packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 diffuse, vec3 specular, float shininess) {
@ -49,9 +53,9 @@ void packDeferredFragmentTranslucent(vec3 normal, float alpha, vec3 diffuse, vec
discard;
}
gl_FragData[0] = vec4(diffuse.rgb, alpha);
// gl_FragData[1] = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
// gl_FragData[2] = vec4(specular, shininess / 128.0);
_fragColor0 = vec4(diffuse.rgb, alpha);
// _fragColor1 = vec4(normal, 0.0) * 0.5 + vec4(0.5, 0.5, 0.5, 1.0);
// _fragColor2 = vec4(specular, shininess / 128.0);
}
<@endif@>
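<!
// Usage sketch (illustration only): an opaque model fragment shader fills all three render
// targets with a single call. _normal, _texCoord0 and diffuseMap are assumed to be declared by
// the including shader, not by this file.
//
//     vec4 diffuse = texture(diffuseMap, _texCoord0);
//     packDeferredFragment(normalize(_normal), glowIntensity, diffuse.rgb, vec3(0.03), 10.0);
!>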

View file

@ -16,12 +16,9 @@
uniform samplerCube skyboxMap;
vec4 evalSkyboxLight(vec3 direction, float lod) {
<@if GPU_TRANSFORM_PROFILE == GPU_CORE@>
vec4 skytexel = textureCubeLod(skyboxMap, direction, lod * textureQueryLevels(skyboxMap));
<@else@>
vec4 skytexel = textureCube(skyboxMap, direction);
<@endif@>
// FIXME
//vec4 skytexel = textureLod(skyboxMap, direction, lod * textureQueryLevels(skyboxMap));
vec4 skytexel = texture(skyboxMap, direction);
return skytexel;
}

View file

@ -406,9 +406,7 @@ void DeferredLightingEffect::render(RenderArgs* args) {
Transform viewMat;
args->_viewFrustum->evalProjectionMatrix(projMat);
args->_viewFrustum->evalViewTransform(viewMat);
if (args->_renderMode == RenderArgs::MIRROR_RENDER_MODE) {
viewMat.postScale(glm::vec3(-1.0f, 1.0f, 1.0f));
}
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat);

View file

@ -277,7 +277,7 @@ void GeometryCache::renderSphere(gpu::Batch& batch, float radius, int slices, in
streamFormat = std::make_shared<gpu::Stream::Format>(); // 1 for everyone
streamFormat->setAttribute(gpu::Stream::POSITION, VERTICES_SLOT, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
streamFormat->setAttribute(gpu::Stream::NORMAL, NORMALS_SLOT, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
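// gpu::NUINT8 marks the packed color bytes as normalized (0-255 is read as 0.0-1.0 in the shader),
// which the plain UINT8 element above does not.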
positionElement = streamFormat->getAttributes().at(gpu::Stream::POSITION)._element;
normalElement = streamFormat->getAttributes().at(gpu::Stream::NORMAL)._element;
colorElement = streamFormat->getAttributes().at(gpu::Stream::COLOR)._element;
@ -363,7 +363,7 @@ void GeometryCache::renderGrid(gpu::Batch& batch, int xDivisions, int yDivisions
auto streamFormat = std::make_shared<gpu::Stream::Format>(); // 1 for everyone
streamFormat->setAttribute(gpu::Stream::POSITION, VERTICES_SLOT, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ), 0);
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
gpu::BufferView verticesView(verticesBuffer, 0, verticesBuffer->getSize(), streamFormat->getAttributes().at(gpu::Stream::POSITION)._element);
gpu::BufferView colorView(colorBuffer, streamFormat->getAttributes().at(gpu::Stream::COLOR)._element);
@ -470,7 +470,7 @@ void GeometryCache::renderGrid(gpu::Batch& batch, int x, int y, int width, int h
auto streamFormat = std::make_shared<gpu::Stream::Format>(); // 1 for everyone
streamFormat->setAttribute(gpu::Stream::POSITION, VERTICES_SLOT, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ), 0);
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
gpu::BufferView verticesView(verticesBuffer, 0, verticesBuffer->getSize(), streamFormat->getAttributes().at(gpu::Stream::POSITION)._element);
gpu::BufferView colorView(colorBuffer, streamFormat->getAttributes().at(gpu::Stream::COLOR)._element);
@ -507,7 +507,7 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec2>& points, con
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
@ -568,7 +568,7 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec3>& points, con
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
@ -632,7 +632,7 @@ void GeometryCache::updateVertices(int id, const QVector<glm::vec3>& points, con
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::TEXCOORD, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV), 3 * sizeof(float));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
@ -786,7 +786,7 @@ void GeometryCache::renderSolidCube(gpu::Batch& batch, float size, const glm::ve
streamFormat = std::make_shared<gpu::Stream::Format>(); // 1 for everyone
streamFormat->setAttribute(gpu::Stream::POSITION, VERTICES_SLOT, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
streamFormat->setAttribute(gpu::Stream::NORMAL, NORMALS_SLOT, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
positionElement = streamFormat->getAttributes().at(gpu::Stream::POSITION)._element;
normalElement = streamFormat->getAttributes().at(gpu::Stream::NORMAL)._element;
colorElement = streamFormat->getAttributes().at(gpu::Stream::COLOR)._element;
@ -873,7 +873,7 @@ void GeometryCache::renderWireCube(gpu::Batch& batch, float size, const glm::vec
if (!streamFormat) {
streamFormat = std::make_shared<gpu::Stream::Format>(); // 1 for everyone
streamFormat->setAttribute(gpu::Stream::POSITION, VERTICES_SLOT, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
streamFormat->setAttribute(gpu::Stream::COLOR, COLOR_SLOT, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
positionElement = streamFormat->getAttributes().at(gpu::Stream::POSITION)._element;
colorElement = streamFormat->getAttributes().at(gpu::Stream::COLOR)._element;
}
@ -929,7 +929,7 @@ void GeometryCache::renderBevelCornersRect(gpu::Batch& batch, int x, int y, int
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
@ -1031,7 +1031,7 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec2& minCorner, co
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
@ -1126,7 +1126,7 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec2& minCorner, co
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::TEXCOORD, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV), VERTEX_TEXCOORD_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
@ -1200,7 +1200,7 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec3& minCorner, co
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
@ -1293,7 +1293,7 @@ void GeometryCache::renderQuad(gpu::Batch& batch, const glm::vec3& topLeft, cons
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::TEXCOORD, 0, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV), VERTEX_TEXCOORD_OFFSET);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
@ -1373,7 +1373,7 @@ void GeometryCache::renderDashedLine(gpu::Batch& batch, const glm::vec3& start,
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
@ -1531,7 +1531,7 @@ void GeometryCache::renderLine(gpu::Batch& batch, const glm::vec3& p1, const glm
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);
@ -1615,7 +1615,7 @@ void GeometryCache::renderLine(gpu::Batch& batch, const glm::vec2& p1, const glm
details.stream = stream;
details.streamFormat->setAttribute(gpu::Stream::POSITION, 0, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA));
details.streamFormat->setAttribute(gpu::Stream::COLOR, 1, gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA));
details.stream->addBuffer(details.verticesBuffer, 0, details.streamFormat->getChannels().at(0)._stride);
details.stream->addBuffer(details.colorBuffer, 0, details.streamFormat->getChannels().at(1)._stride);

View file

@ -156,8 +156,6 @@ void Model::RenderPipelineLib::addRenderPipeline(Model::RenderKey key,
RenderKey mirrorKey(key.getRaw() | RenderKey::IS_MIRROR);
auto mirrorState = std::make_shared<gpu::State>(state->getValues());
mirrorState->setFrontFaceClockwise(true);
// create a new RenderPipeline with the same shader side and the mirrorState
auto mirrorPipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, mirrorState));
insert(value_type(mirrorKey.getRaw(), RenderPipeline(mirrorPipeline, locations)));
@ -190,11 +188,8 @@ void Model::RenderPipelineLib::initLocations(gpu::ShaderPointer& program, Model:
locations.clusterMatrices = program->getUniforms().findLocation("clusterMatrices");
locations.clusterIndices = program->getInputs().findLocation("clusterIndices");;
locations.clusterWeights = program->getInputs().findLocation("clusterWeights");;
locations.clusterIndices = program->getInputs().findLocation("inSkinClusterIndex");
locations.clusterWeights = program->getInputs().findLocation("inSkinClusterWeight");
}
AbstractViewStateInterface* Model::_viewState = NULL;

View file

@ -166,9 +166,7 @@ void DrawOpaqueDeferred::run(const SceneContextPointer& sceneContext, const Rend
Transform viewMat;
args->_viewFrustum->evalProjectionMatrix(projMat);
args->_viewFrustum->evalViewTransform(viewMat);
if (args->_renderMode == RenderArgs::MIRROR_RENDER_MODE) {
viewMat.preScale(glm::vec3(-1.0f, 1.0f, 1.0f));
}
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat);
@ -197,9 +195,7 @@ void DrawTransparentDeferred::run(const SceneContextPointer& sceneContext, const
Transform viewMat;
args->_viewFrustum->evalProjectionMatrix(projMat);
args->_viewFrustum->evalViewTransform(viewMat);
if (args->_renderMode == RenderArgs::MIRROR_RENDER_MODE) {
viewMat.postScale(glm::vec3(-1.0f, 1.0f, 1.0f));
}
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat);
@ -259,9 +255,7 @@ void DrawOverlay3D::run(const SceneContextPointer& sceneContext, const RenderCon
Transform viewMat;
args->_viewFrustum->evalProjectionMatrix(projMat);
args->_viewFrustum->evalViewTransform(viewMat);
if (args->_renderMode == RenderArgs::MIRROR_RENDER_MODE) {
viewMat.postScale(glm::vec3(-1.0f, 1.0f, 1.0f));
}
batch.setProjectionTransform(projMat);
batch.setViewTransform(viewMat);
batch.setViewportTransform(args->_viewport);

View file

@ -14,13 +14,25 @@
// the shadow texture
uniform sampler2DShadow shadowMap;
struct EyePlanes {
vec4 _S[1];
vec4 _T[1];
vec4 _R[1];
vec4 _Q[1];
};
uniform eyePlanes {
EyePlanes _eyePlanes;
};
EyePlanes getEyePlanes() {
return _eyePlanes;
}
// Fetching it
float fetchShadow(vec3 texcoord) {
<@if GPU_FEATURE_PROFILE == GPU_CORE @>
return texture(shadowMap, texcoord);
<@else@>
return shadow2D(shadowMap, texcoord).r;
<@endif@>
}
// the distances to the cascade sections
@ -44,17 +56,22 @@ vec2 samples[8] = vec2[8](
vec4 evalShadowTexcoord(vec4 position) {
// compute the corresponding texture coordinates
vec3 shadowTexcoord = vec3(dot(gl_EyePlaneS[0], position), dot(gl_EyePlaneT[0], position), dot(gl_EyePlaneR[0], position));
EyePlanes eyePlanes = getEyePlanes();
vec3 shadowTexcoord = vec3(dot(eyePlanes._S[0], position), dot(eyePlanes._T[0], position), dot(eyePlanes._R[0], position));
return vec4(shadowTexcoord, 0.0);
}
vec4 evalCascadedShadowTexcoord(vec4 position) {
EyePlanes eyePlanes = getEyePlanes();
// compute the index of the cascade to use and the corresponding texture coordinates
int shadowIndex = int(dot(step(vec3(position.z), shadowDistances), vec3(1.0, 1.0, 1.0)));
vec3 shadowTexcoord = vec3(dot(gl_EyePlaneS[shadowIndex], position), dot(gl_EyePlaneT[shadowIndex], position),
dot(gl_EyePlaneR[shadowIndex], position));
vec3 shadowTexcoord = vec3(
dot(eyePlanes._S[shadowIndex], position),
dot(eyePlanes._T[shadowIndex], position),
dot(eyePlanes._R[shadowIndex], position));
return vec4(shadowTexcoord, shadowIndex);
}
float evalShadowAttenuationPCF(vec4 shadowTexcoord) {
@ -76,7 +93,7 @@ float evalShadowAttenuationPCF(vec4 shadowTexcoord) {
));
}
return shadowAttenuation;
}
float evalShadowAttenuationBasic(vec4 shadowTexcoord) {
@ -87,11 +104,11 @@ float evalShadowAttenuationBasic(vec4 shadowTexcoord) {
fetchShadow(shadowTexcoord.xyz + radiusScale * shadowScale * vec3(samples[2], 0.0)) +
fetchShadow(shadowTexcoord.xyz + radiusScale * shadowScale * vec3(samples[3], 0.0))
));
return shadowAttenuation;
}
float evalShadowAttenuation(vec4 shadowTexcoord) {
return evalShadowAttenuationBasic(shadowTexcoord);
}
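<!
// Usage sketch (illustration only): the intended call sequence from a deferred light shader,
// where `position` stands in for the fragment position the caller supplies (the deferred
// lighting shaders below pass frag.position).
//
//     vec4 shadowTexcoord = evalCascadedShadowTexcoord(position);
//     float shadowAttenuation = evalShadowAttenuation(shadowTexcoord);
!>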

View file

@ -33,16 +33,16 @@
// Copyright (c) 2004 Sean O'Neil
//
uniform vec3 v3CameraPos; // The camera's current position
uniform vec3 v3InvWavelength; // 1 / pow(wavelength, 4) for the red, green, and blue channels
uniform float fInnerRadius; // The inner (planetary) radius
uniform float fKrESun; // Kr * ESun
uniform float fKmESun; // Km * ESun
uniform float fKr4PI; // Kr * 4 * PI
uniform float fKm4PI; // Km * 4 * PI
uniform float fScale; // 1 / (fOuterRadius - fInnerRadius)
uniform float fScaleDepth; // The scale depth (i.e. the altitude at which the atmosphere's average density is found)
uniform float fScaleOverScaleDepth; // fScale / fScaleDepth
const int nSamples = 2;
const float fSamples = 2.0;
@ -51,7 +51,8 @@ uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
varying vec3 position;
in vec3 position;
out vec4 outFragColor;
float scale(float fCos)
@ -97,7 +98,7 @@ void main (void)
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
// Finally, scale the Mie and Rayleigh colors and set up the in variables for the pixel shader
vec3 secondaryFrontColor = v3FrontColor * fKmESun;
vec3 frontColor = v3FrontColor * (v3InvWavelength * fKrESun);
vec3 v3Direction = v3CameraPos - v3Pos;
@ -106,6 +107,6 @@ void main (void)
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
vec3 finalColor = frontColor.rgb + fMiePhase * secondaryFrontColor.rgb;
gl_FragColor.a = finalColor.b;
gl_FragColor.rgb = pow(finalColor.rgb, vec3(1.0/2.2));
outFragColor.a = finalColor.b;
outFragColor.rgb = pow(finalColor.rgb, vec3(1.0/2.2));
}

View file

@ -33,32 +33,33 @@
// Copyright (c) 2004 Sean O'Neil
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
uniform vec3 v3CameraPos; // The camera's current position
uniform vec3 v3LightPos; // The direction vector to the light source
uniform vec3 v3InvWavelength; // 1 / pow(wavelength, 4) for the red, green, and blue channels
uniform float fOuterRadius; // The outer (atmosphere) radius
uniform float fInnerRadius; // The inner (planetary) radius
uniform float fKrESun; // Kr * ESun
uniform float fKmESun; // Km * ESun
uniform float fKr4PI; // Kr * 4 * PI
uniform float fKm4PI; // Km * 4 * PI
uniform float fScale; // 1 / (fOuterRadius - fInnerRadius)
uniform float fScaleDepth; // The scale depth (i.e. the altitude at which the atmosphere's average density is found)
uniform float fScaleOverScaleDepth; // fScale / fScaleDepth
const int nSamples = 2;
const float fSamples = 2.0;
varying vec3 position;
out vec3 position;
void main(void)
{
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
position = gl_Vertex.xyz * fOuterRadius;
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
position = inPosition.xyz * fOuterRadius;
// standard transform
TransformCamera cam = getTransformCamera();

View file

@ -1,4 +1,5 @@
#version 120
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
//
// For licensing information, see http://http.developer.nvidia.com/GPUGems/gpugems_app01.html:
@ -32,20 +33,20 @@
// Copyright (c) 2004 Sean O'Neil
//
uniform vec3 v3CameraPos; // The camera's current position
uniform vec3 v3LightPos; // The direction vector to the light source
uniform vec3 v3InvWavelength; // 1 / pow(wavelength, 4) for the red, green, and blue channels
uniform float fCameraHeight2; // fCameraHeight^2
uniform float fOuterRadius; // The outer (atmosphere) radius
uniform float fOuterRadius2; // fOuterRadius^2
uniform float fInnerRadius; // The inner (planetary) radius
uniform float fKrESun; // Kr * ESun
uniform float fKmESun; // Km * ESun
uniform float fKr4PI; // Kr * 4 * PI
uniform float fKm4PI; // Km * 4 * PI
uniform float fScale; // 1 / (fOuterRadius - fInnerRadius)
uniform float fScaleDepth; // The scale depth (i.e. the altitude at which the atmosphere's average density is found)
uniform float fScaleOverScaleDepth; // fScale / fScaleDepth
uniform float g;
uniform float g2;
@ -53,63 +54,65 @@ uniform float g2;
const int nSamples = 2;
const float fSamples = 2.0;
varying vec3 position;
in vec3 position;
out vec4 outFragColor;
float scale(float fCos)
{
float x = 1.0 - fCos;
return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main (void)
{
// Get the ray from the camera to the vertex and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = position;
vec3 v3Ray = v3Pos - v3CameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the closest intersection of the ray with the outer atmosphere (which is the near point of the ray passing through the atmosphere)
float B = 2.0 * dot(v3CameraPos, v3Ray);
float C = fCameraHeight2 - fOuterRadius2;
float fDet = max(0.0, B*B - 4.0 * C);
float fNear = 0.5 * (-B - sqrt(fDet));
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = v3CameraPos + v3Ray * fNear;
fFar -= fNear;
float fStartAngle = dot(v3Ray, v3Start) / fOuterRadius;
float fStartDepth = exp(-1.0 / fScaleDepth);
float fStartOffset = fStartDepth * scale(fStartAngle);
// Initialize the scattering loop variables
//gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot((v3Ray), v3SamplePoint) / fHeight * 0.99;
float fScatter = (fStartOffset + fDepth * (scale(fLightAngle) - scale(fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
vec3 v3Direction = v3CameraPos - v3Pos;
float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
vec3 color = v3FrontColor * (v3InvWavelength * fKrESun);
vec3 secondaryColor = v3FrontColor * fKmESun;
vec3 finalColor = color + fMiePhase * secondaryColor;
gl_FragColor.a = finalColor.b;
gl_FragColor.rgb = pow(finalColor.rgb, vec3(1.0/2.2));
outFragColor.a = finalColor.b;
outFragColor.rgb = pow(finalColor.rgb, vec3(1.0/2.2));
}

View file

@ -33,16 +33,17 @@
// Copyright (c) 2004 Sean O'Neil
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
uniform float fOuterRadius; // The outer (atmosphere) radius
varying vec3 position;
out vec3 position;
void main(void) {
position = gl_Vertex.xyz * fOuterRadius;
position = inPosition.xyz * fOuterRadius;
// standard transform
TransformCamera cam = getTransformCamera();

View file

@ -21,7 +21,7 @@
// Based on NVidia HBAO implementation in D3D11
// http://www.nvidia.co.uk/object/siggraph-2008-HBAO.html
varying vec2 varTexcoord;
in vec2 varTexcoord;
uniform sampler2D depthTexture;
uniform sampler2D normalTexture;
@ -47,12 +47,15 @@ const float AOStrength = 1.9;
const float R = 0.3;
const float R2 = 0.3*0.3;
const float NegInvR2 = - 1.0 / (0.3*0.3);
const float TanBias = tan(30.0 * PI / 180.0);
// can't use tan to initialize a const value
const float TanBias = 0.57735027; // tan(30.0 * PI / 180.0);
const float MaxRadiusPixels = 50.0;
const int NumDirections = 6;
const int NumSamples = 4;
out vec4 outFragColor;
float ViewSpaceZFromDepth(float d){
// [0,1] -> [-1,1] clip space
d = d * 2.0 - 1.0;
@ -67,14 +70,14 @@ vec3 UVToViewSpace(vec2 uv, float z){
}
vec3 GetViewPos(vec2 uv){
float z = ViewSpaceZFromDepth(texture2D(depthTexture, uv).r);
float z = ViewSpaceZFromDepth(texture(depthTexture, uv).r);
return UVToViewSpace(uv, z);
}
vec3 GetViewPosPoint(ivec2 uv){
vec2 coord = vec2(gl_FragCoord.xy) + uv;
//float z = texelFetch(texture0, coord, 0).r;
float z = texture2D(depthTexture, uv).r;
float z = texture(depthTexture, uv).r;
return UVToViewSpace(uv, z);
}
@ -241,5 +244,5 @@ void main(void){
ao = 1.0 - ao / numDirections * AOStrength;
}
gl_FragColor = vec4(vec3(ao), 1.0);
outFragColor = vec4(vec3(ao), 1.0);
}

View file

@ -12,13 +12,15 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
varying vec2 varTexcoord;
out vec2 varTexcoord;
void main(void) {
varTexcoord = gl_MultiTexCoord0.xy;
gl_Position = gl_Vertex;
varTexcoord = inTexCoord0.xy;
gl_Position = inPosition;
}

View file

@ -12,7 +12,11 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
out vec2 _texCoord0;
void main(void) {
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_Position = gl_Vertex;
_texCoord0 = inTexCoord0.st;
gl_Position = inPosition;
}

View file

@ -14,17 +14,21 @@
<@include gpu/Transform.slh@>
<@include gpu/Inputs.slh@>
<$declareStandardTransform()$>
uniform mat4 texcoordMat;
out vec4 _texCoord0;
void main(void) {
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, gl_Vertex, gl_Position)$>;
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>;
vec4 projected = gl_Position / gl_Position.w;
gl_TexCoord[0] = vec4(dot(projected, texcoordMat[0]) * gl_Position.w,
_texCoord0 = vec4(dot(projected, texcoordMat[0]) * gl_Position.w,
dot(projected, texcoordMat[1]) * gl_Position.w, 0.0, gl_Position.w);
}

View file

@ -12,6 +12,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
@ -19,8 +21,10 @@
uniform mat4 texcoordMat;
uniform vec4 coneParam;
out vec4 _texCoord0;
void main(void) {
vec4 coneVertex = gl_Vertex;
vec4 coneVertex = inPosition;
if (coneParam.w != 0.0) {
if(coneVertex.z >= 0.0) {
// Evaluate the true position of the spot volume
@ -42,6 +46,6 @@ void main(void) {
<$transformModelToClipPos(cam, obj, coneVertex, gl_Position)$>;
vec4 projected = gl_Position / gl_Position.w;
gl_TexCoord[0] = vec4(dot(projected, texcoordMat[0]) * gl_Position.w,
_texCoord0 = vec4(dot(projected, texcoordMat[0]) * gl_Position.w,
dot(projected, texcoordMat[1]) * gl_Position.w, 0.0, gl_Position.w);
}

View file

@ -17,8 +17,11 @@
<@include DeferredGlobalLight.slh@>
in vec2 _texCoord0;
out vec4 _fragColor;
void main(void) {
DeferredFragment frag = unpackDeferredFragment(gl_TexCoord[0].st);
DeferredFragment frag = unpackDeferredFragment(_texCoord0);
// Light mapped or not ?
@ -29,7 +32,7 @@ void main(void) {
frag.diffuse,
frag.specularVal.xyz);
gl_FragColor = vec4(color, 1.0);
_fragColor = vec4(color, 1.0);
} else {
vec3 color = evalAmbienSphereGlobalColor(1.0,
frag.position.xyz,
@ -38,6 +41,6 @@ void main(void) {
frag.specular,
frag.gloss);
gl_FragColor = vec4(color, frag.normalVal.a);
_fragColor = vec4(color, frag.normalVal.a);
}
}

View file

@ -20,8 +20,11 @@
// Everything about shadow
<@include Shadow.slh@>
in vec2 _texCoord0;
out vec4 _fragColor;
void main(void) {
DeferredFragment frag = unpackDeferredFragment(gl_TexCoord[0].st);
DeferredFragment frag = unpackDeferredFragment(_texCoord0);
// Eval shadow Texcoord and then Attenuation
vec4 shadowTexcoord = evalCascadedShadowTexcoord(frag.position);
@ -29,7 +32,7 @@ void main(void) {
// Light mapped or not ?
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
gl_FragColor = vec4(evalLightmappedColor(
_fragColor = vec4(evalLightmappedColor(
shadowAttenuation,
frag.normal,
frag.diffuse,
@ -43,6 +46,6 @@ void main(void) {
frag.specular,
frag.gloss);
gl_FragColor = vec4(color, frag.normalVal.a);
_fragColor = vec4(color, frag.normalVal.a);
}
}

View file

@ -20,9 +20,11 @@
// Everything about shadow
<@include Shadow.slh@>
in vec2 _texCoord0;
out vec4 _fragColor;
void main(void) {
DeferredFragment frag = unpackDeferredFragment(gl_TexCoord[0].st);
DeferredFragment frag = unpackDeferredFragment(_texCoord0);
// Eval shadow Texcoord and then Attenuation
vec4 shadowTexcoord = evalShadowTexcoord(frag.position);
@ -30,7 +32,7 @@ void main(void) {
// Light mapped or not ?
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
gl_FragColor = vec4(evalLightmappedColor(
_fragColor = vec4(evalLightmappedColor(
shadowAttenuation,
frag.normal,
frag.diffuse,
@ -44,6 +46,6 @@ void main(void) {
frag.specular,
frag.gloss);
gl_FragColor = vec4(color, frag.normalVal.a);
_fragColor = vec4(color, frag.normalVal.a);
}
}

View file

@ -17,12 +17,15 @@
<@include DeferredGlobalLight.slh@>
in vec2 _texCoord0;
out vec4 _fragColor;
void main(void) {
DeferredFragment frag = unpackDeferredFragment(gl_TexCoord[0].st);
DeferredFragment frag = unpackDeferredFragment(_texCoord0);
// Light mapped or not ?
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
gl_FragColor = vec4( evalLightmappedColor(
_fragColor = vec4( evalLightmappedColor(
1.0,
frag.normal,
frag.diffuse,
@ -36,6 +39,6 @@ void main(void) {
frag.specular,
frag.gloss);
gl_FragColor = vec4(color, frag.normalVal.a);
_fragColor = vec4(color, frag.normalVal.a);
}
}

View file

@ -20,8 +20,11 @@
// Everything about shadow
<@include Shadow.slh@>
in vec2 _texCoord0;
out vec4 _fragColor;
void main(void) {
DeferredFragment frag = unpackDeferredFragment(gl_TexCoord[0].st);
DeferredFragment frag = unpackDeferredFragment(_texCoord0);
// Eval shadow Texcoord and then Attenuation
vec4 shadowTexcoord = evalCascadedShadowTexcoord(frag.position);
@ -29,7 +32,7 @@ void main(void) {
// Light mapped or not ?
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
gl_FragColor = vec4(evalLightmappedColor(
_fragColor = vec4(evalLightmappedColor(
shadowAttenuation,
frag.normal,
frag.diffuse,
@ -43,6 +46,6 @@ void main(void) {
frag.specular,
frag.gloss);
gl_FragColor = vec4(color, frag.normalVal.a);
_fragColor = vec4(color, frag.normalVal.a);
}
}

View file

@ -20,9 +20,11 @@
// Everything about shadow
<@include Shadow.slh@>
in vec2 _texCoord0;
out vec4 _fragColor;
void main(void) {
DeferredFragment frag = unpackDeferredFragment(gl_TexCoord[0].st);
DeferredFragment frag = unpackDeferredFragment(_texCoord0);
// Eval shadow Texcoord and then Attenuation
vec4 shadowTexcoord = evalShadowTexcoord(frag.position);
@ -30,7 +32,7 @@ void main(void) {
// Light mapped or not ?
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
gl_FragColor = vec4(evalLightmappedColor(
_fragColor = vec4(evalLightmappedColor(
shadowAttenuation,
frag.normal,
frag.diffuse,
@ -44,6 +46,6 @@ void main(void) {
frag.specular,
frag.gloss);
gl_FragColor = vec4(color, frag.normalVal.a);
_fragColor = vec4(color, frag.normalVal.a);
}
}

View file

@ -17,8 +17,11 @@
<@include DeferredGlobalLight.slh@>
in vec2 _texCoord0;
out vec4 _fragColor;
void main(void) {
DeferredFragment frag = unpackDeferredFragment(gl_TexCoord[0].st);
DeferredFragment frag = unpackDeferredFragment(_texCoord0);
// Light mapped or not ?
@ -29,7 +32,7 @@ void main(void) {
frag.diffuse,
frag.specularVal.xyz);
gl_FragColor = vec4(color, 1.0);
_fragColor = vec4(color, 1.0);
} else {
vec3 color = evalSkyboxGlobalColor(1.0,
frag.position.xyz,
@ -38,6 +41,6 @@ void main(void) {
frag.specular,
frag.gloss);
gl_FragColor = vec4(color, frag.normalVal.a);
_fragColor = vec4(color, frag.normalVal.a);
}
}

View file

@ -20,8 +20,11 @@
// Everything about shadow
<@include Shadow.slh@>
in vec2 _texCoord0;
out vec4 _fragColor;
void main(void) {
DeferredFragment frag = unpackDeferredFragment(gl_TexCoord[0].st);
DeferredFragment frag = unpackDeferredFragment(_texCoord0);
// Eval shadow Texcoord and then Attenuation
vec4 shadowTexcoord = evalCascadedShadowTexcoord(frag.position);
@ -29,7 +32,7 @@ void main(void) {
// Light mapped or not ?
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
gl_FragColor = vec4(evalLightmappedColor(
_fragColor = vec4(evalLightmappedColor(
shadowAttenuation,
frag.normal,
frag.diffuse,
@ -43,6 +46,6 @@ void main(void) {
frag.specular,
frag.gloss);
gl_FragColor = vec4(color, frag.normalVal.a);
_fragColor = vec4(color, frag.normalVal.a);
}
}

View file

@ -20,9 +20,11 @@
// Everything about shadow
<@include Shadow.slh@>
in vec2 _texCoord0;
out vec4 _fragColor;
void main(void) {
DeferredFragment frag = unpackDeferredFragment(gl_TexCoord[0].st);
DeferredFragment frag = unpackDeferredFragment(_texCoord0);
// Eval shadow Texcoord and then Attenuation
vec4 shadowTexcoord = evalShadowTexcoord(frag.position);
@ -30,7 +32,7 @@ void main(void) {
// Light mapped or not ?
if ((frag.normalVal.a >= 0.45) && (frag.normalVal.a <= 0.55)) {
gl_FragColor = vec4(evalLightmappedColor(
_fragColor = vec4(evalLightmappedColor(
shadowAttenuation,
frag.normal,
frag.diffuse,
@ -44,6 +46,6 @@ void main(void) {
frag.specular,
frag.gloss);
gl_FragColor = vec4(color, frag.normalVal.a);
_fragColor = vec4(color, frag.normalVal.a);
}
}

View file

@ -15,28 +15,29 @@
<@include DeferredBufferWrite.slh@>
// the interpolated normal
//varying vec4 interpolatedNormal;
//in vec4 interpolatedNormal;
varying vec2 varTexcoord;
varying vec2 varBlurTexcoords[14];
in vec2 varTexcoord;
in vec2 varBlurTexcoords[14];
uniform sampler2D occlusionTexture;
out vec4 outFragColor;
void main(void) {
gl_FragColor = vec4(0.0);
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[0])*0.0044299121055113265;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[1])*0.00895781211794;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[2])*0.0215963866053;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[3])*0.0443683338718;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[4])*0.0776744219933;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[5])*0.115876621105;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[6])*0.147308056121;
gl_FragColor += texture2D(occlusionTexture, varTexcoord)*0.159576912161;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[7])*0.147308056121;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[8])*0.115876621105;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[9])*0.0776744219933;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[10])*0.0443683338718;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[11])*0.0215963866053;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[12])*0.00895781211794;
gl_FragColor += texture2D(occlusionTexture, varBlurTexcoords[13])*0.0044299121055113265;
outFragColor = vec4(0.0);
outFragColor += texture(occlusionTexture, varBlurTexcoords[0])*0.0044299121055113265;
outFragColor += texture(occlusionTexture, varBlurTexcoords[1])*0.00895781211794;
outFragColor += texture(occlusionTexture, varBlurTexcoords[2])*0.0215963866053;
outFragColor += texture(occlusionTexture, varBlurTexcoords[3])*0.0443683338718;
outFragColor += texture(occlusionTexture, varBlurTexcoords[4])*0.0776744219933;
outFragColor += texture(occlusionTexture, varBlurTexcoords[5])*0.115876621105;
outFragColor += texture(occlusionTexture, varBlurTexcoords[6])*0.147308056121;
outFragColor += texture(occlusionTexture, varTexcoord)*0.159576912161;
outFragColor += texture(occlusionTexture, varBlurTexcoords[7])*0.147308056121;
outFragColor += texture(occlusionTexture, varBlurTexcoords[8])*0.115876621105;
outFragColor += texture(occlusionTexture, varBlurTexcoords[9])*0.0776744219933;
outFragColor += texture(occlusionTexture, varBlurTexcoords[10])*0.0443683338718;
outFragColor += texture(occlusionTexture, varBlurTexcoords[11])*0.0215963866053;
outFragColor += texture(occlusionTexture, varBlurTexcoords[12])*0.00895781211794;
outFragColor += texture(occlusionTexture, varBlurTexcoords[13])*0.0044299121055113265;
}
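
Note: besides the varying-to-in and gl_FragColor-to-out conversion, the core profile drops the dimension-specific lookup functions, so texture2D() becomes the overloaded texture(). The fifteen weights above are a normalized Gaussian kernel (they sum to roughly 1.0). A sketch of the same separable blur written as a loop over the seven mirrored side taps, assuming the varBlurTexcoords layout from the hunk (the repository keeps the unrolled form):

uniform sampler2D occlusionTexture;

in vec2 varTexcoord;
in vec2 varBlurTexcoords[14];
out vec4 outFragColor;

// half of the symmetric 15-tap Gaussian kernel, weights copied from the hunk above
const float gaussianWeights[7] = float[](0.0044299121055113265, 0.00895781211794, 0.0215963866053,
                                         0.0443683338718, 0.0776744219933, 0.115876621105, 0.147308056121);

void main(void) {
    outFragColor = texture(occlusionTexture, varTexcoord) * 0.159576912161;   // centre tap
    for (int i = 0; i < 7; ++i) {
        // taps 0..6 are the negative-offset side, taps 13..7 mirror them on the positive side
        outFragColor += texture(occlusionTexture, varBlurTexcoords[i]) * gaussianWeights[i];
        outFragColor += texture(occlusionTexture, varBlurTexcoords[13 - i]) * gaussianWeights[i];
    }
}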

View file

@ -12,16 +12,18 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
varying vec2 varTexcoord;
varying vec2 varBlurTexcoords[14];
out vec2 varTexcoord;
out vec2 varBlurTexcoords[14];
void main(void) {
varTexcoord = gl_MultiTexCoord0.xy;
gl_Position = gl_Vertex;
varTexcoord = inTexCoord0.xy;
gl_Position = inPosition;
varBlurTexcoords[0] = varTexcoord + vec2(-0.028, 0.0);
varBlurTexcoords[1] = varTexcoord + vec2(-0.024, 0.0);

View file

@ -12,16 +12,18 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
varying vec2 varTexcoord;
varying vec2 varBlurTexcoords[14];
out vec2 varTexcoord;
out vec2 varBlurTexcoords[14];
void main(void) {
varTexcoord = gl_MultiTexCoord0.xy;
gl_Position = gl_Vertex;
varTexcoord = inTexCoord0.xy;
gl_Position = inPosition;
varBlurTexcoords[0] = varTexcoord + vec2(0.0, -0.028);
varBlurTexcoords[1] = varTexcoord + vec2(0.0, -0.024);
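
Note: the matching vertex shaders lose the gl_Vertex and gl_MultiTexCoord0 built-ins; the attributes now come from gpu/Inputs.slh (inPosition, inTexCoord0, ...). For these full-screen passes the quad vertices are already in clip space, so the position is passed straight through. A minimal sketch of that pattern (illustrative):

<@include gpu/Inputs.slh@>      // declares inPosition, inTexCoord0, ... as explicit vertex attributes

out vec2 varTexcoord;           // replaces "varying vec2 varTexcoord"

void main(void) {
    varTexcoord = inTexCoord0.xy;   // was gl_MultiTexCoord0.xy
    gl_Position = inPosition;       // quad is already in clip space, no transform required
}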

View file

@ -14,11 +14,12 @@
<@include DeferredBufferWrite.slh@>
varying vec2 varQuadPosition;
in vec2 varQuadPosition;
out vec4 outFragColor;
void main(void) {
vec2 center = vec2(0.0, 0.0);
float distFromCenter = distance( vec2(0.0, 0.0), varQuadPosition);
float alpha = mix(0.0, 0.5, pow(distFromCenter,5.));
gl_FragColor = vec4(1.0, 0.0, 0.0, alpha);
outFragColor = vec4(1.0, 0.0, 0.0, alpha);
}
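
Note: for reference, the falloff above maps the quad-space distance to alpha with mix(0.0, 0.5, pow(distFromCenter, 5.0)), i.e. alpha = 0.5 * distFromCenter^5: it is 0 at the centre, stays small over most of the interior (distFromCenter = 0.5 gives 0.5 * 0.03125, about 0.016), and reaches 0.5 at distFromCenter = 1.0, so only a thin red rim is visible.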

View file

@ -12,13 +12,15 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
varying vec2 varQuadPosition;
out vec2 varQuadPosition;
void main(void) {
varQuadPosition = gl_Vertex.xy;
gl_Position = gl_Vertex;
varQuadPosition = inPosition.xy;
gl_Position = inPosition;
}

View file

@ -18,22 +18,22 @@
// the diffuse texture
uniform sampler2D diffuseMap;
// the interpolated normal
varying vec4 interpolatedNormal;
varying vec3 color;
in vec4 _position;
in vec3 _normal;
in vec3 _color;
in vec2 _texCoord0;
void main(void) {
// Fetch diffuse map
vec4 diffuse = texture2D(diffuseMap, gl_TexCoord[0].st);
vec4 diffuse = texture(diffuseMap, _texCoord0);
Material mat = getMaterial();
packDeferredFragment(
normalize(interpolatedNormal.xyz),
normalize(_normal.xyz),
evalOpaqueFinalAlpha(getMaterialOpacity(mat), diffuse.a),
getMaterialDiffuse(mat) * diffuse.rgb * color,
getMaterialDiffuse(mat) * diffuse.rgb * _color,
getMaterialSpecular(mat),
getMaterialShininess(mat));
}

View file

@ -11,6 +11,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
@ -19,27 +21,22 @@ const int MAX_TEXCOORDS = 2;
uniform mat4 texcoordMatrices[MAX_TEXCOORDS];
// interpolated eye position
varying vec4 interpolatedPosition;
// the interpolated normal
varying vec4 interpolatedNormal;
varying vec3 color;
out vec4 _position;
out vec3 _normal;
out vec3 _color;
out vec2 _texCoord0;
void main(void) {
// pass along the diffuse color
color = gl_Color.xyz;
_color = inColor.xyz;
// and the texture coordinates
gl_TexCoord[0] = texcoordMatrices[0] * vec4(gl_MultiTexCoord0.xy, 0.0, 1.0);
_texCoord0 = (texcoordMatrices[0] * vec4(inTexCoord0.st, 0.0, 1.0)).st;
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, gl_Vertex, interpolatedPosition, gl_Position)$>
<$transformModelToEyeDir(cam, obj, gl_Normal, interpolatedNormal.xyz)$>
interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
}
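
Note: model.vert shows the pattern every mesh vertex shader in this commit follows: the fixed-function gl_Vertex/gl_Normal/gl_Color inputs become the gpu/Inputs.slh attributes, and the gl_ModelViewMatrix-style math is replaced by the scribe transform helpers from gpu/Transform.slh, with the eye-space results handed to the fragment stage through named outs. Stripped of the texcoord handling, the skeleton is (illustrative, condensed from the hunk above):

<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>

out vec4 _position;     // eye-space position, consumed by the fragment stage
out vec3 _normal;       // eye-space normal
out vec3 _color;        // per-vertex color, was gl_Color

void main(void) {
    _color = inColor.xyz;
    TransformCamera cam = getTransformCamera();
    TransformObject obj = getTransformObject();
    <$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
    <$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
}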

View file

@ -23,25 +23,22 @@ uniform sampler2D diffuseMap;
uniform sampler2D emissiveMap;
uniform vec2 emissiveParams;
// the interpolated normal
varying vec4 interpolatedNormal;
varying vec3 color;
// the interpolated texcoord1
varying vec2 interpolatedTexcoord1;
in vec4 _position;
in vec2 _texCoord0;
in vec2 _texCoord1;
in vec3 _normal;
in vec3 _color;
void main(void) {
// set the diffuse, normal, specular data
vec4 diffuse = texture2D(diffuseMap, gl_TexCoord[0].st);
vec4 emissive = texture2D(emissiveMap, interpolatedTexcoord1.st);
vec4 diffuse = texture(diffuseMap, _texCoord0);
vec4 emissive = texture(emissiveMap, _texCoord1);
Material mat = getMaterial();
packDeferredFragmentLightmap(
normalize(interpolatedNormal.xyz),
normalize(_normal),
evalOpaqueFinalAlpha(getMaterialOpacity(mat), diffuse.a),
getMaterialDiffuse(mat) * diffuse.rgb * color,
getMaterialDiffuse(mat) * diffuse.rgb * _color,
getMaterialSpecular(mat),
getMaterialShininess(mat),
(vec3(emissiveParams.x) + emissiveParams.y * emissive.rgb));

View file

@ -12,6 +12,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
@ -20,32 +22,23 @@ const int MAX_TEXCOORDS = 2;
uniform mat4 texcoordMatrices[MAX_TEXCOORDS];
attribute vec2 texcoord1;
// the interpolated normal
varying vec4 interpolatedNormal;
// the interpolated texcoord1
varying vec2 interpolatedTexcoord1;
varying vec3 color;
out vec4 _position;
out vec2 _texCoord0;
out vec2 _texCoord1;
out vec3 _normal;
out vec3 _color;
void main(void) {
// pass along the diffuse color
color = gl_Color.xyz;
_color = inColor.xyz;
// and the texture coordinates
gl_TexCoord[0] = texcoordMatrices[0] * vec4(gl_MultiTexCoord0.xy, 0.0, 1.0);
// interpolatedTexcoord1 = vec2(texcoordMatrices[1] * vec4(gl_MultiTexCoord0.xy, 0.0, 1.0)).xy;
interpolatedTexcoord1 = vec2(texcoordMatrices[1] * vec4(texcoord1.xy, 0.0, 1.0)).xy;
_texCoord0 = (texcoordMatrices[0] * vec4(inTexCoord0.st, 0.0, 1.0)).st;
_texCoord1 = (texcoordMatrices[1] * vec4(inTexCoord1.st, 0.0, 1.0)).st;
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, gl_Vertex, gl_Position)$>
<$transformModelToEyeDir(cam, obj, gl_Normal, interpolatedNormal.xyz)$>
interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
}
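
Note: the lightmapped variants carry a second UV set. The old user-declared "attribute vec2 texcoord1" is replaced by inTexCoord1 from gpu/Inputs.slh, and both UV sets are run through their per-set texcoordMatrices before interpolation. A trimmed sketch of just that UV handling plus the clip-space transform (illustrative; the real shader also outputs the eye-space position and normal):

<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>

const int MAX_TEXCOORDS = 2;
uniform mat4 texcoordMatrices[MAX_TEXCOORDS];

out vec2 _texCoord0;    // diffuse/normal/specular UVs
out vec2 _texCoord1;    // lightmap UVs

void main(void) {
    // each UV set is transformed by its own texcoord matrix before interpolation
    _texCoord0 = (texcoordMatrices[0] * vec4(inTexCoord0.st, 0.0, 1.0)).st;
    _texCoord1 = (texcoordMatrices[1] * vec4(inTexCoord1.st, 0.0, 1.0)).st;
    TransformCamera cam = getTransformCamera();
    TransformObject obj = getTransformObject();
    <$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
}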

View file

@ -26,35 +26,32 @@ uniform sampler2D normalMap;
uniform sampler2D emissiveMap;
uniform vec2 emissiveParams;
// the interpolated normal
varying vec4 interpolatedNormal;
// the interpolated tangent
varying vec4 interpolatedTangent;
varying vec2 interpolatedTexcoord1;
varying vec3 color;
in vec4 _position;
in vec2 _texCoord0;
in vec2 _texCoord1;
in vec3 _normal;
in vec3 _tangent;
in vec3 _color;
void main(void) {
// compute the view normal from the various bits
vec3 normalizedNormal = normalize(vec3(interpolatedNormal));
vec3 normalizedTangent = normalize(vec3(interpolatedTangent));
vec3 normalizedNormal = normalize(_normal);
vec3 normalizedTangent = normalize(_tangent);
vec3 normalizedBitangent = normalize(cross(normalizedNormal, normalizedTangent));
vec3 localNormal = vec3(texture2D(normalMap, gl_TexCoord[0].st)) - vec3(0.5, 0.5, 0.5);
vec3 localNormal = vec3(texture(normalMap, _texCoord0)) - vec3(0.5, 0.5, 0.5);
vec4 viewNormal = vec4(normalizedTangent * localNormal.x +
normalizedBitangent * localNormal.y + normalizedNormal * localNormal.z, 0.0);
// set the diffuse, normal, specular data
vec4 diffuse = texture2D(diffuseMap, gl_TexCoord[0].st);
vec4 emissive = texture2D(emissiveMap, interpolatedTexcoord1.st);
vec4 diffuse = texture(diffuseMap, _texCoord0);
vec4 emissive = texture(emissiveMap, _texCoord1);
Material mat = getMaterial();
packDeferredFragmentLightmap(
normalize(viewNormal.xyz),
evalOpaqueFinalAlpha(getMaterialOpacity(mat), diffuse.a),
getMaterialDiffuse(mat) * diffuse.rgb * color,
getMaterialDiffuse(mat) * diffuse.rgb * _color,
getMaterialSpecular(mat),
getMaterialShininess(mat),
(vec3(emissiveParams.x) + emissiveParams.y * emissive.rgb));

View file

@ -12,6 +12,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
@ -20,41 +22,24 @@ const int MAX_TEXCOORDS = 2;
uniform mat4 texcoordMatrices[MAX_TEXCOORDS];
// the tangent vector
attribute vec3 tangent;
attribute vec2 texcoord1;
// the interpolated normal
varying vec4 interpolatedNormal;
// the interpolated tangent
varying vec4 interpolatedTangent;
// the interpolated texcoord1
varying vec2 interpolatedTexcoord1;
varying vec3 color;
out vec4 _position;
out vec2 _texCoord0;
out vec2 _texCoord1;
out vec3 _normal;
out vec3 _tangent;
out vec3 _color;
void main(void) {
// transform and store the normal and tangent for interpolation
//interpolatedNormal = gl_ModelViewMatrix * vec4(gl_Normal, 0.0);
//interpolatedTangent = gl_ModelViewMatrix * vec4(tangent, 0.0);
// pass along the diffuse color
color = gl_Color.xyz;
_color = inColor.xyz;
// and the texture coordinates
gl_TexCoord[0] = texcoordMatrices[0] * vec4(gl_MultiTexCoord0.xy, 0.0, 1.0);
interpolatedTexcoord1 = vec2(texcoordMatrices[1] * vec4(texcoord1.xy, 0.0, 1.0)).xy;
_texCoord0 = (texcoordMatrices[0] * vec4(inTexCoord0.st, 0.0, 1.0)).st;
_texCoord1 = (texcoordMatrices[1] * vec4(inTexCoord1.st, 0.0, 1.0)).st;
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, gl_Vertex, gl_Position)$>
<$transformModelToEyeDir(cam, obj, gl_Normal, interpolatedNormal.xyz)$>
<$transformModelToEyeDir(cam, obj, tangent, interpolatedTangent.xyz)$>
interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);
interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
<$transformModelToEyeDir(cam, obj, inTangent.xyz, _tangent)$>
}

View file

@ -29,37 +29,33 @@ uniform sampler2D normalMap;
// the specular map texture
uniform sampler2D specularMap;
// the interpolated normal
varying vec4 interpolatedNormal;
// the interpolated tangent
varying vec4 interpolatedTangent;
varying vec2 interpolatedTexcoord1;
varying vec3 color;
in vec4 _position;
in vec2 _texCoord0;
in vec2 _texCoord1;
in vec3 _normal;
in vec3 _tangent;
in vec3 _color;
void main(void) {
// compute the view normal from the various bits
vec3 normalizedNormal = normalize(vec3(interpolatedNormal));
vec3 normalizedTangent = normalize(vec3(interpolatedTangent));
vec3 normalizedNormal = normalize(_normal);
vec3 normalizedTangent = normalize(_tangent);
vec3 normalizedBitangent = normalize(cross(normalizedNormal, normalizedTangent));
vec3 localNormal = vec3(texture2D(normalMap, gl_TexCoord[0].st)) - vec3(0.5, 0.5, 0.5);
vec3 localNormal = vec3(texture(normalMap, _texCoord0)) - vec3(0.5, 0.5, 0.5);
vec4 viewNormal = vec4(normalizedTangent * localNormal.x +
normalizedBitangent * localNormal.y + normalizedNormal * localNormal.z, 0.0);
// set the diffuse, normal, specular data
vec4 diffuse = texture2D(diffuseMap, gl_TexCoord[0].st);
vec3 specular = texture2D(specularMap, gl_TexCoord[0].st).rgb;
vec4 emissive = texture2D(emissiveMap, interpolatedTexcoord1.st);
vec4 diffuse = texture(diffuseMap, _texCoord0);
vec3 specular = texture(specularMap, _texCoord0).rgb;
vec4 emissive = texture(emissiveMap, _texCoord1);
Material mat = getMaterial();
packDeferredFragmentLightmap(
normalize(viewNormal.xyz),
evalOpaqueFinalAlpha(getMaterialOpacity(mat), diffuse.a),
getMaterialDiffuse(mat) * diffuse.rgb * color,
getMaterialDiffuse(mat) * diffuse.rgb * _color,
specular, // no use of getMaterialSpecular(mat)
getMaterialShininess(mat),
(vec3(emissiveParams.x) + emissiveParams.y * emissive.rgb));

View file

@ -26,25 +26,24 @@ uniform vec2 emissiveParams;
// the specular texture
uniform sampler2D specularMap;
// the interpolated normal
varying vec4 interpolatedNormal;
varying vec2 interpolatedTexcoord1;
varying vec3 color;
in vec4 _position;
in vec2 _texCoord0;
in vec2 _texCoord1;
in vec3 _normal;
in vec3 _color;
void main(void) {
// set the diffuse, normal, specular data
vec4 diffuse = texture2D(diffuseMap, gl_TexCoord[0].st);
vec3 specular = texture2D(specularMap, gl_TexCoord[0].st).rgb;
vec4 emissive = texture2D(emissiveMap, interpolatedTexcoord1.st);
vec4 diffuse = texture(diffuseMap, _texCoord0);
vec3 specular = texture(specularMap, _texCoord0).rgb;
vec4 emissive = texture(emissiveMap, _texCoord1);
Material mat = getMaterial();
packDeferredFragmentLightmap(
normalize(interpolatedNormal.xyz),
normalize(_normal),
evalOpaqueFinalAlpha(getMaterialOpacity(mat), diffuse.a),
getMaterialDiffuse(mat) * diffuse.rgb * color,
getMaterialDiffuse(mat) * diffuse.rgb * _color,
specular, // no use of getMaterialSpecular(mat)
getMaterialShininess(mat),
(vec3(emissiveParams.x) + emissiveParams.y * emissive.rgb));

View file

@ -22,31 +22,29 @@ uniform sampler2D diffuseMap;
// the normal map texture
uniform sampler2D normalMap;
// the interpolated normal
varying vec4 interpolatedNormal;
// the interpolated tangent
varying vec4 interpolatedTangent;
varying vec3 color;
in vec4 _position;
in vec2 _texCoord0;
in vec3 _normal;
in vec3 _tangent;
in vec3 _color;
void main(void) {
// compute the view normal from the various bits
vec3 normalizedNormal = normalize(vec3(interpolatedNormal));
vec3 normalizedTangent = normalize(vec3(interpolatedTangent));
vec3 normalizedNormal = normalize(_normal.xyz);
vec3 normalizedTangent = normalize(_tangent.xyz);
vec3 normalizedBitangent = normalize(cross(normalizedNormal, normalizedTangent));
vec3 localNormal = normalize(vec3(texture2D(normalMap, gl_TexCoord[0].st)) - vec3(0.5, 0.5, 0.5));
vec3 localNormal = normalize(vec3(texture(normalMap, _texCoord0.st)) - vec3(0.5, 0.5, 0.5));
vec4 viewNormal = vec4(normalizedTangent * localNormal.x +
normalizedBitangent * localNormal.y + normalizedNormal * localNormal.z, 0.0);
vec4 diffuse = texture2D(diffuseMap, gl_TexCoord[0].st);
vec4 diffuse = texture(diffuseMap, _texCoord0.st);
Material mat = getMaterial();
packDeferredFragment(
normalize(viewNormal.xyz),
evalOpaqueFinalAlpha(getMaterialOpacity(mat), diffuse.a),
getMaterialDiffuse(mat) * diffuse.rgb * color,
getMaterialDiffuse(mat) * diffuse.rgb * _color,
getMaterialSpecular(mat),
getMaterialShininess(mat));
}
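
Note: every normal-mapped variant rebuilds a view-space basis from the interpolated _normal and _tangent, takes the bitangent as their cross product, and uses that basis to rotate the tangent-space sample read from the normal map (which is stored biased into the 0..1 range). That step, pulled out into a stand-alone function for clarity (the helper name is illustrative; the shaders above inline it in main()):

// rebuild a view-space normal from a tangent-space normal map sample
vec3 evalViewNormal(vec3 interpolatedNormal, vec3 interpolatedTangent, vec3 normalMapSample) {
    vec3 n = normalize(interpolatedNormal);
    vec3 t = normalize(interpolatedTangent);
    vec3 b = normalize(cross(n, t));                 // bitangent completes the TBN basis
    vec3 localNormal = normalMapSample - vec3(0.5);  // undo the 0..1 bias of the stored normal
    return normalize(t * localNormal.x + b * localNormal.y + n * localNormal.z);
}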

View file

@ -2,7 +2,7 @@
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// model.vert
// model_normal_map.vert
// vertex shader
//
// Created by Andrzej Kapolka on 10/14/13.
@ -12,6 +12,8 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
@ -20,38 +22,23 @@ const int MAX_TEXCOORDS = 2;
uniform mat4 texcoordMatrices[MAX_TEXCOORDS];
// the tangent vector
attribute vec3 tangent;
// interpolated eye position
varying vec4 interpolatedPosition;
// the interpolated normal
varying vec4 interpolatedNormal;
// the interpolated tangent
varying vec4 interpolatedTangent;
varying vec3 color;
out vec4 _position;
out vec2 _texCoord0;
out vec3 _normal;
out vec3 _tangent;
out vec3 _color;
void main(void) {
// transform and store the normal and tangent for interpolation
//interpolatedNormal = gl_ModelViewMatrix * vec4(gl_Normal, 0.0);
//interpolatedTangent = gl_ModelViewMatrix * vec4(tangent, 0.0);
// pass along the diffuse color
color = gl_Color.xyz;
_color = inColor.rgb;
// and the texture coordinates
gl_TexCoord[0] = texcoordMatrices[0] * vec4(gl_MultiTexCoord0.xy, 0.0, 1.0);
_texCoord0 = (texcoordMatrices[0] * vec4(inTexCoord0.xy, 0.0, 1.0)).st;
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, gl_Vertex, interpolatedPosition, gl_Position)$>
<$transformModelToEyeDir(cam, obj, gl_Normal, interpolatedNormal.xyz)$>
<$transformModelToEyeDir(cam, obj, tangent, interpolatedTangent.xyz)$>
interpolatedNormal = vec4(normalize(interpolatedNormal.xyz), 0.0);
interpolatedTangent = vec4(normalize(interpolatedTangent.xyz), 0.0);
<$transformModelToEyeAndClipPos(cam, obj, inPosition, _position, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
<$transformModelToEyeDir(cam, obj, inTangent.xyz, _tangent)$>
}

View file

@ -25,33 +25,31 @@ uniform sampler2D normalMap;
// the specular map texture
uniform sampler2D specularMap;
// the interpolated normal
varying vec4 interpolatedNormal;
// the interpolated tangent
varying vec4 interpolatedTangent;
varying vec3 color;
in vec4 _position;
in vec2 _texCoord0;
in vec3 _normal;
in vec3 _tangent;
in vec3 _color;
void main(void) {
// compute the view normal from the various bits
vec3 normalizedNormal = normalize(vec3(interpolatedNormal));
vec3 normalizedTangent = normalize(vec3(interpolatedTangent));
vec3 normalizedNormal = normalize(_normal);
vec3 normalizedTangent = normalize(_tangent);
vec3 normalizedBitangent = normalize(cross(normalizedNormal, normalizedTangent));
vec3 localNormal = normalize(vec3(texture2D(normalMap, gl_TexCoord[0].st)) - vec3(0.5, 0.5, 0.5));
vec3 localNormal = normalize(vec3(texture(normalMap, _texCoord0)) - vec3(0.5, 0.5, 0.5));
vec4 viewNormal = vec4(normalizedTangent * localNormal.x +
normalizedBitangent * localNormal.y + normalizedNormal * localNormal.z, 0.0);
// set the diffuse, normal, specular data
vec4 diffuse = texture2D(diffuseMap, gl_TexCoord[0].st);
vec3 specular = texture2D(specularMap, gl_TexCoord[0].st).rgb;
vec4 diffuse = texture(diffuseMap, _texCoord0);
vec3 specular = texture(specularMap, _texCoord0).rgb;
Material mat = getMaterial();
packDeferredFragment(
normalize(viewNormal.xyz),
evalOpaqueFinalAlpha(getMaterialOpacity(mat), diffuse.a),
getMaterialDiffuse(mat) * diffuse.rgb * color,
getMaterialDiffuse(mat) * diffuse.rgb * _color,
specular, //getMaterialSpecular(mat),
getMaterialShininess(mat));
}

View file

@ -12,7 +12,9 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
out vec4 _fragColor;
void main(void) {
// fixed color for now (we may eventually want to use texture alpha)
gl_FragColor = vec4(1.0, 1.0, 1.0, 0.0);
_fragColor = vec4(1.0, 1.0, 1.0, 0.0);
}

View file

@ -1,5 +1,5 @@
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
<@include gpu/Config.slh@>
<$VERSION_HEADER$>
// Generated on <$_SCRIBE_DATE$>
//
// model_shadow.vert
@ -11,12 +11,16 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
void main(void) {
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, gl_Vertex, gl_Position)$>
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
}

View file

@ -22,22 +22,23 @@ uniform sampler2D diffuseMap;
// the specular texture
uniform sampler2D specularMap;
// the interpolated normal
varying vec4 interpolatedNormal;
in vec4 _position;
in vec2 _texCoord0;
in vec3 _normal;
in vec3 _color;
varying vec3 color;
void main(void) {
// set the diffuse, normal, specular data
vec4 diffuse = texture2D(diffuseMap, gl_TexCoord[0].st);
vec3 specular = texture2D(specularMap, gl_TexCoord[0].st).rgb;
vec4 diffuse = texture(diffuseMap, _texCoord0);
vec3 specular = texture(specularMap, _texCoord0).rgb;
Material mat = getMaterial();
packDeferredFragment(
normalize(interpolatedNormal.xyz),
normalize(_normal),
evalOpaqueFinalAlpha(getMaterialOpacity(mat), diffuse.a),
getMaterialDiffuse(mat) * diffuse.rgb * color,
getMaterialDiffuse(mat) * diffuse.rgb * _color,
specular, //getMaterialSpecular(mat),
getMaterialShininess(mat));
}

View file

@ -11,34 +11,6 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<!/*<@include DeferredBufferWrite.slh@>
<@include model/Material.slh@>
// the diffuse texture
uniform sampler2D diffuseMap;
varying vec4 interpolatedNormal;
varying vec3 color;
void main(void) {
// Fetch diffuse map
vec4 diffuse = texture2D(diffuseMap, gl_TexCoord[0].st);
Material mat = getMaterial();
packDeferredFragmentTranslucent(
normalize(interpolatedNormal.xyz),
getMaterialOpacity(mat) * diffuse.a,
getMaterialDiffuse(mat) * diffuse.rgb * color,
getMaterialSpecular(mat),
getMaterialShininess(mat));
// set the diffuse data
// gl_FragData[0] = gl_Color * texture2D(diffuseMap, gl_TexCoord[0].st);
}*/!>
<@include model/Material.slh@>
// Everything about global lighting
@ -99,34 +71,31 @@ vec4 evalGlobalColor(float shadowAttenuation, vec3 position, vec3 normal, vec3 d
// the diffuse texture
uniform sampler2D diffuseMap;
// the interpolated view position
varying vec4 interpolatedPosition;
in vec4 _position;
in vec2 _texCoord0;
in vec3 _normal;
in vec3 _color;
// the interpolated normal
varying vec4 interpolatedNormal;
varying vec3 color;
out vec4 _fragColor;
void main(void) {
vec3 fragPosition = interpolatedPosition.xyz;
vec3 fragPosition = _position.xyz;
// Fetch diffuse map
vec4 diffuse = texture2D(diffuseMap, gl_TexCoord[0].st);
vec4 diffuse = texture(diffuseMap, _texCoord0);
Material mat = getMaterial();
vec3 fragNormal = normalize(interpolatedNormal.xyz);
vec3 fragNormal = normalize(_normal);
float fragOpacity = getMaterialOpacity(mat) * diffuse.a;
vec3 fragDiffuse = getMaterialDiffuse(mat) * diffuse.rgb * color;
vec3 fragDiffuse = getMaterialDiffuse(mat) * diffuse.rgb * _color;
vec3 fragSpecular = getMaterialSpecular(mat);
float fragGloss = getMaterialShininess(mat);
vec4 fragColor = evalGlobalColor(1.0,
_fragColor = evalGlobalColor(1.0,
fragPosition,
fragNormal,
fragDiffuse,
fragSpecular,
fragGloss,
fragOpacity);
gl_FragColor = fragColor;
}
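
Note: model_translucent.frag also drops a large commented-out legacy block and now writes straight to a named out instead of going through a temporary and gl_FragColor. Unlike the opaque shaders, it lights the fragment in the forward pass (evalGlobalColor) rather than packing it into the G-buffer, since blended fragments cannot be resolved later by the deferred pass. Condensed to its essentials (illustrative, using the names from the hunk):

<@include model/Material.slh@>

uniform sampler2D diffuseMap;

in vec4 _position;
in vec2 _texCoord0;
in vec3 _normal;
in vec3 _color;
out vec4 _fragColor;

void main(void) {
    vec4 diffuse = texture(diffuseMap, _texCoord0);
    Material mat = getMaterial();
    // forward-lit translucent path: evaluate the global light here and output a blended color
    _fragColor = evalGlobalColor(1.0,
                                 _position.xyz,
                                 normalize(_normal),
                                 getMaterialDiffuse(mat) * diffuse.rgb * _color,
                                 getMaterialSpecular(mat),
                                 getMaterialShininess(mat),
                                 getMaterialOpacity(mat) * diffuse.a);
}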

View file

@ -14,13 +14,14 @@
<@include DeferredBufferWrite.slh@>
varying vec2 varTexcoord;
in vec2 varTexcoord;
out vec4 outFragColor;
uniform sampler2D blurredOcclusionTexture;
void main(void) {
vec4 occlusionColor = texture2D(blurredOcclusionTexture, varTexcoord);
vec4 occlusionColor = texture(blurredOcclusionTexture, varTexcoord);
gl_FragColor = vec4(vec3(0.0), occlusionColor.r);
outFragColor = vec4(vec3(0.0), occlusionColor.r);
}

View file

@ -13,17 +13,18 @@
uniform sampler2D diffuseMap;
varying vec2 varTexcoord;
in vec2 varTexcoord;
varying vec3 varEyeNormal;
in vec3 varEyeNormal;
varying vec4 varColor;
in vec4 varColor;
out vec4 outFragColor;
void main(void) {
vec4 diffuse = texture2D(diffuseMap, varTexcoord.st);
vec4 diffuse = texture(diffuseMap, varTexcoord.st);
if (diffuse.a < 0.5) {
discard;
}
gl_FragColor = vec4(varColor * diffuse);
outFragColor = vec4(varColor * diffuse);
}
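
Note: one more recurring detail — the core profile also removes the fixed-function alpha test, so cutouts like this one rely on discard (as this shader already did), and the lookup becomes the overloaded texture(). The finished shader is small enough to show whole (reassembled from the hunk, illustrative):

uniform sampler2D diffuseMap;

in vec2 varTexcoord;
in vec4 varColor;
out vec4 outFragColor;

void main(void) {
    vec4 diffuse = texture(diffuseMap, varTexcoord);  // texture() replaces texture2D()
    if (diffuse.a < 0.5) {
        discard;     // alpha-test cutout; GL_ALPHA_TEST does not exist in the core profile
    }
    outFragColor = varColor * diffuse;
}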

View file

@ -10,31 +10,31 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
<@include gpu/Inputs.slh@>
<@include gpu/Transform.slh@>
<$declareStandardTransform()$>
//attribute vec2 texcoord;
varying vec2 varTexcoord;
out vec2 varTexcoord;
// interpolated eye position
varying vec4 varEyePosition;
out vec4 varEyePosition;
// the interpolated normal
varying vec3 varEyeNormal;
out vec3 varEyeNormal;
varying vec4 varColor;
out vec4 varColor;
void main(void) {
varTexcoord = gl_MultiTexCoord0.xy;
varTexcoord = inTexCoord0.xy;
// pass along the color
varColor = gl_Color;
varColor = inColor;
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToEyeAndClipPos(cam, obj, gl_Vertex, varEyePosition, gl_Position)$>
<$transformModelToEyeDir(cam, obj, gl_Normal, varEyeNormal.xyz)$>
<$transformModelToEyeAndClipPos(cam, obj, inPosition, varEyePosition, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, varEyeNormal.xyz)$>
}

Some files were not shown because too many files have changed in this diff.